diff --git a/.gitattributes b/.gitattributes index 0ef25423d0f94ac0d337ebaeec7af8e467718b34..e81778419094422e00b8406e5101bf20d9de441a 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1676,3 +1676,11 @@ data/2025/2503_09xxx/2503.09573/48dbe47b-de29-42fc-bf05-7830a092a51d_origin.pdf data/2025/2503_09xxx/2503.09594/1a9d7e5b-676e-4927-af63-f7f6355fd077_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2503_09xxx/2503.09780/08214423-a242-4bd0-9a40-e8d6d3d1934d_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2503_09xxx/2503.09799/13deeb11-b9f7-4b0d-afd2-8ce11506db61_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_09xxx/2503.09277/39fed2ad-9645-4fde-a1c3-86b0c99b7b36_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_09xxx/2503.09501/7c196e4e-1362-4974-a470-65c83d863927_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_09xxx/2503.09567/17e53201-29b3-43fd-8f2e-78d7b00a58a6_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_09xxx/2503.09595/b59876e9-da8b-438b-ab54-bb4c4d76820f_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_11xxx/2503.11701/f561bad2-8e9b-4fb7-9083-b32d2bfd8f1f_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2503_13xxx/2503.13502/db12ade8-3943-4647-bb0d-ce8160710750_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_content_list.json b/data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..d6f9d7853b03ff9a4b4d44c7f158cc3cdd176ab9 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_content_list.json @@ -0,0 +1,2651 @@ +[ + 
{ + "type": "text", + "text": "LocAgent: Graph-Guided LLM Agents for Code Localization", + "text_level": 1, + "bbox": [ + 176, + 90, + 818, + 112 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhaoling Chen\\*, Xiangru Tang\\*, Gangda Deng\\*, Fang Wu\\*, Jialong Wu\\*, Zhiwei Jiang, Viktor Prasanna\\*, Arman Cohan\\*, Xingyao Wang", + "bbox": [ + 102, + 135, + 897, + 175 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{\\spadesuit}$ Yale University $^{\\spadesuit}$ University of Southern California $^{\\spadesuit}$ Stanford University $^{\\spadesuit}$ All Hands AI xiangru.tang@yale.edu, gangdade@usc.edu, xingyao@all-hands.dev", + "bbox": [ + 104, + 175, + 892, + 212 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 260, + 260, + 339, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Code localization—identifying precisely where in a codebase changes need to be made—is a fundamental yet challenging task in software maintenance. Existing approaches struggle to efficiently navigate complex codebases when identifying relevant code sections. The challenge lies in bridging natural language problem descriptions with the appropriate code elements, often requiring reasoning across hierarchical structures and multiple dependencies. We introduce LOCAGENT, a framework that addresses code localization through graph-based representation. By parsing codebases into directed heterogeneous graphs, LOCAGENT creates a lightweight representation that captures code structures (files, classes, functions) and their dependencies (imports, invocations, inheritance), enabling LLM agents to effectively search and locate relevant entities through powerful multi-hop reasoning. Experimental results on real-world benchmarks demonstrate that our approach significantly enhances accuracy in code localization. 
Notably, our method with the fine-tuned Qwen-2.5-Coder-Instruct-32B model achieves comparable results to SOTA proprietary models at greatly reduced cost (approximately $86\\%$ reduction), reaching up to $92.7\\%$ accuracy on file-level localization while improving downstream GitHub issue resolution success rates by $12\\%$ for multiple attempts (Pass@10). Our code is available at https://github.com/gersteinlab/LocAgent.", + "bbox": [ + 141, + 287, + 460, + 741 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 114, + 765, + 258, + 780 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Code localization can be viewed as an information retrieval (IR) task that aims to identify relevant code snippets given natural language descriptions (Yu et al., 2025; Yang et al., 2024; Xia et al., 2024). Developers spend up to $66\\%$ of their debugging time (Böhme et al., 2017) understanding code to", + "bbox": [ + 112, + 790, + 487, + 887 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c20269c8cebd98330e89168b5cf72fa71b25a1845eac8027149a6e7bbe018c9f.jpg", + "image_caption": [ + "Figure 1: Code localization across four common programming scenarios. Given a codebase and an issue description, the goal of code localization is to identify the relevant code snippets that require modification to resolve the issue." + ], + "image_footnote": [], + "bbox": [ + 510, + 255, + 882, + 444 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "make changes, and automated tools often struggle with the same challenge. Poor code localization leads to incomplete fixes, introduces new bugs, and significantly extends development cycles. Unlike traditional retrieval tasks that primarily focus on lexical or semantic matching between queries and documents (Guo et al., 2016, 2020), code localization requires bridging the gap between natural language and programming languages. 
It also necessitates reasoning capabilities to analyze the issue, while considering the structural and semantic properties of code (Lewis et al., 2020; Guu et al., 2020; Qu et al., 2020). This capability has become fundamental to powerful AI assistants (OpenAI, 2023; Anthropic, 2023), code-aware search engines (PerplexityAI, 2023), and automated programming agents (Cognition.ai, 2024; Wang et al., 2025; Gauthier, 2024). In particular, accurate code localization is crucial for software maintenance and evolution, as it enables precise code modifications for bug fixes, refactoring, and feature additions (Wang et al., 2024), thereby streamlining the development workflow.", + "bbox": [ + 507, + 533, + 884, + 903 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Existing approaches to code localization face", + "bbox": [ + 527, + 904, + 880, + 921 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.09089v2 [cs.SE] 29 Apr 2025", + "bbox": [ + 21, + 310, + 60, + 722 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equal contribution. This work was done during Zhaoling's time at Yale.", + "bbox": [ + 112, + 894, + 487, + 921 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "significant limitations. Dense retrieval methods require maintaining and continuously updating vector representations of the entire codebase (Wang et al., 2023b; Günther et al., 2023), creating engineering challenges for large, evolving repositories where code changes frequently. While LLMs demonstrate strong code understanding capabilities (Kang et al., 2023; Wu et al., 2023), models with large context windows cannot process entire codebases at once, necessitating strategic navigation through relevant parts. Moreover, issue descriptions often mention only symptoms rather than underlying causes. 
For instance, a report of 'XSS vulnerability in user profile' might require changes to a shared validation utility used throughout the codebase but not explicitly referenced in the issue. This disconnect between issue descriptions and affected code components presents a substantial challenge for traditional retrieval approaches, which struggle to trace implicit dependencies across the codebase structure. Recent agent-based methods attempt to address these limitations through iterative exploration (Yang et al., 2024; Qin et al., 2024) but still struggle to efficiently navigate and comprehend complex code structures and dependencies, particularly when multi-hop reasoning is required to trace from issue descriptions to affected code regions that aren't directly mentioned.", + "bbox": [ + 115, + 84, + 485, + 533 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This raises a key question: How can we design efficient indexing as intermediate representations that are structure-aware and both easy and performant for LLM agents to consume? It is intuitive to design an agentic retrieval system that carefully combines traditional IR methods and LLM agent's reasoning ability to achieve accurate, efficient, and cost-effective code localization in codebases.", + "bbox": [ + 115, + 535, + 485, + 662 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address this challenge, we propose LOCAGENT, a framework that builds directed heterogeneous graph indexing to unify code structures, dependencies, and contents. Our approach leverages a structured graph representation that enables powerful multi-hop reasoning capabilities, allowing agents to navigate complex dependency relationships between code elements even when target code isn't explicitly mentioned in issue descriptions. This graph-based approach significantly outperforms previous methods on challenging localization tasks that require traversing multiple code relationships. 
Our lightweight representation, coupled with sparse indexing techniques, enables efficient entity search while maintaining rich structural information. The indexing process typically", + "bbox": [ + 115, + 664, + 485, + 920 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "takes only a few seconds per codebase, making it highly practical for real-time use. The framework integrates a set of unified tools that guide the agent through a systematic exploration of the codebase, allowing autonomous navigation based on contextual needs. Furthermore, by fine-tuning Qwen-2.5-Coder-Instruct (Hui et al., 2024) 7B and 32B models(abbr. as Qwen-2.5-7B and Qwen-2.5-32B respectively), our system achieves performance comparable to state-of-the-art models like Claude-3-5-sonnet-20241022 (Anthropic, 2023) (abbr. as Claude-3.5) while significantly reducing API costs by over $80\\%$ (from \\ $0.66 to \\$ 0.09 per example), making it practical for real-world deployment.", + "bbox": [ + 512, + 84, + 880, + 324 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Additionally, to facilitate a comprehensive evaluation of code localization methods, we introduce LOC-BENCH, a new benchmark specifically designed for this task. Existing benchmarks like SWE-Bench present significant limitations: (1) they risk contamination through data overlap with LLM training sets (Mündler et al., 2024), and (2) they primarily focus on bug fixing, lacking diversity in maintenance scenarios such as feature requests, performance optimizations, and security fixes. In contrast, LOC-BENCH covers diverse scenarios and mitigates potential contamination concerns by incorporating more recent examples from popular Python repositories collected after known LLM training cutoff dates. 
Additionally, we provide tooling to continuously update the benchmark with new examples, allowing researchers to maintain a fresh evaluation dataset as models evolve and training data cutoffs advance.", + "bbox": [ + 512, + 326, + 880, + 629 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions address critical gaps in existing approaches:", + "bbox": [ + 512, + 632, + 880, + 663 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce a heterogeneous graph representation that captures both explicit and implicit code relationships, enabling efficient multi-hop reasoning. Our lightweight graph-based indexing process takes only seconds per repository and requires minimal storage.", + "- We design unified tools for agent-based code exploration that leverage our graph representation, allowing LLM agents to perform complex multi-hop navigation and reasoning across code dependencies even when target code isn't explicitly mentioned in issue descriptions.", + "- We introduce Loc-Bench, a new benchmark" + ], + "bbox": [ + 534, + 675, + 880, + 919 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "specifically designed for code localization that addresses limitations in existing datasets. 
Unlike previous benchmarks dominated by bug reports, Loc-Bench offers a balanced distribution across bug fixes, feature requests, security patches, and performance optimizations.", + "bbox": [ + 149, + 84, + 487, + 181 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- By fine-tuning open-source models on this task, we reduce the cost of code localization by $86\\%$ while maintaining competitive performance.", + "bbox": [ + 136, + 195, + 489, + 258 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 112, + 288, + 268, + 304 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Traditional Retrieval-based Methods", + "text_level": 1, + "bbox": [ + 112, + 316, + 448, + 331 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Traditional IR methods rely on lexical or semantic matching to return ranked lists of code snippets. Sparse retrievers, such as BM25 (Robertson et al., 1994, 2009), have demonstrated robustness to domain adaptation. Dense retrievers utilize embeddings for improved semantic searching, including models with open checkpoints such as general text embedding models E5-base-v2 (Wang et al., 2022) and proprietary APIs (VoyageAI, 2024). Code embedding models such as Jina-Code-v2 (Günther et al., 2023), Codesage-large-v2 (Zhang et al., 2024), and CodeRankEmbed (Suresh et al., 2024), trained specifically for code related tasks, showing significant performance in Code2Code and NL2Code semantic search tasks. 
However, while the embedding models themselves are small, the engineering challenges of maintaining these indexing systems (e.g., storage requirements, update mechanisms, and infrastructure maintenance) make them difficult to adapt to fast-evolving codebases.", + "bbox": [ + 112, + 338, + 489, + 659 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 LLM-based Generative Retrieval Methods", + "text_level": 1, + "bbox": [ + 112, + 673, + 485, + 688 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recently, LLMs with advanced code reasoning capabilities have demonstrated superior performance by directly processing queries and raw code for code localization (Kang et al., 2023; Wu et al., 2023; Xia et al., 2024; Kang et al., 2024). For example, Agentless (Xia et al., 2024), initially designed for automated program repair, uses a simplistic hierarchical localization process powered by LLM. It employs a straightforward three-phase approach that first localizes relevant code sections before attempting to fix the identified issues, challenging the assumption that complex agent architectures are necessary for effective code understanding and modification tasks.", + "bbox": [ + 112, + 696, + 489, + 920 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Expanding on these techniques, agent-based methods utilize multi-step reasoning to enable automated codebase traversal. Specifically, OpenHands (Wang et al., 2025) implements a generalist coding agent that supports bash commands like grep and tools for viewing files. SWE-Agent (Yang et al., 2024) integrates a custom Agent-Computer Interface to support agents to navigate entire repositories. MoatlessTools (Örwall, 2024) combines an agentic searching loop and semantic search to obtain code locations. 
However, existing agent-based methods face two critical limitations: (a) they primarily navigate codebases through directory traversal rather than understanding semantic relationships, (b) and they struggle to extract and reason about complex cross-file dependencies when these relationships aren't explicitly represented in the repository structure. This significantly impairs their ability to locate code that requires modification when the issue involves interactions between structurally distant components in the codebase.", + "bbox": [ + 507, + 84, + 884, + 437 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3 Graph-based Code Representation Methods", + "text_level": 1, + "bbox": [ + 507, + 449, + 828, + 480 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Due to the inherent structure of code, several works have employed graph-based representations to improve code understanding by capturing key relationships between components. Aider (2023) constructs a RepoMap and uses a graph ranking algorithm to identify the most significant contextual elements. Similarly, as a plugin, RepoGraph (Ouyang et al., 2025) performs subgraph retrieval – extracting an ego-network of relevant lines and their neighbors – to provide structured context. CodexGraph (Liu et al., 2024) indexes the repository into a Neo4j graph database, where LLM agents query the database precisely using Cypher. The efficiency of its retrieval process depends heavily on the querying capabilities of the LLM. These methods focus primarily on providing relevant context but do not enhance the traversal process itself, as they do not explicitly model directory structure or file hierarchies.", + "bbox": [ + 507, + 486, + 884, + 790 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In contrast, RepoUnderstander (Ma et al., 2024) builds hierarchical and function-call graphs, using Monte Carlo Tree Search (MCTS) guided by an LLM for exploration. 
While thorough, MCTS introduces extra computational overhead, making it less efficient than simpler traversal methods like BFS, particularly in large repositories. OrcaLoca (Yu et al., 2025) uses a simplified graph", + "bbox": [ + 507, + 793, + 884, + 921 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/68d78298f4ef4a0462b402972796e7a35e59e5ec5b78ccafd6cd08b74dd8ad0d.jpg", + "image_caption": [ + "Figure 2: Overview of LOCAGENT framework. LOCAGENT first parses the given codebase to build a graph-based code representation with various types of entities and relations. It then constructs sparse indexes for exploring structures and searching content. Using these indexes, it performs agent-guided searches that combine the graph and tools." + ], + "image_footnote": [], + "bbox": [ + 114, + 82, + 884, + 319 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/eff607d6667dc8fa01afca421fe5518165076b8c38bf5f9855a1411d560992d1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodRelation TypesNode TypesSearch/Traversal Strategy
ContainImportInheritInvokeDirectoryFileClassFunction
CodexGraph(Liu et al., 2024)XXXCypher queries
RepoGraph(Ouyang et al., 2025)XXXEgo-graph retrieval
RepoUnderstander(Ma et al., 2024)XMCTS
OrcaLoca(Yu et al., 2025)XXSimple search tools
LOCAGENT(Ours)Unified retrieval tools
", + "bbox": [ + 117, + 417, + 880, + 524 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 1: Comparison of Graph-Based Code Representation Methods.", + "bbox": [ + 262, + 532, + 732, + 546 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "enhanced by priority scheduling and context pruning. It maintains efficient search but may miss complex invocation dependencies. Table 1 summarizes the differences between these methods and LOCAGENT. Compared to these approaches, LOCAGENT offers a more comprehensive and unified representation of the repository, along with efficient, unified retrieval tools specifically designed for LLM consumption.", + "bbox": [ + 112, + 562, + 489, + 707 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 The LOCAGENT Framework", + "text_level": 1, + "bbox": [ + 112, + 718, + 400, + 734 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We introduce LOCAGENT, a graph-oriented LLM-agent framework for code localization. Figure 2 illustrates the overall framework. When given a repository, LOCAGENT can locate all the relevant code sections at various granularities (file, class, function, or line level) for different types of GitHub issues (such as bug reports, feature requests, performance bottlenecks, and security vulnerabilities) through automated in-depth exploration and analysis of the codebase. Section 3.1 proposes a novel graph-based indexing approach as an intermediate", + "bbox": [ + 112, + 744, + 490, + 921 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "representation for codebases. 
Section 3.2 presents our agent-based code search on the indexes and Section 3.3 describes our model fine-tuning and distillation process.", + "bbox": [ + 507, + 562, + 882, + 626 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Graph-based Code Representation", + "text_level": 1, + "bbox": [ + 507, + 640, + 828, + 657 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Codebases contain rich structural information, both explicit and implicit, that is essential for agent reasoning. Building on this insight, we develop a graph-based indexing that comprehensively captures codebase relationships while maintaining a granularity suitable for LLM-agents to retrieve.", + "bbox": [ + 505, + 662, + 884, + 758 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Code Graph Construction. We construct a heterogeneous directed graph $\\mathcal{G}(\\mathcal{V},\\mathcal{E},\\mathcal{A},\\mathcal{R})$ to index the codebase, where $\\nu = \\{v_{i}\\}_{i = 1}^{n}$ is the node set and $\\mathcal{E}\\subseteq \\mathcal{V}\\times \\mathcal{V}$ is the edge set. Each node $v\\in \\mathcal{V}$ and edge $e\\in \\mathcal{E}$ has an associated type mapping function. For nodes, $\\tau (v):\\mathcal{V}\\to \\mathcal{A}$ maps to types $\\mathcal{A} = \\{\\mathrm{directory},\\mathrm{file},\\mathrm{class},\\mathrm{function}\\}$ . For edges, $\\phi (e):\\mathcal{E}\\rightarrow \\mathcal{R}$ maps to relationships $\\mathcal{R} = \\{\\mathrm{contain},\\mathrm{import},\\mathrm{invoke},\\mathrm{inherit}\\}$ . In this paper, we focus our study on Python reposito", + "bbox": [ + 507, + 760, + 885, + 921 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ries and leave codebases with other programming languages as future work.", + "bbox": [ + 112, + 84, + 485, + 116 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "First, we include all directories and Python files as nodes. 
Then, we parse each Python file using the abstract syntax tree (AST) to identify inner functions and classes recursively as nodes. We set the function level as the smallest node granularity and use each function's code content as the document for agent retrieval. This approach creates a good balance of information density between the index and documents, allowing LLMs to reason effectively within their context window limitations.", + "bbox": [ + 112, + 118, + 487, + 279 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Figure 2, all nodes with different types can be connected as a single tree using the contain relationship. This structure supports standard codebase-navigation operations from existing works. Our code graph further incorporates more advanced codebase relationships as edges: (1) the invoke relationship from function/class to function/class, where an invoke to a class represents class instantiation; (2) the import relationship from file to function/class; and (3) the inherit relationship between classes.", + "bbox": [ + 115, + 282, + 489, + 458 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Sparse Hierarchical Entity Indexing. We treat nodes in our code graph as entities and build hierarchical indexing based on their contents. For each keyword, we lookup the indexes from top to bottom: (1) We build an entity ID index as a unique identifier for each node using its fully qualified name. For example, a function calculate_sum in the MathUtils class located in src/utils.py would be represented as: src/utils.py:MathUtilscalculate_sum. (2) We construct a global dictionary to map the entity name (e.g., calculate_sum) to all nodes that share the same name. (3) We index entity IDs through an inverted index (i.e., BM25) to handle keyword searches that don't exactly match the IDs or names of entities. 
(4) For cases where input keywords aren't part of the entities' IDs (e.g., when a keyword refers to a global variable), we build an inverted index that maps code chunk(s) to each entity to cover all possible matches.", + "bbox": [ + 115, + 461, + 489, + 783 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Remark. Rather than relying solely on directory structures or hierarchical module indexing, our approach captures module dependencies that transcend directory boundaries. Two modules in distant directories (A and B) may appear unrelated in traditional navigation, but if they invoke each other or share inheritance, they're syntactically close in our graph representation. This syntactic", + "bbox": [ + 112, + 791, + 489, + 921 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/3ee9190f79ad7623fb4b180523bd53a7f66d2934c043f7c4febdadae07f9b9c0.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Tool NameInput ParamsOutput
SearchEntityKeywordsRelated Entities with Code Snippets
TraverseGraphStart Entity IDs Direction Traverse Hops Entity Types Relation TypesTraversed Subgraph, including Entities and Relations
RetrieveEntityEntity IDsComplete Code of Specified Entities
", + "bbox": [ + 514, + 80, + 880, + 237 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 2: List of unified APIs provided by LocAgent for code search and exploration.", + "bbox": [ + 507, + 244, + 882, + 275 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "proximity is essential for code localization because issues typically manifest through call relationships rather than directory structure. By capturing these functional dependencies, our approach efficiently identifies related components even when physically distant in the codebase.", + "bbox": [ + 505, + 286, + 882, + 381 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 Agent-guided Code Search", + "text_level": 1, + "bbox": [ + 507, + 397, + 766, + 412 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We develop tools based on the indexes built offline. During runtime, LOCAGENT takes issue statements as input and launches agents that autonomously use tools to localize target code sections. While the agent may iteratively invoke multiple tools internally to explore the codebase, LOCAGENT presents a simplified interface to users, requiring only a single-turn interaction—users submit an issue statement and receive localization results without additional input. This autonomous, self-contained workflow makes LOCAGENT both easy to deploy and highly practical for real-world use.", + "bbox": [ + 505, + 419, + 882, + 611 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Tool Design for Codebase Exploration. Recent works (Örwall, 2024; Wang et al., 2025), inspired by GUI-based IDEs, have developed numerous specialized tools for agents to explore codebases. However, these tools are initially designed for human readability, which sacrifices the compactness and efficiency that LLM agents prefer (Yang et al., 2024). 
Building upon our graph-based code representation, we can develop tools that support efficient higher-order codebase exploration to address these challenges. We unify all codebase navigation, search, and view operations into three tools (Table 2), introduced as follows.", + "bbox": [ + 505, + 614, + 884, + 822 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "SearchEntity: This tool searches codebases using keywords to locate relevant entities through our Hierarchical Entity Index. When an exact match isn't found in the upper index, the system performs a fuzzy search using the lower index. For each entity found, we return its code snippet in three detail", + "bbox": [ + 507, + 824, + 882, + 921 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "levels: fold, preview, and full code (Figure 6). This effectively prevents lengthy code context and reduces noise fed into agents.", + "bbox": [ + 112, + 84, + 487, + 131 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "**TraverseGraph:** This tool performs a type-aware breadth-first search (BFS) on the code graph, starting from input entities and allowing control over both traversal direction and number of hops. This supports agents to perform arbitrary multi-hop codebase navigation through only one action, significantly improving the efficiency compared with existing agent systems. Note that by allowing agents to select entity types and relation types for each traversal, this tool effectively leverages the LLM agents' coding expertise to generate proper meta paths—a crucial element for heterogeneous graph analysis (Lv et al., 2021). For example, by specifying entity types to {class, function} and relation types to {contain, inherit}, this tool returns the UML diagram. Additionally, we design an expanded tree-based format for the output subgraph that encodes both relation types and directions (Figure 7). 
(Fatemi et al., 2023) demonstrates that LLM performance on graph reasoning depends on the input graph format. Converting a graph into a tree structure encodes topology through the spatial distance between entity names, thereby deriving better performance. For detailed comparisons with alternative graph formats, please see Appendix A.1.2.", + "bbox": [ + 115, + 133, + 489, + 533 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "RetreiveEntity: This tool retrieves complete entity attributes for each input entity ID, including essential information such as file path, line number, and code content.", + "bbox": [ + 112, + 535, + 487, + 598 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Chain-of-Thought Agent Planning. We use chain-of-thought (CoT) prompting (shown in Appendix D) to guide the agent in solving code localization problems step by step. The agent systematically follows these steps: (1) Keyword extraction. The agent begins by breaking down the issue statement into different categories and then extracts relevant keywords that are closely related to the problem. (2) Linking keywords to code entities. The agent invokes SearchEntity to complete and clarify each extracted keyword.", + "bbox": [ + 115, + 599, + 489, + 775 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(3) Generate the logical flow from fault to failure. The agent first identifies the entry points that trigger the problem. Then, it iteratively traverse the codebase with TraverseGraph, retrieves code contents with RetrieveEntity, and searches new keywords with SearchEntity. Finally, it generates the logic flow based on the issue and additional context. (4) Locate the target entities. The agent pinpoints all suspicious code entities that need modification", + "bbox": [ + 112, + 777, + 487, + 920 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "based on the logic flow. 
Then, it ranks these entities based on their relevance.", + "bbox": [ + 507, + 84, + 880, + 115 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Confidence Estimation Based on Consistency. After generating a complete ranked list of candidate entities, to obtain a more consistent ranking, we measure the consistency (Wang et al., 2023a) of the LLM's predictions across multiple iterations. Specifically, we use the Reciprocal Rank as the initial confidence score for each predicted location. We then aggregate the scores for each entity across iterations to compute its final confidence score. The intuition behind this approach is that if the LLM consistently ranks a location higher in multiple iterations, it is more likely to be relevant.", + "bbox": [ + 507, + 117, + 882, + 309 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3 Open-source Model Fine-tuning", + "text_level": 1, + "bbox": [ + 507, + 322, + 808, + 338 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Given the high costs of proprietary LLM APIs and data security concerns, we fine-tuned open-source models to improve their code localization capabilities and enable local deployment. We collect 433 successful trajectories generated with Claude-3.5, where the agent completed tasks from the SWEBench training set. Due to budget constraints, we sample an additional 335 trajectories generated by the initially fine-tuned Qwen2.5-32B model. Importantly, we only select successful trajectories where the model correctly localized the issues, creating a high-quality dataset of correct reasoning paths. These successful examples are then used to refine the same 32B model further, reinforcing effective reasoning patterns through this self-improvement loop. 
The entire dataset, combining both Claude-3.5 trajectories and successful Qwen2.5-32B samples, was then used to distill knowledge to a smaller 7B model.", + "bbox": [ + 507, + 343, + 882, + 648 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To fine-tune the smaller model, we employ Supervised Fine-Tuning (SFT) with LoRA (Hu et al., 2021). Our experiments show that this straightforward distillation method significantly enhances the performance of smaller models. See Appendix C.1.3 for more training details.", + "bbox": [ + 507, + 650, + 882, + 746 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 LOC-BENCH: A New Benchmark for Code Localization", + "text_level": 1, + "bbox": [ + 507, + 760, + 863, + 791 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Revisiting Existing Benchmark", + "text_level": 1, + "bbox": [ + 507, + 803, + 800, + 818 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "SWE-Bench(Jimenez et al., 2023) is a widely used benchmark that collects GitHub issues and corresponding code patches that resolve them. Xia et al. (2024); Suresh et al. (2024) adapt its subset, SWE-Bench-Lite, for code localization, treating the patched files and functions as the targets.", + "bbox": [ + 507, + 824, + 882, + 921 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "However, existing datasets, including SWE-Bench, present challenges for effectively evaluating code localization methods. First, they are at risk of contamination, as they may include data overlapping with the repositories or issues used by modern models during pre-training. Second, existing datasets are not specifically designed for code localization (Tomassi et al., 2019). SWE-Bench, for instance, was created primarily to evaluate end-to-end bug-fixing capabilities, with localization being only an implicit intermediate step. 
This focus results in datasets dominated by bug reports (85% of SWE-Bench-Lite examples) while severely underrepresenting other common software maintenance tasks such as feature requests (14%), security vulnerabilities (1%), and performance optimizations (0%). This imbalance fails to capture the diverse localization challenges faced in real-world software development.", + "bbox": [ + 115, + 84, + 489, + 388 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Dataset Construction", + "text_level": 1, + "bbox": [ + 115, + 401, + 324, + 414 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To address the limitations of existing benchmarks, we introduce LOC-BENCH, a new dataset specifically designed for code localization. This dataset collects up-to-date issues from Python repositories to mitigate the influence of pre-training bias in the latest LLMs. Additionally, LOC-BENCH covers wider categories, including bug reports, feature requests, security, and performance issues, enabling a more comprehensive evaluation of code localization methods. The statistics of LOC-BENCH are shown in Table 3.", + "bbox": [ + 115, + 420, + 487, + 596 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For the Bug Report category, we collect GitHub issues created after October 2024, which is later than the release dates of most modern LLMs. To enrich the dataset with more instances of security and performance issues, we use the GitHub Search API to search for relevant keywords, such as \"latency improvement\" for performance-related issues. We exclude instances that involve modifying more than five Python files or more than ten functions in the corresponding patch. 
For further details, see Appendix B.1.", + "bbox": [ + 115, + 598, + 487, + 774 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5 Experiments", + "text_level": 1, + "bbox": [ + 115, + 785, + 257, + 802 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Our experiments aim to evaluate four key aspects of LOCAGENT: (1) the effectiveness of our graph-based representation and tooling for code localization compared to existing methods, (2) the performance of fine-tuned open-source models as cost-effective alternatives to proprietary LLMs, (3) a detailed analysis of how performance varies across", + "bbox": [ + 115, + 809, + 487, + 919 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/dffede0cb6e1140d33ef35874b6f55be329c72cec08ada3fd8a7e8684a261136.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetCategory#Sample
SWE-Bench-Lite (Total = 300)Bug Report254
Feature Request43
Security Issue3
Performance Issue0
Loc-Bench (Total = 560)Bug Report242
Feature Request150
Security Issue29
Performance Issue139
", + "bbox": [ + 524, + 82, + 868, + 265 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3: Distribution of samples across different categories in the SWE-Bench-Lite and Loc-Bench datasets.", + "bbox": [ + 510, + 275, + 880, + 304 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "task categories, and (4) the contribution of each component in our framework through comprehensive ablation studies. We evaluate on both SWE-Bench-Lite and our introduced Loc-Bench dataset. Additionally, we examine the impact of improved localization on downstream software maintenance tasks.", + "bbox": [ + 510, + 317, + 880, + 426 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.1 Experimental Settings", + "text_level": 1, + "bbox": [ + 510, + 432, + 727, + 449 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Datasets. We first conduct experiments on SWEBench-Lite, treating the patched files and functions as the targets for localization. Following Suresh et al. (2024), we excluded examples where no existing functions were modified by the patch, ultimately retaining 274 out of the original 300 examples.", + "bbox": [ + 510, + 455, + 880, + 565 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Metrics. To assess performance, we use a modified accuracy metric inspired by R-Precision from information retrieval, following Agentless(Xia et al., 2024). To assess performance, we use Acc@k (Accuracy at k) as our evaluation metric, following Agentless(Xia et al., 2024). For each example, we select the top-k predicted locations and consider a localization attempt successful only if all relevant locations are correctly identified within these top-k predictions. This approach measures the ability to fully identify all necessary code sections that require modification. We report results across multiple $k$ values: file localization at Acc@1, Acc@3, and Acc@5, and function localization at Acc@5 and Acc@10. 
Additionally, to provide a more relaxed evaluation criteria, we assess module localization, which only requires finding any function within the patched class.", + "bbox": [ + 510, + 568, + 880, + 854 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2 Baselines", + "text_level": 1, + "bbox": [ + 510, + 868, + 626, + 882 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We evaluate LOCAGENT against three categories of competitive baselines: (a) Retrieval-based meth", + "bbox": [ + 510, + 890, + 880, + 919 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/ad9888427e4959ea8df866a9f22f434e24d97aa01a98851cc1f0d57d84fed76f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TypeMethodLoc-ModelFile (%)Module (%)Function (%)
Acc@1Acc@3Acc@5Acc@5Acc@10Acc@5Acc@10
Embedding-BasedBM25 (Robertson et al., 1994)38.6951.8261.6845.2652.9231.7536.86
E5-base-v2 (Wang et al., 2022)49.6474.4580.2967.8872.2639.4251.09
Jina-Code-v2 (Günther et al., 2023)43.4371.1780.2963.5072.6342.3452.19
Codesage-large-v2 (Zhang et al., 2024)47.8169.3478.1060.5869.7133.9444.53
CodeRankEmbed (Suresh et al., 2024)52.5577.7484.6771.9078.8351.8258.76
Procedure-BasedAgentless (Xia et al., 2024)GPT-4o67.1574.4574.4567.1567.1555.4755.47
Claude-3.572.6379.2079.5668.9868.9858.7658.76
Agent-BasedMoatlessTools (Örwall, 2024)GPT-4o73.3684.3185.0474.8276.2857.3059.49
Claude-3.572.6385.7786.1376.2876.2864.6064.96
SWE-agent (Yang et al., 2024)GPT-4o57.3064.9668.9858.0358.0345.9946.35
Claude-3.577.3787.2390.1577.7478.1064.2364.60
Openhands (Wang et al., 2025)GPT-4o60.9571.9073.7262.4163.8749.6450.36
Claude-3.576.2889.7890.1583.2183.5868.2570.07
LOCAGENT (Ours)Qwen2.5-7B(ft)70.8084.6788.3281.0282.8564.2371.53
Qwen2.5-32B(ft)75.9190.5192.7085.7787.2371.9077.01
Claude-3.577.7491.9794.1686.5087.5973.3677.37
", + "bbox": [ + 119, + 64, + 882, + 300 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4: Performance comparison with baseline methods on code localization on SWE-bench lite. Results show the accuracy at file, module, and function levels. For Agent-Based methods, we use GPT-4o-2024-0513 (abbr. as GPT-4o) and Claude-3-5-sonnet-20241022 (abbr. as Claude-3.5) as the localization model. Additionally, the performance of our fine-tuned open-source models, Qwen2.5-7B(ft) and Qwen2.5-32B(ft), are included for comparison.", + "bbox": [ + 112, + 309, + 884, + 381 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ods: We include the sparse retrieval approach BM25 (Robertson et al., 1994) and several state-of-the-art embedding models, including the general-purpose E5-base-v2 (Wang et al., 2022) and specialized code embedding models such as JinaCode-v2 (Günther et al., 2023), Codesage-large-v2 (Zhang et al., 2024), and the current SOTA code embedding model CodeRankEmbed (Suresh et al., 2024). Proprietary embedding solutions were excluded due to API costs. (b) Procedure-based methods: We compare against Agentless (Xia et al., 2024), which employs a structured hierarchical approach to code localization without complex agent architectures. (c) Agent-based methods: We include several advanced agent frameworks designed for code exploration and modification, specifically OpenHands (Wang et al., 2025) (using its default CodeActAgent implementation), SWE-Agent (Yang et al., 2024), and MoatlessTools (Örwall, 2024). 
For implementation details, please refer to Appendix C.1.1.", + "bbox": [ + 110, + 388, + 490, + 728 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3 Evaluation Results on SWE-Bench-Lite", + "text_level": 1, + "bbox": [ + 112, + 739, + 472, + 753 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As shown in Table 4, Agent-Based methods consistently outperform other approaches, and our method demonstrates competitive performance by achieving the best results across all levels of code localization. Unlike traditional retrieval-based methods, Agentless identifies only a limited number of locations due to its narrow repository scope, which hinders performance gains when considering a broader set of candidates. The results of the NDCG are presented in Table 11 in the Appendix.", + "bbox": [ + 112, + 760, + 490, + 921 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f929dd53f4adf74c47eab0524e9c10d9df2c8753a81e68bd756d7c86d5d4876b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 521, + 384, + 870, + 511 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/dd0a043ec99e4aa7d3cdd9cab21f0f8c8ff70d258cc383c3b7961f81be3880dc.jpg", + "image_caption": [ + "Figure 3: Performance analysis at different difficulty levels for file- and function-level localization. All agent-based methods and Agentless use Claude-3.5 as the localization model. Hop $N$ refers to the distances between functions mentioned in the issue description and the ground truth patch on our code graph." + ], + "image_footnote": [], + "bbox": [ + 522, + 514, + 870, + 642 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To further analyze the results, we examine performance across different task difficulty levels. We measure the task difficulty by calculating the shortest hops between the functions mentioned in the issue descriptions and the patched functions on our code graph (See Appendix C.1.2 for more details). 
As shown in Figure 3, performance decreases for all methods as the task becomes more challenging. However, Agent-based methods demonstrate better robustness as the difficulty increases, with", + "bbox": [ + 507, + 760, + 884, + 921 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/594e4e848668151fa0f0d585dcf3a37fcc51677d8a82c5fdceaf97947d73c7bb.jpg", + "image_caption": [ + "Figure 4: Comparison of performance between the original and fine-tuned Qwen models. The metrics used are file-level Acc@5 and module/function-level Acc@10. Dashed lines represent the performance of the Claude-3.5 model for reference." + ], + "image_footnote": [], + "bbox": [ + 126, + 74, + 475, + 209 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "our method maintaining competitive performance across various difficulty levels. Retrieval-based methods, such as E5-Base-v2 and CodeRankEmbed, perform poorly at the function level, even when the patched functions are explicitly mentioned in the query. This is because they treat the query as a whole, failing to capture fine-grained details. Agentless performs even worse than retrieval-based methods when exploration beyond the query is needed ( $hop \\geq 0$ ) due to its simplistic localization process and limited view focused only on the repository structure.", + "bbox": [ + 112, + 309, + 489, + 502 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.4 Fine-tuned Open-source Models", + "text_level": 1, + "bbox": [ + 112, + 514, + 413, + 529 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Figure 4 demonstrates that after fine-tuning, both the 7B and 32B models show significant improvements on this task. LOCAGENT with finetuned Qwen-2.5-Coder-Instruct-32B (abbreviated as Qwen2.5-32B(ft)) achieves performance comparable to Claude-3.5, and LOCAGENT with Qwen2.5-7B(ft) also delivers results on par with that obtained using GPT-4o. 
As shown in Table 4, our method with Qwen2.5-32B(ft) outperforms nearly all baselines, including those that use larger and more powerful LLMs. The original 7B model performs poorly due to its limited tool-use capability (Chen et al., 2024). These results validate the feasibility of deploying our fine-tuned open-source models as promising alternatives to proprietary APIs, especially in resource-constrained applications.", + "bbox": [ + 112, + 533, + 489, + 808 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.5 Efficiency Analysis", + "text_level": 1, + "bbox": [ + 112, + 819, + 310, + 835 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 5 presents an efficiency analysis comparing agent-based methods in terms of cost and the number of agent interactions required. MoatlessTools demonstrates good cost-efficiency and requires relatively fewer rounds of interaction. However, the", + "bbox": [ + 112, + 841, + 489, + 921 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/ca58188c6fe4c2ffec34aeccdb6ee5a73bdf63c458fea192f02d6865ca76db6a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodLM#RoundCost($)Acc@10
Cost
MoatlessToolsGPT-4o50.461.3
Claude-3.550.461.4
SWE-agentGPT-4o80.560.8
Claude-3.590.671.0
OpenhandsGPT-4o150.830.6
Claude-3.5130.790.9
OursClaude-3.570.661.2
Qwen2.5-7B(ft)60.0513.2
Qwen2.5-32B(ft)90.098.6
", + "bbox": [ + 507, + 80, + 873, + 214 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/cbbf57fce918f5d0e7521a35c3e24e9a0612bad18584b2403a531e9d40562501.jpg", + "table_caption": [ + "Table 5: Efficiency analysis comparing the average cost and number of agent interaction rounds required by different methods. The cost-efficiency of each method is evaluated using the ratio of function-level Acc@10 to average cost." + ], + "table_footnote": [], + "table_body": "
Model SettingFile Acc@5Module Acc@10Function Acc@10
Ours88.3282.8571.53
w/o TraverseGraph86.1378.4766.06
Relation Types: contain86.5079.5666.42
Traverse Hops: 186.8680.2966.79
w/o RetrieveEntity87.5981.3969.34
w/o SearchEntity68.9861.3153.28
w/o BM25 index75.1868.9860.22
", + "bbox": [ + 510, + 300, + 878, + 441 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 6: The ablation study of our model. The metrics used here are file-level Acc@5, module-level Acc@10, and function-level Acc@10. The impact of removing or fixing components is analyzed to observe how each component contributes to the overall accuracy.", + "bbox": [ + 507, + 451, + 882, + 524 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "dense embeddings it uses make it difficult and slow to adapt to fast-evolving codebases. SWE-agent and Openhands also show moderate costs but still do not match the efficiency of LOCAGENT. For LOCAGENT with Claude-3.5, although more rounds of interaction are required, the cost remains lower than that of Openhands, illustrating the token efficiency of our tool's outputs. LOCAGENT with fine-tuned Qwen models stands out for its superior efficiency1. Qwen2.5-7B(ft) is the most cost-efficient option, requiring only $0.05 per example, while Qwen2.5-32B(ft) offers a more cost-effective alternative to Claude-3.5. These results highlight the potential of fine-tuned open-source models as efficient alternatives, providing an optimal balance of cost-effectiveness and performance that surpasses other methods.", + "bbox": [ + 505, + 546, + 884, + 820 + ], + "page_idx": 8 + }, + { + "type": "page_footnote", + "text": "1We calculate the cost based on the prices from AI inference providers (Hyperbolic, 2025; artificialanalysis.ai, 2025). Specifically, for the Qwen2.5-32B(ft) model, the cost is $0.20/1M tokens for both input and output. 
For the Qwen2.5-7B(ft) model, the cost is $0.14/1M tokens for input and $0.28/1M tokens for output.", + "bbox": [ + 507, + 847, + 882, + 921 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.6 Ablation Study", + "text_level": 1, + "bbox": [ + 114, + 84, + 278, + 99 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We conduct an ablation study to evaluate the effectiveness of each component of our toolsets. Due to budget constraints, we use the fine-tuned Qwen-2.5-7B as the localization model for these experiments.", + "bbox": [ + 112, + 107, + 487, + 185 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Each tool in our toolset plays a critical role in code localization performance. As shown in Table 6, removing any tool, especially the SearchEntity tool, leads to varying degrees of accuracy degradation, particularly in module and function level localization. This highlights the critical role each tool plays in identifying relevant modules and functions.", + "(2) The graph structure provides essential information for accurate code localization. Removing TraverseGraph tool decreases module and function level performance since the agent cannot obtain any structure information about the codebase and relies on reasoning capability to identify call relationship or directory structure. Adding contain relationship provides only marginal improvements compared to fully removing TraverseGraph, emphasizing the importance of the other three relationship types and explaining why our method surpasses others relying only on the repository structure.", + "(3) Multi-hop exploration is crucial for deep code understanding. 
When compared to the full setting, fixing $Hops = 1$ leads to a moderate decline in file and module-level accuracy, but it causes a more significant decrease in function-level accuracy, underscoring the importance of multi-hop exploration for identifying relevant entities.", + "(4) Sparse indexing significantly enhances localization performance. Removing SearchEntity tool, or even partial removal of its index, causes a substantial drop in performance across all metrics. This demonstrates the effectiveness of building a sparse index on our code graph for improving localization performance." + ], + "bbox": [ + 112, + 187, + 489, + 753 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5.7 Evaluation Results on Loc-Bench", + "text_level": 1, + "bbox": [ + 112, + 769, + 420, + 784 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "To ensure the robustness and generalization of our methods and fine-tuned Qwen models, and to eliminate potential data leakage, we evaluate our new dataset. Since Loc-Bench includes examples that edit 1 to 5 files, we assess file localization at top-5 and top-10 ranks, and function/module localization at top-10 and top-15 ranks. Table 7 shows that our fine-tuned Qwen2.5-7B model exhibits strong gen", + "bbox": [ + 112, + 791, + 487, + 921 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/40113cdec1e1feca7000cb77d3a5294a6135d63a0d343288699aecc05a7b7a58.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 521, + 79, + 870, + 195 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/f3ff500f60d006d9ab2100b2e34cdaaed024cc944a6d5f1861937bb334d1a3cf.jpg", + "image_caption": [ + "Figure 5: Performance analysis at different difficulty category for file- and function-level localization. All agent-based baselines and Agentless use Claude-3.5 as the localization model." 
+ ], + "image_footnote": [], + "bbox": [ + 522, + 199, + 870, + 315 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "eralization capabilities, maintaining competitive performance compared to SWE-agent using more expensive and strong model. These results highlight the practicality of the fine-tuned Qwen2.5-7B model for real-world applications. Despite being an open-source alternative, it achieves a performance comparable to Claude-3.5, supporting its feasibility as a cost-effective substitute for commercial models in practical scenarios.", + "bbox": [ + 505, + 399, + 882, + 543 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Additionally, we evaluate the performance across four different difficulty categories. Figure 5 clearly shows that our method outperforms other methods in almost all categories of code localization. However, it also highlights a noticeable decrease in performance across the other three categories compared to the Bug Report category. This performance gap likely reflects our training data distribution, which contained more bug report examples, potentially leading to scaffolds better optimized for bug localization tasks. This trend suggests that while our method is highly effective for bug report localization, there is still room for improvement in handling the other categories through more balanced training data and category-specific optimization strategies.", + "bbox": [ + 507, + 545, + 882, + 802 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5.8 Application: Better Localization Leads to More Solved GitHub Issues", + "text_level": 1, + "bbox": [ + 507, + 816, + 880, + 848 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "To assess the impact of localization methods on downstream tasks, we evaluated their effectiveness in solving GitHub issues. 
We choose Agentless as the baseline, ranking among the top-performing", + "bbox": [ + 507, + 856, + 880, + 921 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/6c9737e3fa48ebc0fa6a563806676ce4a098be8490a88f39f510258c9a12539f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodLoc ModelFile (%)Module (%)Function (%)
Acc@5Acc@10Acc@10Acc@15Acc@10Acc@15
IR-BasedCodeRankEmbed74.2980.8963.2167.5043.3946.61
AgentlessClaude-3.567.5067.5053.3953.3942.6842.68
OpenHandsClaude-3.579.8280.0068.9369.1159.1159.29
SWE-agentClaude-3.577.6877.6863.5763.7551.9651.96
LocAgent (Ours)Qwen2.5-7B(ft)78.5779.6463.0463.0451.4351.79
Claude-3.583.3986.0770.8971.0759.2960.71
", + "bbox": [ + 147, + 80, + 845, + 225 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/cd90b427caeb94e68300cb0963a3939e90034375ecc1b70e22bb7ba12b53914b.jpg", + "table_caption": [ + "Table 7: Performance evaluation on the real-world LocBench dataset." + ], + "table_footnote": [], + "table_body": "
MethodLocalization LMAcc@5Pass@1Pass@10
AgentlessClaude-3.558.3926.3133.58
OursQwen2.5-32B(ft)69.3426.7936.13
Claude-3.573.3627.9237.59
", + "bbox": [ + 114, + 273, + 482, + 344 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Table 8: Impact of localization accuracy on downstream bug repair tasks.", + "bbox": [ + 112, + 353, + 485, + 384 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "open-source submissions on SWE-Bench-Lite. For consistency, we utilized Claude-3.5 as the editing model in conjunction with the Agentless editing method. Table 8 shows that the success rate for solving GitHub issues improves significantly with better code localization accuracy.", + "bbox": [ + 112, + 397, + 487, + 494 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 112, + 505, + 247, + 519 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In conclusion, LOCAGENT enhances code localization by structuring codebases as graphs, enabling efficient repository-level exploration for LLM agents. With fine-tuned open-source models, our method achieves high localization accuracy while significantly reducing costs compared to larger proprietary models. Experimental results demonstrate the effectiveness of LOCAGENT in identifying relevant code components and improving downstream tasks.", + "bbox": [ + 112, + 530, + 489, + 690 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Limitations", + "text_level": 1, + "bbox": [ + 112, + 703, + 218, + 717 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "First, our study primarily focused on fine-tuning Qwen-2.5-Coder models. Exploring a broader range of base models, including other open-source LLMs like CodeLlama, Mistral, or Yi, could provide valuable insights into model selection trade-offs. 
Additionally, investigating different finetuning approaches beyond LoRA, such as full finetuning or other parameter-efficient methods, could potentially yield better performance.", + "bbox": [ + 112, + 728, + 487, + 872 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Second, though we demonstrated improved bug repair performance with better localization, we only scratched the surface of potential downstream", + "bbox": [ + 112, + 873, + 487, + 922 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "applications. Future work should evaluate LocAgent's impact on other software engineering tasks like refactoring, feature addition, security vulnerability patching, and performance optimization. This would provide a more comprehensive understanding of the framework's practical utility.", + "bbox": [ + 507, + 275, + 884, + 370 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Moreover, our fine-tuning process relied heavily on trajectories generated by Claude-3.5 and the fine-tuned Qwen2.5-32B model. A more diverse training dataset incorporating examples from different models, tasks, and repositories could improve the robustness and generalization of fine-tuned models. Additionally, analyzing the impact of different dataset compositions and filtering strategies on model performance could yield valuable insights.", + "bbox": [ + 507, + 372, + 884, + 533 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Finally, the current evaluation focuses primarily on Python codebases. Extending LOCAGENT to support other programming languages and evaluating its performance across different language paradigms would better demonstrate its generalizability. 
Further, our evaluation metrics could be expanded to include more nuanced measures of localization quality beyond accuracy and NDCG.", + "bbox": [ + 507, + 533, + 885, + 663 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 510, + 690, + 608, + 705 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Aider. 2023. Building a better repository map with tree sitter. Accessed: April 15, 2025.", + "Anthropic. 2023. Claude: Conversational ai by anthropic. Accessed: January 21, 2025.", + "artificialanalysis.ai. 2025. Artificial analysis. https://artificialanalysis.ai/models/. Accessed: 2025-04-28.", + "Marcel Böhme, Ezekiel O Soremekun, Sudipta Chattopadhyay, Emamurho Ugherughe, and Andreas Zeller. 2017. Where is the bug and how is it fixed? an experiment with practitioners. In Proceedings of the 2017 11th joint meeting on foundations of software engineering, pages 117-128." + ], + "bbox": [ + 509, + 713, + 884, + 921 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zehui Chen, Weihua Du, Wenwei Zhang, Kuikun Liu, Jiangning Liu, Miao Zheng, Jingming Zhuo, Songyang Zhang, Dahua Lin, Kai Chen, et al. 2024. T-eval: Evaluating the tool utilization capability of large language models step by step. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9510-9529.", + "Cognition.ai. 2024. Introducing devin, the first ai software engineer.", + "John Ellson, Emden Gansner, Lefteris Koutsofios, Stephen C North, and Gordon Woodhull. 2002. Graphviz—open source graph drawing tools. In Graph Drawing: 9th International Symposium, GD 2001 Vienna, Austria, September 23–26, 2001 Revised Papers 9, pages 483–484. Springer.", + "Bahare Fatemi, Jonathan Halcrow, and Bryan Perozzi. 2023. Talk like a graph: Encoding graphs for large language models. 
arXiv preprint arXiv:2310.04560.", + "Paul Gauthier. 2024. How aider scored sota $26.3\\%$ on swe bench lite | aider. Accessed: January 21, 2025.", + "Jiafeng Guo, Yixing Fan, Qingyao Ai, and W Bruce Croft. 2016. A deep relevance matching model for ad-hoc retrieval. In Proceedings of the 25th ACM international on conference on information and knowledge management, pages 55-64.", + "Jiafeng Guo, Yixing Fan, Liang Pang, Liu Yang, Qingyao Ai, Hamed Zamani, Chen Wu, W Bruce Croft, and Xueqi Cheng. 2020. A deep look into neural ranking models for information retrieval. Information Processing & Management, 57(6):102067.", + "Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In International conference on machine learning, pages 3929-3938. PMLR.", + "Michael Gunther, Louis Milliken, Jonathan Geuter, Georgios Mastrupas, Bo Wang, and Han Xiao. 2023. Jina embeddings: A novel set of high-performance sentence embedding models. Preprint, arXiv:2307.11224.", + "Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685.", + "Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, Kai Dang, Yang Fan, Yichang Zhang, An Yang, Rui Men, Fei Huang, Bo Zheng, Yibo Miao, Shanghaoran Quan, Yunlong Feng, Xingzhang Ren, Xuancheng Ren, Jingren Zhou, and Junyang Lin. 2024. Qwen2.5-coder technical report. Preprint, arXiv:2409.12186.", + "Hyperbolic. 2025. Hyperbolic website. https:// hyperbolic.xyz/. Accessed: 2025-04-15." + ], + "bbox": [ + 115, + 85, + 485, + 919 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Carlos E Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik Narasimhan. 2023. 
Swe-bench: Can language models resolve real-world github issues? arXiv preprint arXiv:2310.06770.", + "Sungmin Kang, Gabin An, and Shin Yoo. 2023. A preliminary evaluation of llm-based fault localization. arXiv preprint arXiv:2308.05487.", + "Sungmin Kang, Gabin An, and Shin Yoo. 2024. A quantitative and qualitative evaluation of llm-based explainable fault localization. Proceedings of the ACM on Software Engineering, 1(FSE):1424-1446.", + "Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, et al. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems, 33:9459-9474.", + "Xiangyan Liu, Bo Lan, Zhiyuan Hu, Yang Liu, Zhicheng Zhang, Fei Wang, Michael Shieh, and Wenmeng Zhou. 2024. Codexgraph: Bridging large language models and code repositories via code graph databases. Preprint, arXiv:2408.03910.", + "Qingsong Lv, Ming Ding, Qiang Liu, Yuxiang Chen, Wenzheng Feng, Siming He, Chang Zhou, Jianguo Jiang, Yuxiao Dong, and Jie Tang. 2021. Are we really making much progress? revisiting, benchmarking and refining heterogeneous graph neural networks. In Proceedings of the 27th ACM SIGKDD conference on knowledge discovery & data mining, pages 1150-1160.", + "Yingwei Ma, Qingping Yang, Rongyu Cao, Binhua Li, Fei Huang, and Yongbin Li. 2024. How to understand whole software repository? arXiv e-prints, pages arXiv-2406.", + "Niels Mündler, Mark Müller, Jingxuan He, and Martin Vechev. 2024. Swt-bench: Testing and validating real-world bug-fixes with code agents. Advances in Neural Information Processing Systems, 37:81857-81887.", + "OpenAI. 2023. Chatgpt: Language model by openai. Accessed: January 21, 2025.", + "Siru Ouyang, Wenhao Yu, Kaixin Ma, Zilin Xiao, Zhihan Zhang, Mengzhao Jia, Jiawei Han, Hongming Zhang, and Dong Yu. 2025. Repograph: Enhancing AI software engineering with repository-level code graph. 
In The Thirteenth International Conference on Learning Representations.", + "PerplexityAI. 2023. Perplexity ai: An ai-powered search engine. Accessed: January 21, 2025.", + "Yihao Qin, Shangwen Wang, Yiling Lou, Jinhao Dong, Kaixin Wang, Xiaoling Li, and Xiaoguang Mao. 2024. Agentfl: Scaling llm-based fault localization to project-level context. arXiv preprint arXiv:2403.16362." + ], + "bbox": [ + 510, + 85, + 880, + 917 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Chen Qu, Liu Yang, Cen Chen, Minghui Qiu, W Bruce Croft, and Mohit Iyyer. 2020. Open-retrieval conversational question answering. In Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval, pages 539-548.", + "Stephen Robertson, Hugo Zaragoza, et al. 2009. The probabilistic relevance framework: Bm25 and beyond. Foundations and Trends® in Information Retrieval, 3(4):333-389.", + "Stephen E. Robertson, Steve Walker, Susan Jones, Micheline Hancock-Beaulieu, and Mike Gatford. 1994. Okapi at trec-3. In Text Retrieval Conference.", + "Tarun Suresh, Revanth Gangi Reddy, Yifei Xu, Zach Nussbaum, Andriy Mulyar, Brandon Duderstadt, and Heng Ji. 2024. Cornstack: High-quality contrastive data for better code ranking. arXiv preprint arXiv:2412.01007.", + "David A. Tomassi, Naji Dmeiri, Yichen Wang, Antara Bhowmick, Yen-Chuan Liu, Premkumar Devanbu, Bogdan Vasilescu, and Cindy Rubio-Gonzalez. 2019. Bugswarm: Mining and continuously growing a dataset of reproducible failures and fixes. Preprint, arXiv:1903.06725.", + "VoyageAI. 2024. Voyage-code-2: Elevate your code retrieval. Accessed: 2024-02-02.", + "Liang Wang, Nan Yang, Xiaolong Huang, Bixing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, and Furu Wei. 2022. Text embeddings by weakly-supervised contrastive pre-training. arXiv preprint arXiv:2212.03533.", + "Xingyao Wang, Boxuan Li, Yufan Song, Frank F. 
Xu, Xiangru Tang, Mingchen Zhuge, Jiayi Pan, Yueqi Song, Bowen Li, Jaskirat Singh, Hoang H. Tran, Fuqiang Li, Ren Ma, Mingzhang Zheng, Bill Qian, Yanjun Shao, Niklas Muennighoff, Yizhe Zhang, Binyuan Hui, Junyang Lin, Robert Brennan, Hao Peng, Heng Ji, and Graham Neubig. 2025. Open hands: An open platform for AI software developers as generalist agents. In The Thirteenth International Conference on Learning Representations.", + "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2023a. Self-consistency improves chain of thought reasoning in language models. Preprint, arXiv:2203.11171.", + "Yue Wang, Hung Le, Akhilesh Deepak Gotmare, Nghi D. Q. Bui, Junnan Li, and Steven C. H. Hoi. 2023b. Codet5+: Open code large language models for code understanding and generation. Preprint, arXiv:2305.07922.", + "Zora Zhiruo Wang, Akari Asai, Xinyan Velocity Yu, Frank F. Xu, Yiqing Xie, Graham Neubig, and Daniel Fried. 2024. Coderag-bench: Can retrieval augment code generation? Preprint, arXiv:2406.14497." + ], + "bbox": [ + 115, + 85, + 489, + 919 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yonghao Wu, Zheng Li, Jie M Zhang, Mike Papadakis, Mark Harman, and Yong Liu. 2023. Large language models in fault localisation. arXiv preprint arXiv:2308.15276.", + "Chunqiu Steven Xia, Yinlin Deng, Soren Dunn, and Lingming Zhang. 2024. Agentless: Demystifying llm-based software engineering agents. arXiv preprint arXiv:2407.01489.", + "John Yang, Carlos E Jimenez, Alexander Wettig, Kili-ian Lieret, Shunyu Yao, Karthik Narasimhan, and Ofir Press. 2024. Swe-agent: Agent-computer interfaces enable automated software engineering. arXiv preprint arXiv:2405.15793.", + "Zhongming Yu, Hejia Zhang, Yujie Zhao, Hanxian Huang, Matrix Yao, Ke Ding, and Jishen Zhao. 2025. Ocaloca: An llm agent framework for software issue localization. 
arXiv preprint arXiv:2502.00350.", + "Dejiao Zhang, Wasi Uddin Ahmad, Ming Tan, Hantian Ding, Ramesh Nallapati, Dan Roth, Xiaofei Ma, and Bing Xiang. 2024. CODE REPRESENTATION LEARNING AT SCALE. In The Twelfth International Conference on Learning Representations.", + "Albert Örwall. 2024. Moatless tools." + ], + "bbox": [ + 510, + 85, + 882, + 438 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A LOCAGENT Design Details", + "text_level": 1, + "bbox": [ + 112, + 83, + 386, + 99 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.1 Tool Output Design", + "text_level": 1, + "bbox": [ + 112, + 109, + 319, + 124 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.1.1 Three-level format for SearchEntity output", + "text_level": 1, + "bbox": [ + 112, + 130, + 467, + 162 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Once invoked by the LLM agent, the retrieval APIs search for files, classes, methods, and code snippets in the codebase, and return the results back to the agent. To avoid forming very lengthy code context that may containing noisy information to LLM, we return only necessary information as API outputs. To achieve this, we desgined four granular standard output formats (Figure 6): fold, preview, full code.", + "bbox": [ + 112, + 166, + 487, + 309 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.1.2 Tree-based Subgraph Formatting for TraverseGraph Output", + "text_level": 1, + "bbox": [ + 112, + 319, + 468, + 351 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The TraverseGraph tool traverses the code graph and returns a local subgraph for each input entity. The agent reasons about these subgraphs to understand each entity's complex dependencies. However, reasoning about graphs remains challenging for LLMs. Research by (Fatemi et al., 2023) demonstrates that LLM performance varies significantly based on graph formatting (how graphs are encoded as text). 
This makes the format design for output subgraphs crucial.", + "bbox": [ + 112, + 356, + 487, + 516 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We have developed a new tree-based format, shown in Figure 7, with several features that enhance LLM reasoning: (1) We represent subgraphs as trees, allowing LLMs to use indentation to determine a node's distance from the root, (2) We display complete entity IDs for each node (e.g., django/core-validators.py:RegexValidator) to help LLMs locate nodes easily, and (3) We explicitly specify relation types for each edge, including reversed relations", + "bbox": [ + 112, + 517, + 490, + 676 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To evaluate how different graph formats impact code localization performance, we conducted an experiment using 37 challenging samples from SWEBench-Lite. These samples were considered \"challenging\" because they could not be solved by any baseline agent methods. Using Claude-3.5 as the Localization Model across all settings, we compared various output formats. Table 9 presents our findings. The baseline output formats we tested are described below:", + "bbox": [ + 112, + 678, + 489, + 837 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "- row: For each line, list one row of the adjacency matrix. For example,", + "bbox": [ + 134, + 851, + 487, + 883 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "function \"fileA.py:funcA\" invokes function \"fileA.py:funcB\", \"fileA.py:funcC\"", + "bbox": [ + 147, + 889, + 487, + 921 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- row (w/ entity attributes): Additionally include entity attributes for format row.", + "- incident: The incident format mentioned in (Fatemi et al., 2023). An integer instead of entity ID is used to represent each node. 
For example," + ], + "bbox": [ + 529, + 84, + 882, + 190 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Map function \"fileA.py:funcA\" to index 0. Map function \"fileA.py:funcB\" to index 1. Map function \"fileA.py:funcC\" to index 2.", + "bbox": [ + 541, + 196, + 880, + 243 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "function $O$ invokes function 1,2.", + "bbox": [ + 542, + 249, + 779, + 265 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Graphviz DOT: Represent graph in Graphviz DOT language (Ellson et al., 2002).", + "- JSON: Expand the subgraph as a tree, and convert it to JSON format." + ], + "bbox": [ + 531, + 274, + 880, + 348 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "As shown in Table 9, expanding subgraphs as trees (i.e., JSON, tree-based) can significantly improve the performance. Our tree-based format achieves the best overall performance across different levels of localization tasks. We also test returning entity attributes along with subgraphs. We notice that row (w/ entity attributes) consistently underperforms row, indicating the attributes for all the nodes may be very noisy. Besides, although using incident format can simplify the output and show improvements in file-level localization, it degradation the module- and file-level localization.", + "bbox": [ + 507, + 359, + 884, + 551 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.2 Implementation", + "text_level": 1, + "bbox": [ + 507, + 563, + 685, + 577 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To enable the LLM agent to invoke the Code Localization APIs, we handle the interaction differently based on the LLM's capabilities. For LLMs that support tool-calling features, we define the tools as a list of JSON objects, which are then used as parameters for the API calls. 
For LLMs that do not support tool-calling (such as Qwen), we provide the description of the API and the expected output as part of the LLM's prompt. When the agent decides to invoke a set of retrieval APIs, it responds with a list of API call names and their corresponding arguments. These retrieval API requests are processed locally by searching over the built code graph. The results from executing these APIs locally are returned to the agent.", + "bbox": [ + 505, + 583, + 882, + 824 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "By default, we query the LLM with a temperature setting of 1.0. We conduct two interactions, after which we rerank the results based on mean reciprocal rank (MRR) scores. We also leverage multiprocessing execution to speed up the process. Since all our tools are read-only, LOCAGENT does", + "bbox": [ + 507, + 825, + 882, + 921 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/e3da4d4339e99b1a14d6f8d73ea975c643cc44ad95a8dc1803fff42294d4f99a.jpg", + "image_caption": [ + "Figure 6: Different output formats designed for efficient agent-code interaction. Left: Full code output when matched entities $\\leq 3$ . Middle: Preview output showing module skeleton for large files. Right: Fold output showing only entity IDs when matches $>3$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 127, + 85, + 366, + 343 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/003dcde246a439f5b9b36cc33df7c37daaa5e4eaf0478ed30be80b30cbe72965.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 374, + 86, + 621, + 343 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/86b464c56ef20eb2e4a58b1077f46f88aa113910e37ea427df9e03b94670489d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 86, + 872, + 343 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/503072355158697ea6cb6ac7e2712ec0ffc412f1cd70207465e4cb36e21f3c9b.jpg", + "image_caption": [ + "Figure 7: A truncated example of the expanded tree-based format for the output subgraph of tool TraverseGraph." + ], + "image_footnote": [], + "bbox": [ + 268, + 410, + 727, + 636 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "not require a specialized Docker environment to operate.", + "bbox": [ + 112, + 686, + 485, + 717 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B Dataset construction and statistics", + "text_level": 1, + "bbox": [ + 114, + 730, + 445, + 744 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.1 Dataset construction details", + "text_level": 1, + "bbox": [ + 114, + 755, + 381, + 769 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Example collection. We collected examples on popular Python repositories on Github follow (Jimenez et al., 2023). To gather issues related to performance and security, we searched for the keywords listed in Table 10 using the GitHub Search APIs. We then used GPT-4o-2024-0513 as the classifier based on the issue descriptions.", + "bbox": [ + 112, + 776, + 487, + 888 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Ground Truth Locations. 
The affected files or functions in the original codebase, as identified in", + "bbox": [ + 112, + 889, + 487, + 921 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "the patches, are considered the target locations for the given issue. While it is possible to fix a bug in a location different from the ground truth, the extracted ground-truth locations still serve as approximate targets for localization. Additionally, edited code such as documents, import statements, and comments are excluded from the localization target. These elements are not considered relevant for bug localization, as they do not directly impact the functionality of the code or its execution. By filtering out these elements, the focus is maintained on the core code changes that are relevant for localization.", + "bbox": [ + 507, + 686, + 884, + 892 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/65aa2b242676fd4ce7ba9e60ceadb537140f50dff384a8c6651710edad591512.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Output FormatFile(%)Module(%)Function(%)
Acc@1Acc@3Acc@5Acc@5Acc@10Acc@5Acc@10
row41.1867.6570.5961.7661.7635.2938.24
row (w/ entity attributes)41.1864.7164.7150.0050.0032.3532.35
incident41.1870.5973.5355.8855.8829.4132.35
Graphviz DOT41.1873.5382.3564.7164.7135.2935.29
JSON41.1867.6576.4767.6570.5938.2441.18
tree-based (Ours)47.0679.4179.4164.7164.7138.2441.18
", + "bbox": [ + 157, + 80, + 840, + 225 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/2976dea7e0ff07dfee13b430fc8f8efa94af68a9c7230eb416ed229809e5c751.jpg", + "table_caption": [ + "Table 9: Localization performance under different TraverseGraph output formats." + ], + "table_footnote": [], + "table_body": "
CategoryKeywords
Performancebottleneck, performance improvement, memory usage optimization, time complexity reduction, latency improvement, scalability improvement, CPU usage reduction, caching improvement, concurrency optimization
SecurityOut-of-bounds Write, Out-of-bounds Read, NULL Pointer Dereference, Missing Authorization, memory leak fix, security vulnerability, security issue, authentication bypass, authentication issue, better maintained, buffer overflow, denial of service, security hardening, security patch, unsafe deserialization, Use After Free, Integer Overflow or Wraparound, Uncontrolled Resource Consumption, Missing Authentication for Critical Function
", + "bbox": [ + 157, + 260, + 840, + 439 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 10: We use these Keywords to search for Performance and Security related issues with Github Search APIs.", + "bbox": [ + 112, + 448, + 880, + 464 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C Additional Experiments", + "text_level": 1, + "bbox": [ + 112, + 488, + 359, + 505 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.1 Implementation Details", + "text_level": 1, + "bbox": [ + 112, + 514, + 349, + 530 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.1.1 Baselines Implementation", + "text_level": 1, + "bbox": [ + 112, + 535, + 381, + 551 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Regarding the embedding-based methods in our evaluation, these approaches operate primarily at the function level, where each function is embedded as a separate unit. The function's context (its containing file and class) is appended to the function representation before embedding, rather than being embedded separately. While theoretically these methods could employ hierarchical indexing, the standard implementations we evaluated use flat indexing structures where each function is embedded as a single unit.", + "bbox": [ + 112, + 554, + 489, + 731 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We use OpenHands's remote runtime feature to parallelize evaluation on OpenHands and SWEagent. We use Openhands version 0.12.0 released on Oct 31, 2024.", + "bbox": [ + 112, + 732, + 489, + 796 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.1.2 Quantifying Task Difficulty Based on Code Graph Distance", + "text_level": 1, + "bbox": [ + 112, + 804, + 470, + 837 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We measure task difficulty by computing the average shortest hop distance between the functions mentioned in the issue descriptions and the patched functions within our code graph. 
Specifically, we first extract potential function names from each", + "bbox": [ + 112, + 841, + 489, + 921 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "issue description using GPT-4o-2024-0513, and identify their corresponding nodes in the code graph using the global dictionary. These identified nodes form the set of predicted nodes, denoted as $\\mathcal{C}$ . Similarly, we link the ground truth functions from the patch to their corresponding nodes in the code graph, forming the set of target nodes, denoted as $\\mathcal{T}$ . To quantify the difficulty $\\delta$ , we calculate the average shortest hop distance between the predicted nodes $\\mathcal{C}$ and the target nodes $\\mathcal{T}$ , defined as:", + "bbox": [ + 507, + 489, + 884, + 650 + ], + "page_idx": 15 + }, + { + "type": "equation", + "text": "\n$$\n\\delta = \\frac {1}{| \\mathcal {C} |} \\sum_ {c \\in \\mathcal {C}} \\frac {1}{m i n _ {t \\in \\mathcal {T}} d (c , t) + 1}\n$$\n", + "text_format": "latex", + "bbox": [ + 576, + 656, + 815, + 693 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where $d(c, t)$ represents the shortest hop distance between nodes $c$ and $t$ in the graph. For performance analysis stratified by difficulty, we round $\\delta$ down to $\\lfloor \\delta \\rfloor$ to group samples by difficulty levels, and we exclude samples where the LLM fails to extract any valid function names.", + "bbox": [ + 507, + 701, + 884, + 797 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.1.3 Training details.", + "text_level": 1, + "bbox": [ + 507, + 804, + 702, + 821 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Fine-tuning Settings. We use Qwen-2.5-Coder-Instruct (Hui et al., 2024) 7B and 32B variants as our base models. 
We fine-tuned Qwen-2.5-Coder-Instruct 7B and 32B models on 768 training samples from the SWE-Bench training dataset, leveraging LoRA", + "bbox": [ + 507, + 825, + 882, + 921 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/d58f8658622664aa6fdd9f0ba4233e824e52755bf040929bc8346fe186a5d5e3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TypeMethodLoc-ModelFile (%)Module (%)Function (%)
NDCG@1NDCG@3NDCG@5NDCG@5NDCG@10NDCG@5NDCG@10
Embedding-BasedBM25 (Robertson et al., 2009)38.6946.550.6137.3139.8626.1527.92
E5-base-v2 (Wang et al., 2022)49.6464.1966.653.1554.4531.3935.3
Jina-Code-v2 (Günther et al., 2023)43.4359.9363.751.0254.1333.2836.44
Codesage-large-v2 (Zhang et al., 2024)47.8160.8264.3949.3852.2227.0330.74
CodeRankEmbed (Suresh et al., 2024)52.5567.5470.3957.5159.7640.2842.55
Procedure-BasedAgentless (Xia et al., 2024)GPT-4o67.1571.7671.7664.3164.3153.8153.81
Claude-3.572.6376.7276.8767.3667.3657.5557.55
Agent-BasedMoatlessTools (Örwall, 2024)GPT-4o73.3680.0380.3368.5769.0949.7750.62
Claude-3.572.6380.7380.8869.1169.1153.0353.16
SWE-agent (Yang et al., 2024)GPT-4o57.363.9664.1253.9553.9542.3242.44
Claude-3.577.3784.3284.9372.7772.959.6759.79
OpenHands (Wang et al., 2025)
Claude-3.576.2884.2784.4375.7975.9263.1363.8
LocAgent (Ours)Qwen2.5-7B(ft)70.8079.3680.970.9971.6855.6258.09
Qwen2.5-32B(ft)75.9184.7485.6476.2876.7764.2765.93
Claude-3.577.7486.1987.1477.7378.164.3465.57
", + "bbox": [ + 119, + 82, + 878, + 284 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 11: NDCG scores comparison showing ranking quality of different methods.", + "bbox": [ + 218, + 294, + 776, + 309 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "for efficient adaptation. The training set included 447 samples generated by Claude-3.5, while the remaining samples were iteratively generated using the fine-tuned Qwen2.5-32B model. The fine-tuning process was conducted over 5 epochs with max_token set to $128k$ and a learning rate of $2 \\times 10^{-4}$ .", + "bbox": [ + 112, + 335, + 487, + 447 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D Prompt", + "text_level": 1, + "bbox": [ + 114, + 458, + 220, + 475 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this section, we go through the prompt template that make up the agent's history.", + "bbox": [ + 112, + 485, + 485, + 517 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Prompt", + "text_level": 1, + "bbox": [ + 127, + 254, + 171, + 265 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Given the following GitHub problem description, your objective is to localize the specific files, classes or functions, and lines of code that need modification or contain key information to resolve the issue.", + "bbox": [ + 127, + 268, + 870, + 287 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Follow these steps to localize the issue:", + "bbox": [ + 127, + 292, + 366, + 302 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Step 1: Categorize and Extract Key Problem Information", + "bbox": [ + 127, + 302, + 458, + 310 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Classify the problem statement into the following categories:", + "Problem description, error trace, code to reproduce the bug, and additional context.", + "- Identify modules in the '{package_name}' package mentioned in 
each category.", + "- Use extracted keywords and line numbers to search for relevant code references for additional context." + ], + "bbox": [ + 127, + 311, + 724, + 342 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Step 2: Locate Referenced Modules", + "bbox": [ + 127, + 349, + 337, + 357 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Accurately determine specific modules", + "bbox": [ + 139, + 359, + 354, + 365 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- Explore the repo to familiarize yourself with its structure.", + "bbox": [ + 139, + 366, + 495, + 373 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- Analyze the described execution flow to identify specific modules or components being referenced.", + "bbox": [ + 139, + 374, + 707, + 381 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- Pay special attention to distinguishing between modules with similar names using context and described execution flow.", + "bbox": [ + 127, + 382, + 816, + 388 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- Output Format for collected relevant modules:", + "bbox": [ + 127, + 390, + 400, + 397 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- Use the format: 'file path:QualifiedName'", + "bbox": [ + 139, + 398, + 383, + 405 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- E.q., for a function `calculate_sum` in the `MathUtilities` class located in `src/helpers/mathHelpers.py`, represent it as:", + "bbox": [ + 139, + 406, + 840, + 413 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "'src/helpers/mathHelpers.py:MathUtil calculator_sum'.", + "bbox": [ + 156, + 414, + 462, + 422 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "## Step 3: Analyze and Reproducing the Problem", + "bbox": [ + 127, + 428, + 394, + 437 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Clarify the Purpose of the Issue", + "bbox": [ + 139, + 438, + 324, + 445 + ], + 
"page_idx": 17 + }, + { + "type": "text", + "text": "- If expanding capabilities: Identify where and how to incorporate new behavior, fields, or modules.", + "bbox": [ + 139, + 445, + 714, + 454 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- If addressing unexpected behavior: Focus on localizing modules containing potential bugs.", + "bbox": [ + 139, + 455, + 663, + 462 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- Reconstruct the execution flow", + "bbox": [ + 127, + 463, + 315, + 470 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- Identify main entry points triggering the issue.", + "bbox": [ + 139, + 470, + 426, + 479 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- Trace function calls, class interactions, and sequences of events.", + "bbox": [ + 139, + 479, + 531, + 486 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- Identify potential breakpoints causing the issue.", + "bbox": [ + 139, + 487, + 433, + 494 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Important: Keep the reconstructed flow focused on the problem, avoiding irrelevant details.", + "bbox": [ + 139, + 495, + 662, + 502 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "## Step 4: Locate Areas for Modification", + "bbox": [ + 127, + 508, + 361, + 517 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- Locate specific files, functions, or lines of code requiring changes or containing critical information for resolving the issue.", + "bbox": [ + 127, + 518, + 873, + 526 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- Consider upstream and downstream dependencies that may affect or be affected by the issue.", + "bbox": [ + 127, + 527, + 657, + 533 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- If applicable, identify where to introduce new fields, functions, or variables.", + "bbox": [ + 127, + 535, + 594, + 542 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": 
"- Think Thoroughly: List multiple potential solutions and consider edge cases that could impact the resolution.", + "bbox": [ + 127, + 543, + 766, + 550 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Output Format for Final Results:", + "bbox": [ + 127, + 557, + 329, + 565 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Your final output should list the locations requiring modification, wrapped with triple back ticks", + "bbox": [ + 127, + 567, + 685, + 574 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Each location should include the file path, class name (if applicable), function name, or line numbers, ordered by importance.", + "bbox": [ + 127, + 575, + 850, + 582 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Your answer would better include about 5 files.", + "bbox": [ + 127, + 583, + 400, + 590 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Examples:", + "bbox": [ + 127, + 596, + 205, + 607 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "full_path1/file1.py", + "bbox": [ + 127, + 613, + 240, + 621 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "line: 10", + "bbox": [ + 129, + 623, + 176, + 629 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "class: MyClass1", + "bbox": [ + 129, + 630, + 216, + 638 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "function: my_function1", + "bbox": [ + 129, + 638, + 257, + 646 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "full path2/file2.py", + "bbox": [ + 129, + 653, + 240, + 662 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "line:76", + "bbox": [ + 129, + 663, + 176, + 670 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "function: MyClass2.my_function2", + "bbox": [ + 129, + 670, + 307, + 678 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "full_path3/file3.py", + "bbox": [ + 129, + 684, + 240, + 694 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "line: 
24", + "bbox": [ + 129, + 695, + 176, + 702 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "line: 156", + "bbox": [ + 129, + 702, + 183, + 709 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "function: my_function3", + "bbox": [ + 129, + 709, + 257, + 718 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "#", + "bbox": [ + 129, + 718, + 144, + 724 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Return just the location(s)", + "bbox": [ + 127, + 733, + 284, + 741 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Note: Your thinking should be thorough and so it's fine if it's very long.", + "bbox": [ + 127, + 743, + 554, + 751 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Figure 8: The task instruction prompt for LOCAGENT.", + "bbox": [ + 310, + 766, + 682, + 781 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_model.json b/data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a3e0f59e4c80cc9831ca325b8147e7751be984be --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_model.json @@ -0,0 +1,3261 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.178, + 0.091, + 0.82, + 0.113 + ], + "angle": 0, + "content": "LocAgent: Graph-Guided LLM Agents for Code Localization" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.136, + 0.898, + 0.176 + ], + "angle": 0, + "content": "Zhaoling Chen\\*, Xiangru Tang\\*, Gangda Deng\\*, Fang Wu\\*, Jialong Wu\\*, Zhiwei Jiang, Viktor Prasanna\\*, Arman Cohan\\*, Xingyao Wang" + }, + { + "type": "text", + "bbox": [ + 0.105, + 0.177, + 0.894, + 0.213 + ], + "angle": 0, + "content": "\\(^{\\spadesuit}\\)Yale University \\(^{\\spadesuit}\\)University of Southern California \\(^{\\spadesuit}\\)Stanford University \\(^{\\spadesuit}\\)All Hands AI 
xiangru.tang@yale.edu, gangdade@usc.edu, xingyao@all-hands.dev" + }, + { + "type": "title", + "bbox": [ + 0.261, + 0.261, + 0.341, + 0.276 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.288, + 0.461, + 0.743 + ], + "angle": 0, + "content": "Code localization—identifying precisely where in a codebase changes need to be made—is a fundamental yet challenging task in software maintenance. Existing approaches struggle to efficiently navigate complex codebases when identifying relevant code sections. The challenge lies in bridging natural language problem descriptions with the appropriate code elements, often requiring reasoning across hierarchical structures and multiple dependencies. We introduce LOCAGENT, a framework that addresses code localization through graph-based representation. By parsing codebases into directed heterogeneous graphs, LOCAGENT creates a lightweight representation that captures code structures (files, classes, functions) and their dependencies (imports, invocations, inheritance), enabling LLM agents to effectively search and locate relevant entities through powerful multi-hop reasoning. Experimental results on real-world benchmarks demonstrate that our approach significantly enhances accuracy in code localization. Notably, our method with the fine-tuned Qwen-2.5-Coder-Instruct-32B model achieves comparable results to SOTA proprietary models at greatly reduced cost (approximately \\(86\\%\\) reduction), reaching up to \\(92.7\\%\\) accuracy on file-level localization while improving downstream GitHub issue resolution success rates by \\(12\\%\\) for multiple attempts (Pass@10). Our code is available at https://github.com/gersteinlab/LocAgent." 
+ }, + { + "type": "title", + "bbox": [ + 0.115, + 0.766, + 0.26, + 0.781 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.791, + 0.489, + 0.888 + ], + "angle": 0, + "content": "Code localization can be viewed as an information retrieval (IR) task that aims to identify relevant code snippets given natural language descriptions (Yu et al., 2025; Yang et al., 2024; Xia et al., 2024). Developers spend up to \\(66\\%\\) of their debugging time (Böhme et al., 2017) understanding code to" + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.256, + 0.883, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.455, + 0.884, + 0.526 + ], + "angle": 0, + "content": "Figure 1: Code localization across four common programming scenarios. Given a codebase and an issue description, the goal of code localization is to identify the relevant code snippets that require modification to resolve the issue." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.535, + 0.885, + 0.904 + ], + "angle": 0, + "content": "make changes, and automated tools often struggle with the same challenge. Poor code localization leads to incomplete fixes, introduces new bugs, and significantly extends development cycles. Unlike traditional retrieval tasks that primarily focus on lexical or semantic matching between queries and documents (Guo et al., 2016, 2020), code localization requires bridging the gap between natural language and programming languages. It also necessitates reasoning capabilities to analyze the issue, while considering the structural and semantic properties of code (Lewis et al., 2020; Guu et al., 2020; Qu et al., 2020). This capability has become fundamental to powerful AI assistants (OpenAI, 2023; Anthropic, 2023), code-aware search engines (PerplexityAI, 2023), and automated programming agents (Cognition.ai, 2024; Wang et al., 2025; Gauthier, 2024). 
In particular, accurate code localization is crucial for software maintenance and evolution, as it enables precise code modifications for bug fixes, refactoring, and feature additions (Wang et al., 2024), thereby streamlining the development workflow." + }, + { + "type": "text", + "bbox": [ + 0.528, + 0.906, + 0.882, + 0.922 + ], + "angle": 0, + "content": "Existing approaches to code localization face" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.311, + 0.061, + 0.724 + ], + "angle": 270, + "content": "arXiv:2503.09089v2 [cs.SE] 29 Apr 2025" + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.895, + 0.489, + 0.922 + ], + "angle": 0, + "content": "* Equal contribution. This work was done during Zhaoling's time at Yale." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.117, + 0.085, + 0.486, + 0.535 + ], + "angle": 0, + "content": "significant limitations. Dense retrieval methods require maintaining and continuously updating vector representations of the entire codebase (Wang et al., 2023b; Günther et al., 2023), creating engineering challenges for large, evolving repositories where code changes frequently. While LLMs demonstrate strong code understanding capabilities (Kang et al., 2023; Wu et al., 2023), models with large context windows cannot process entire codebases at once, necessitating strategic navigation through relevant parts. Moreover, issue descriptions often mention only symptoms rather than underlying causes. For instance, a report of 'XSS vulnerability in user profile' might require changes to a shared validation utility used throughout the codebase but not explicitly referenced in the issue. This disconnect between issue descriptions and affected code components presents a substantial challenge for traditional retrieval approaches, which struggle to trace implicit dependencies across the codebase structure. 
Recent agent-based methods attempt to address these limitations through iterative exploration (Yang et al., 2024; Qin et al., 2024) but still struggle to efficiently navigate and comprehend complex code structures and dependencies, particularly when multi-hop reasoning is required to trace from issue descriptions to affected code regions that aren't directly mentioned." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.536, + 0.486, + 0.663 + ], + "angle": 0, + "content": "This raises a key question: How can we design efficient indexing as intermediate representations that are structure-aware and both easy and performant for LLM agents to consume? It is intuitive to design an agentic retrieval system that carefully combines traditional IR methods and LLM agent's reasoning ability to achieve accurate, efficient, and cost-effective code localization in codebases." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.665, + 0.486, + 0.921 + ], + "angle": 0, + "content": "To address this challenge, we propose LOCAGENT, a framework that builds directed heterogeneous graph indexing to unify code structures, dependencies, and contents. Our approach leverages a structured graph representation that enables powerful multi-hop reasoning capabilities, allowing agents to navigate complex dependency relationships between code elements even when target code isn't explicitly mentioned in issue descriptions. This graph-based approach significantly outperforms previous methods on challenging localization tasks that require traversing multiple code relationships. Our lightweight representation, coupled with sparse indexing techniques, enables efficient entity search while maintaining rich structural information. The indexing process typically" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.085, + 0.882, + 0.325 + ], + "angle": 0, + "content": "takes only a few seconds per codebase, making it highly practical for real-time use. 
The framework integrates a set of unified tools that guide the agent through a systematic exploration of the codebase, allowing autonomous navigation based on contextual needs. Furthermore, by fine-tuning Qwen-2.5-Coder-Instruct (Hui et al., 2024) 7B and 32B models(abbr. as Qwen-2.5-7B and Qwen-2.5-32B respectively), our system achieves performance comparable to state-of-the-art models like Claude-3-5-sonnet-20241022 (Anthropic, 2023) (abbr. as Claude-3.5) while significantly reducing API costs by over \\(80\\%\\) (from \\\\(0.66 to \\\\)0.09 per example), making it practical for real-world deployment." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.327, + 0.882, + 0.63 + ], + "angle": 0, + "content": "Additionally, to facilitate a comprehensive evaluation of code localization methods, we introduce LOC-BENCH, a new benchmark specifically designed for this task. Existing benchmarks like SWE-Bench present significant limitations: (1) they risk contamination through data overlap with LLM training sets (Mündler et al., 2024), and (2) they primarily focus on bug fixing, lacking diversity in maintenance scenarios such as feature requests, performance optimizations, and security fixes. In contrast, LOC-BENCH covers diverse scenarios and mitigates potential contamination concerns by incorporating more recent examples from popular Python repositories collected after known LLM training cutoff dates. Additionally, we provide tooling to continuously update the benchmark with new examples, allowing researchers to maintain a fresh evaluation dataset as models evolve and training data cutoffs advance." 
+ }, + { + "type": "text", + "bbox": [ + 0.514, + 0.633, + 0.882, + 0.664 + ], + "angle": 0, + "content": "Our contributions address critical gaps in existing approaches:" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.676, + 0.882, + 0.771 + ], + "angle": 0, + "content": "- We introduce a heterogeneous graph representation that captures both explicit and implicit code relationships, enabling efficient multi-hop reasoning. Our lightweight graph-based indexing process takes only seconds per repository and requires minimal storage." + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.783, + 0.882, + 0.894 + ], + "angle": 0, + "content": "- We design unified tools for agent-based code exploration that leverage our graph representation, allowing LLM agents to perform complex multi-hop navigation and reasoning across code dependencies even when target code isn't explicitly mentioned in issue descriptions." + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.906, + 0.881, + 0.92 + ], + "angle": 0, + "content": "- We introduce Loc-Bench, a new benchmark" + }, + { + "type": "list", + "bbox": [ + 0.536, + 0.676, + 0.882, + 0.92 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "text", + "bbox": [ + 0.15, + 0.085, + 0.488, + 0.182 + ], + "angle": 0, + "content": "specifically designed for code localization that addresses limitations in existing datasets. Unlike previous benchmarks dominated by bug reports, Loc-Bench offers a balanced distribution across bug fixes, feature requests, security patches, and performance optimizations." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.196, + 0.49, + 0.259 + ], + "angle": 0, + "content": "- By fine-tuning open-source models on this task, we reduce the cost of code localization by \\(86\\%\\) while maintaining competitive performance." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.289, + 0.27, + 0.305 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.317, + 0.45, + 0.332 + ], + "angle": 0, + "content": "2.1 Traditional Retrieval-based Methods" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.339, + 0.49, + 0.66 + ], + "angle": 0, + "content": "Traditional IR methods rely on lexical or semantic matching to return ranked lists of code snippets. Sparse retrievers, such as BM25 (Robertson et al., 1994, 2009), have demonstrated robustness to domain adaptation. Dense retrievers utilize embeddings for improved semantic searching, including models with open checkpoints such as general text embedding models E5-base-v2 (Wang et al., 2022) and proprietary APIs (VoyageAI, 2024). Code embedding models such as Jina-Code-v2 (Günther et al., 2023), Codesage-large-v2 (Zhang et al., 2024), and CodeRankEmbed (Suresh et al., 2024), trained specifically for code related tasks, showing significant performance in Code2Code and NL2Code semantic search tasks. However, while the embedding models themselves are small, the engineering challenges of maintaining these indexing systems (e.g., storage requirements, update mechanisms, and infrastructure maintenance) make them difficult to adapt to fast-evolving codebases." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.674, + 0.486, + 0.689 + ], + "angle": 0, + "content": "2.2 LLM-based Generative Retrieval Methods" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.697, + 0.49, + 0.921 + ], + "angle": 0, + "content": "Recently, LLMs with advanced code reasoning capabilities have demonstrated superior performance by directly processing queries and raw code for code localization (Kang et al., 2023; Wu et al., 2023; Xia et al., 2024; Kang et al., 2024). For example, Agentless (Xia et al., 2024), initially designed for automated program repair, uses a simplistic hierarchical localization process powered by LLM. 
It employs a straightforward three-phase approach that first localizes relevant code sections before attempting to fix the identified issues, challenging the assumption that complex agent architectures are necessary for effective code understanding and modification tasks." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.438 + ], + "angle": 0, + "content": "Expanding on these techniques, agent-based methods utilize multi-step reasoning to enable automated codebase traversal. Specifically, OpenHands (Wang et al., 2025) implements a generalist coding agent that supports bash commands like grep and tools for viewing files. SWE-Agent (Yang et al., 2024) integrates a custom Agent-Computer Interface to support agents to navigate entire repositories. MoatlessTools (Örwall, 2024) combines an agentic searching loop and semantic search to obtain code locations. However, existing agent-based methods face two critical limitations: (a) they primarily navigate codebases through directory traversal rather than understanding semantic relationships, (b) and they struggle to extract and reason about complex cross-file dependencies when these relationships aren't explicitly represented in the repository structure. This significantly impairs their ability to locate code that requires modification when the issue involves interactions between structurally distant components in the codebase." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.45, + 0.829, + 0.481 + ], + "angle": 0, + "content": "2.3 Graph-based Code Representation Methods" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.487, + 0.885, + 0.791 + ], + "angle": 0, + "content": "Due to the inherent structure of code, several works have employed graph-based representations to improve code understanding by capturing key relationships between components. Aider (2023) constructs a RepoMap and uses a graph ranking algorithm to identify the most significant contextual elements. 
Similarly, as a plugin, RepoGraph (Ouyang et al., 2025) performs subgraph retrieval – extracting an ego-network of relevant lines and their neighbors – to provide structured context. CodexGraph (Liu et al., 2024) indexes the repository into a Neo4j graph database, where LLM agents query the database precisely using Cypher. The efficiency of its retrieval process depends heavily on the querying capabilities of the LLM. These methods focus primarily on providing relevant context but do not enhance the traversal process itself, as they do not explicitly model directory structure or file hierarchies." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.794, + 0.885, + 0.922 + ], + "angle": 0, + "content": "In contrast, RepoUnderstander (Ma et al., 2024) builds hierarchical and function-call graphs, using Monte Carlo Tree Search (MCTS) guided by an LLM for exploration. While thorough, MCTS introduces extra computational overhead, making it less efficient than simpler traversal methods like BFS, particularly in large repositories. OrcaLoca (Yu et al., 2025) uses a simplified graph" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.115, + 0.083, + 0.885, + 0.321 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.347, + 0.884, + 0.404 + ], + "angle": 0, + "content": "Figure 2: Overview of LOCAGENT framework. LOCAGENT first parses the given codebase to build a graph-based code representation with various types of entities and relations. It then constructs sparse indexes for exploring structures and searching content. Using these indexes, it performs agent-guided searches that combine the graph and tools." + }, + { + "type": "table", + "bbox": [ + 0.119, + 0.418, + 0.882, + 0.525 + ], + "angle": 0, + "content": "
MethodRelation TypesNode TypesSearch/Traversal Strategy
ContainImportInheritInvokeDirectoryFileClassFunction
CodexGraph(Liu et al., 2024)XXXCypher queries
RepoGraph(Ouyang et al., 2025)XXXEgo-graph retrieval
RepoUnderstander(Ma et al., 2024)XMCTS
OrcaLoca(Yu et al., 2025)XXSimple search tools
LOCAGENT(Ours)Unified retrieval tools
" + }, + { + "type": "table_caption", + "bbox": [ + 0.263, + 0.533, + 0.733, + 0.548 + ], + "angle": 0, + "content": "Table 1: Comparison of Graph-Based Code Representation Methods." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.563, + 0.49, + 0.708 + ], + "angle": 0, + "content": "enhanced by priority scheduling and context pruning. It maintains efficient search but may miss complex invocation dependencies. Table 1 summarizes the differences between these methods and LOCAGENT. Compared to these approaches, LOCAGENT offers a more comprehensive and unified representation of the repository, along with efficient, unified retrieval tools specifically designed for LLM consumption." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.719, + 0.401, + 0.735 + ], + "angle": 0, + "content": "3 The LOCAGENT Framework" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.745, + 0.491, + 0.922 + ], + "angle": 0, + "content": "We introduce LOCAGENT, a graph-oriented LLM-agent framework for code localization. Figure 2 illustrates the overall framework. When given a repository, LOCAGENT can locate all the relevant code sections at various granularities (file, class, function, or line level) for different types of GitHub issues (such as bug reports, feature requests, performance bottlenecks, and security vulnerabilities) through automated in-depth exploration and analysis of the codebase. Section 3.1 proposes a novel graph-based indexing approach as an intermediate" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.563, + 0.883, + 0.627 + ], + "angle": 0, + "content": "representation for codebases. Section 3.2 presents our agent-based code search on the indexes and Section 3.3 describes our model fine-tuning and distillation process." 
+ }, + { + "type": "title", + "bbox": [ + 0.509, + 0.641, + 0.83, + 0.658 + ], + "angle": 0, + "content": "3.1 Graph-based Code Representation" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.663, + 0.885, + 0.759 + ], + "angle": 0, + "content": "Codebases contain rich structural information, both explicit and implicit, that is essential for agent reasoning. Building on this insight, we develop a graph-based indexing that comprehensively captures codebase relationships while maintaining a granularity suitable for LLM-agents to retrieve." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.761, + 0.886, + 0.922 + ], + "angle": 0, + "content": "Code Graph Construction. We construct a heterogeneous directed graph \\(\\mathcal{G}(\\mathcal{V},\\mathcal{E},\\mathcal{A},\\mathcal{R})\\) to index the codebase, where \\(\\nu = \\{v_{i}\\}_{i = 1}^{n}\\) is the node set and \\(\\mathcal{E}\\subseteq \\mathcal{V}\\times \\mathcal{V}\\) is the edge set. Each node \\(v\\in \\mathcal{V}\\) and edge \\(e\\in \\mathcal{E}\\) has an associated type mapping function. For nodes, \\(\\tau (v):\\mathcal{V}\\to \\mathcal{A}\\) maps to types \\(\\mathcal{A} = \\{\\mathrm{directory},\\mathrm{file},\\mathrm{class},\\mathrm{function}\\}\\). For edges, \\(\\phi (e):\\mathcal{E}\\rightarrow \\mathcal{R}\\) maps to relationships \\(\\mathcal{R} = \\{\\mathrm{contain},\\mathrm{import},\\mathrm{invoke},\\mathrm{inherit}\\}\\). In this paper, we focus our study on Python reposito" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.486, + 0.117 + ], + "angle": 0, + "content": "ries and leave codebases with other programming languages as future work." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.119, + 0.489, + 0.28 + ], + "angle": 0, + "content": "First, we include all directories and Python files as nodes. Then, we parse each Python file using the abstract syntax tree (AST) to identify inner functions and classes recursively as nodes. 
We set the function level as the smallest node granularity and use each function's code content as the document for agent retrieval. This approach creates a good balance of information density between the index and documents, allowing LLMs to reason effectively within their context window limitations." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.283, + 0.49, + 0.459 + ], + "angle": 0, + "content": "As shown in Figure 2, all nodes with different types can be connected as a single tree using the contain relationship. This structure supports standard codebase-navigation operations from existing works. Our code graph further incorporates more advanced codebase relationships as edges: (1) the invoke relationship from function/class to function/class, where an invoke to a class represents class instantiation; (2) the import relationship from file to function/class; and (3) the inherit relationship between classes." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.462, + 0.49, + 0.784 + ], + "angle": 0, + "content": "Sparse Hierarchical Entity Indexing. We treat nodes in our code graph as entities and build hierarchical indexing based on their contents. For each keyword, we lookup the indexes from top to bottom: (1) We build an entity ID index as a unique identifier for each node using its fully qualified name. For example, a function calculate_sum in the MathUtils class located in src/utils.py would be represented as: src/utils.py:MathUtilscalculate_sum. (2) We construct a global dictionary to map the entity name (e.g., calculate_sum) to all nodes that share the same name. (3) We index entity IDs through an inverted index (i.e., BM25) to handle keyword searches that don't exactly match the IDs or names of entities. (4) For cases where input keywords aren't part of the entities' IDs (e.g., when a keyword refers to a global variable), we build an inverted index that maps code chunk(s) to each entity to cover all possible matches." 
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.793, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Remark. Rather than relying solely on directory structures or hierarchical module indexing, our approach captures module dependencies that transcend directory boundaries. Two modules in distant directories (A and B) may appear unrelated in traditional navigation, but if they invoke each other or share inheritance, they're syntactically close in our graph representation. This syntactic" + }, + { + "type": "table", + "bbox": [ + 0.515, + 0.082, + 0.881, + 0.238 + ], + "angle": 0, + "content": "
Tool NameInput ParamsOutput
SearchEntityKeywordsRelated Entities with Code Snippets
TraverseGraphStart Entity IDs Direction Traverse Hops Entity Types Relation TypesTraversed Subgraph, including Entities and Relations
RetrieveEntityEntity IDsComplete Code of Specified Entities
" + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.246, + 0.883, + 0.276 + ], + "angle": 0, + "content": "Table 2: List of unified APIs provided by LocAgent for code search and exploration." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.287, + 0.884, + 0.382 + ], + "angle": 0, + "content": "proximity is essential for code localization because issues typically manifest through call relationships rather than directory structure. By capturing these functional dependencies, our approach efficiently identifies related components even when physically distant in the codebase." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.398, + 0.767, + 0.413 + ], + "angle": 0, + "content": "3.2 Agent-guided Code Search" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.42, + 0.884, + 0.612 + ], + "angle": 0, + "content": "We develop tools based on the indexes built offline. During runtime, LOCAGENT takes issue statements as input and launches agents that autonomously use tools to localize target code sections. While the agent may iteratively invoke multiple tools internally to explore the codebase, LOCAGENT presents a simplified interface to users, requiring only a single-turn interaction—users submit an issue statement and receive localization results without additional input. This autonomous, self-contained workflow makes LOCAGENT both easy to deploy and highly practical for real-world use." + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.615, + 0.885, + 0.823 + ], + "angle": 0, + "content": "Tool Design for Codebase Exploration. Recent works (Örwall, 2024; Wang et al., 2025), inspired by GUI-based IDEs, have developed numerous specialized tools for agents to explore codebases. However, these tools are initially designed for human readability, which sacrifices the compactness and efficiency that LLM agents prefer (Yang et al., 2024). 
Building upon our graph-based code representation, we can develop tools that support efficient higher-order codebase exploration to address these challenges. We unify all codebase navigation, search, and view operations into three tools (Table 2), introduced as follows." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.825, + 0.884, + 0.922 + ], + "angle": 0, + "content": "SearchEntity: This tool searches codebases using keywords to locate relevant entities through our Hierarchical Entity Index. When an exact match isn't found in the upper index, the system performs a fuzzy search using the lower index. For each entity found, we return its code snippet in three detail" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.488, + 0.132 + ], + "angle": 0, + "content": "levels: fold, preview, and full code (Figure 6). This effectively prevents lengthy code context and reduces noise fed into agents." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.134, + 0.49, + 0.535 + ], + "angle": 0, + "content": "**TraverseGraph:** This tool performs a type-aware breadth-first search (BFS) on the code graph, starting from input entities and allowing control over both traversal direction and number of hops. This supports agents to perform arbitrary multi-hop codebase navigation through only one action, significantly improving the efficiency compared with existing agent systems. Note that by allowing agents to select entity types and relation types for each traversal, this tool effectively leverages the LLM agents' coding expertise to generate proper meta paths—a crucial element for heterogeneous graph analysis (Lv et al., 2021). For example, by specifying entity types to {class, function} and relation types to {contain, inherit}, this tool returns the UML diagram. Additionally, we design an expanded tree-based format for the output subgraph that encodes both relation types and directions (Figure 7). 
(Fatemi et al., 2023) demonstrates that LLM performance on graph reasoning depends on the input graph format. Converting a graph into a tree structure encodes topology through the spatial distance between entity names, thereby deriving better performance. For detailed comparisons with alternative graph formats, please see Appendix A.1.2." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.536, + 0.489, + 0.599 + ], + "angle": 0, + "content": "RetreiveEntity: This tool retrieves complete entity attributes for each input entity ID, including essential information such as file path, line number, and code content." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.6, + 0.49, + 0.776 + ], + "angle": 0, + "content": "Chain-of-Thought Agent Planning. We use chain-of-thought (CoT) prompting (shown in Appendix D) to guide the agent in solving code localization problems step by step. The agent systematically follows these steps: (1) Keyword extraction. The agent begins by breaking down the issue statement into different categories and then extracts relevant keywords that are closely related to the problem. (2) Linking keywords to code entities. The agent invokes SearchEntity to complete and clarify each extracted keyword." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.778, + 0.489, + 0.921 + ], + "angle": 0, + "content": "(3) Generate the logical flow from fault to failure. The agent first identifies the entry points that trigger the problem. Then, it iteratively traverse the codebase with TraverseGraph, retrieves code contents with RetrieveEntity, and searches new keywords with SearchEntity. Finally, it generates the logic flow based on the issue and additional context. (4) Locate the target entities. The agent pinpoints all suspicious code entities that need modification" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.882, + 0.116 + ], + "angle": 0, + "content": "based on the logic flow. Then, it ranks these entities based on their relevance." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.118, + 0.884, + 0.31 + ], + "angle": 0, + "content": "Confidence Estimation Based on Consistency. After generating a complete ranked list of candidate entities, to obtain a more consistent ranking, we measure the consistency (Wang et al., 2023a) of the LLM's predictions across multiple iterations. Specifically, we use the Reciprocal Rank as the initial confidence score for each predicted location. We then aggregate the scores for each entity across iterations to compute its final confidence score. The intuition behind this approach is that if the LLM consistently ranks a location higher in multiple iterations, it is more likely to be relevant." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.323, + 0.809, + 0.339 + ], + "angle": 0, + "content": "3.3 Open-source Model Fine-tuning" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.344, + 0.884, + 0.649 + ], + "angle": 0, + "content": "Given the high costs of proprietary LLM APIs and data security concerns, we fine-tuned open-source models to improve their code localization capabilities and enable local deployment. We collect 433 successful trajectories generated with Claude-3.5, where the agent completed tasks from the SWEBench training set. Due to budget constraints, we sample an additional 335 trajectories generated by the initially fine-tuned Qwen2.5-32B model. Importantly, we only select successful trajectories where the model correctly localized the issues, creating a high-quality dataset of correct reasoning paths. These successful examples are then used to refine the same 32B model further, reinforcing effective reasoning patterns through this self-improvement loop. The entire dataset, combining both Claude-3.5 trajectories and successful Qwen2.5-32B samples, was then used to distill knowledge to a smaller 7B model." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.651, + 0.884, + 0.747 + ], + "angle": 0, + "content": "To fine-tune the smaller model, we employ Supervised Fine-Tuning (SFT) with LoRA (Hu et al., 2021). Our experiments show that this straightforward distillation method significantly enhances the performance of smaller models. See Appendix C.1.3 for more training details." + }, + { + "type": "title", + "bbox": [ + 0.508, + 0.761, + 0.865, + 0.792 + ], + "angle": 0, + "content": "4 LOC-BENCH: A New Benchmark for Code Localization" + }, + { + "type": "title", + "bbox": [ + 0.508, + 0.804, + 0.8, + 0.819 + ], + "angle": 0, + "content": "4.1 Revisiting Existing Benchmark" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.825, + 0.884, + 0.922 + ], + "angle": 0, + "content": "SWE-Bench(Jimenez et al., 2023) is a widely used benchmark that collects GitHub issues and corresponding code patches that resolve them. Xia et al. (2024); Suresh et al. (2024) adapt its subset, SWE-Bench-Lite, for code localization, treating the patched files and functions as the targets." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.117, + 0.085, + 0.49, + 0.39 + ], + "angle": 0, + "content": "However, existing datasets, including SWE-Bench, present challenges for effectively evaluating code localization methods. First, they are at risk of contamination, as they may include data overlapping with the repositories or issues used by modern models during pre-training. Second, existing datasets are not specifically designed for code localization (Tomassi et al., 2019). SWE-Bench, for instance, was created primarily to evaluate end-to-end bug-fixing capabilities, with localization being only an implicit intermediate step. This focus results in datasets dominated by bug reports (85% of SWE-Bench-Lite examples) while severely underrepresenting other common software maintenance tasks such as feature requests (14%), security vulnerabilities (1%), and performance optimizations (0%). 
This imbalance fails to capture the diverse localization challenges faced in real-world software development." + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.402, + 0.326, + 0.416 + ], + "angle": 0, + "content": "4.2 Dataset Construction" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.422, + 0.488, + 0.597 + ], + "angle": 0, + "content": "To address the limitations of existing benchmarks, we introduce LOC-BENCH, a new dataset specifically designed for code localization. This dataset collects up-to-date issues from Python repositories to mitigate the influence of pre-training bias in the latest LLMs. Additionally, LOC-BENCH covers wider categories, including bug reports, feature requests, security, and performance issues, enabling a more comprehensive evaluation of code localization methods. The statistics of LOC-BENCH are shown in Table 3." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.599, + 0.488, + 0.775 + ], + "angle": 0, + "content": "For the Bug Report category, we collect GitHub issues created after October 2024, which is later than the release dates of most modern LLMs. To enrich the dataset with more instances of security and performance issues, we use the GitHub Search API to search for relevant keywords, such as \"latency improvement\" for performance-related issues. We exclude instances that involve modifying more than five Python files or more than ten functions in the corresponding patch. For further details, see Appendix B.1." 
+ }, + { + "type": "title", + "bbox": [ + 0.117, + 0.787, + 0.258, + 0.803 + ], + "angle": 0, + "content": "5 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.81, + 0.488, + 0.92 + ], + "angle": 0, + "content": "Our experiments aim to evaluate four key aspects of LOCAGENT: (1) the effectiveness of our graph-based representation and tooling for code localization compared to existing methods, (2) the performance of fine-tuned open-source models as cost-effective alternatives to proprietary LLMs, (3) a detailed analysis of how performance varies across" + }, + { + "type": "table", + "bbox": [ + 0.525, + 0.083, + 0.869, + 0.266 + ], + "angle": 0, + "content": "
DatasetCategory#Sample
SWE-Bench-Lite (Total = 300)Bug Report254
Feature Request43
Security Issue3
Performance Issue0
Loc-Bench (Total = 560)Bug Report242
Feature Request150
Security Issue29
Performance Issue139
" + }, + { + "type": "table_caption", + "bbox": [ + 0.512, + 0.277, + 0.882, + 0.305 + ], + "angle": 0, + "content": "Table 3: Distribution of samples across different categories in the SWE-Bench-Lite and Loc-Bench datasets." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.318, + 0.882, + 0.428 + ], + "angle": 0, + "content": "task categories, and (4) the contribution of each component in our framework through comprehensive ablation studies. We evaluate on both SWE-Bench-Lite and our introduced Loc-Bench dataset. Additionally, we examine the impact of improved localization on downstream software maintenance tasks." + }, + { + "type": "title", + "bbox": [ + 0.512, + 0.434, + 0.729, + 0.45 + ], + "angle": 0, + "content": "5.1 Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.456, + 0.882, + 0.567 + ], + "angle": 0, + "content": "Datasets. We first conduct experiments on SWEBench-Lite, treating the patched files and functions as the targets for localization. Following Suresh et al. (2024), we excluded examples where no existing functions were modified by the patch, ultimately retaining 274 out of the original 300 examples." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.569, + 0.882, + 0.856 + ], + "angle": 0, + "content": "Metrics. To assess performance, we use a modified accuracy metric inspired by R-Precision from information retrieval, following Agentless(Xia et al., 2024). To assess performance, we use Acc@k (Accuracy at k) as our evaluation metric, following Agentless(Xia et al., 2024). For each example, we select the top-k predicted locations and consider a localization attempt successful only if all relevant locations are correctly identified within these top-k predictions. This approach measures the ability to fully identify all necessary code sections that require modification. We report results across multiple \\( k \\) values: file localization at Acc@1, Acc@3, and Acc@5, and function localization at Acc@5 and Acc@10. 
Additionally, to provide a more relaxed evaluation criteria, we assess module localization, which only requires finding any function within the patched class." + }, + { + "type": "title", + "bbox": [ + 0.512, + 0.869, + 0.627, + 0.883 + ], + "angle": 0, + "content": "5.2 Baselines" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.891, + 0.882, + 0.92 + ], + "angle": 0, + "content": "We evaluate LOCAGENT against three categories of competitive baselines: (a) Retrieval-based meth" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.12, + 0.065, + 0.883, + 0.301 + ], + "angle": 0, + "content": "
TypeMethodLoc-ModelFile (%)Module (%)Function (%)
Acc@1Acc@3Acc@5Acc@5Acc@10Acc@5Acc@10
Embedding-BasedBM25 (Robertson et al., 1994)38.6951.8261.6845.2652.9231.7536.86
E5-base-v2 (Wang et al., 2022)49.6474.4580.2967.8872.2639.4251.09
Jina-Code-v2 (Günther et al., 2023)43.4371.1780.2963.5072.6342.3452.19
Codesage-large-v2 (Zhang et al., 2024)47.8169.3478.1060.5869.7133.9444.53
CodeRankEmbed (Suresh et al., 2024)52.5577.7484.6771.9078.8351.8258.76
Procedure-BasedAgentless (Xia et al., 2024)GPT-4o67.1574.4574.4567.1567.1555.4755.47
Claude-3.572.6379.2079.5668.9868.9858.7658.76
Agent-BasedMoatlessTools (Örwall, 2024)GPT-4o73.3684.3185.0474.8276.2857.3059.49
Claude-3.572.6385.7786.1376.2876.2864.6064.96
SWE-agent (Yang et al., 2024)GPT-4o57.3064.9668.9858.0358.0345.9946.35
Claude-3.577.3787.2390.1577.7478.1064.2364.60
Openhands (Wang et al., 2025)GPT-4o60.9571.9073.7262.4163.8749.6450.36
Claude-3.576.2889.7890.1583.2183.5868.2570.07
LOCAGENT (Ours)Qwen2.5-7B(ft)70.8084.6788.3281.0282.8564.2371.53
Qwen2.5-32B(ft)75.9190.5192.7085.7787.2371.9077.01
Claude-3.577.7491.9794.1686.5087.5973.3677.37
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.31, + 0.885, + 0.382 + ], + "angle": 0, + "content": "Table 4: Performance comparison with baseline methods on code localization on SWE-bench lite. Results show the accuracy at file, module, and function levels. For Agent-Based methods, we use GPT-4o-2024-0513 (abbr. as GPT-4o) and Claude-3-5-sonnet-20241022 (abbr. as Claude-3.5) as the localization model. Additionally, the performance of our fine-tuned open-source models, Qwen2.5-7B(ft) and Qwen2.5-32B(ft), are included for comparison." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.39, + 0.492, + 0.729 + ], + "angle": 0, + "content": "ods: We include the sparse retrieval approach BM25 (Robertson et al., 1994) and several state-of-the-art embedding models, including the general-purpose E5-base-v2 (Wang et al., 2022) and specialized code embedding models such as JinaCode-v2 (Günther et al., 2023), Codesage-large-v2 (Zhang et al., 2024), and the current SOTA code embedding model CodeRankEmbed (Suresh et al., 2024). Proprietary embedding solutions were excluded due to API costs. (b) Procedure-based methods: We compare against Agentless (Xia et al., 2024), which employs a structured hierarchical approach to code localization without complex agent architectures. (c) Agent-based methods: We include several advanced agent frameworks designed for code exploration and modification, specifically OpenHands (Wang et al., 2025) (using its default CodeActAgent implementation), SWE-Agent (Yang et al., 2024), and MoatlessTools (Örwall, 2024). For implementation details, please refer to Appendix C.1.1." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.74, + 0.473, + 0.755 + ], + "angle": 0, + "content": "5.3 Evaluation Results on SWE-Bench-Lite" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.761, + 0.491, + 0.922 + ], + "angle": 0, + "content": "As shown in Table 4, Agent-Based methods consistently outperform other approaches, and our method demonstrates competitive performance by achieving the best results across all levels of code localization. Unlike traditional retrieval-based methods, Agentless identifies only a limited number of locations due to its narrow repository scope, which hinders performance gains when considering a broader set of candidates. The results of the NDCG are presented in Table 11 in the Appendix." + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.385, + 0.872, + 0.512 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.516, + 0.872, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.653, + 0.885, + 0.741 + ], + "angle": 0, + "content": "Figure 3: Performance analysis at different difficulty levels for file- and function-level localization. All agent-based methods and Agentless use Claude-3.5 as the localization model. Hop \\(N\\) refers to the distances between functions mentioned in the issue description and the ground truth patch on our code graph." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.761, + 0.885, + 0.922 + ], + "angle": 0, + "content": "To further analyze the results, we examine performance across different task difficulty levels. We measure the task difficulty by calculating the shortest hops between the functions mentioned in the issue descriptions and the patched functions on our code graph (See Appendix C.1.2 for more details). As shown in Figure 3, performance decreases for all methods as the task becomes more challenging. 
However, Agent-based methods demonstrate better robustness as the difficulty increases, with" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.127, + 0.076, + 0.476, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.226, + 0.49, + 0.298 + ], + "angle": 0, + "content": "Figure 4: Comparison of performance between the original and fine-tuned Qwen models. The metrics used are file-level Acc@5 and module/function-level Acc@10. Dashed lines represent the performance of the Claude-3.5 model for reference." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.31, + 0.49, + 0.504 + ], + "angle": 0, + "content": "our method maintaining competitive performance across various difficulty levels. Retrieval-based methods, such as E5-Base-v2 and CodeRankEmbed, perform poorly at the function level, even when the patched functions are explicitly mentioned in the query. This is because they treat the query as a whole, failing to capture fine-grained details. Agentless performs even worse than retrieval-based methods when exploration beyond the query is needed (\\(hop \\geq 0\\)) due to its simplistic localization process and limited view focused only on the repository structure." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.515, + 0.414, + 0.53 + ], + "angle": 0, + "content": "5.4 Fine-tuned Open-source Models" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.535, + 0.49, + 0.809 + ], + "angle": 0, + "content": "Figure 4 demonstrates that after fine-tuning, both the 7B and 32B models show significant improvements on this task. LOCAGENT with finetuned Qwen-2.5-Coder-Instruct-32B (abbreviated as Qwen2.5-32B(ft)) achieves performance comparable to Claude-3.5, and LOCAGENT with Qwen2.5-7B(ft) also delivers results on par with that obtained using GPT-4o. As shown in Table 4, our method with Qwen2.5-32B(ft) outperforms nearly all baselines, including those that use larger and more powerful LLMs. 
The original 7B model performs poorly due to its limited tool-use capability (Chen et al., 2024). These results validate the feasibility of deploying our fine-tuned open-source models as promising alternatives to proprietary APIs, especially in resource-constrained applications." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.82, + 0.312, + 0.837 + ], + "angle": 0, + "content": "5.5 Efficiency Analysis" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.842, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Table 5 presents an efficiency analysis comparing agent-based methods in terms of cost and the number of agent interactions required. MoatlessTools demonstrates good cost-efficiency and requires relatively fewer rounds of interaction. However, the" + }, + { + "type": "table", + "bbox": [ + 0.508, + 0.082, + 0.875, + 0.215 + ], + "angle": 0, + "content": "
MethodLM#RoundCost($)Acc@10
Cost
MoatlessToolsGPT-4o50.461.3
Claude-3.550.461.4
SWE-agentGPT-4o80.560.8
Claude-3.590.671.0
OpenhandsGPT-4o150.830.6
Claude-3.5130.790.9
OursClaude-3.570.661.2
Qwen2.5-7B(ft)60.0513.2
Qwen2.5-32B(ft)90.098.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.219, + 0.883, + 0.291 + ], + "angle": 0, + "content": "Table 5: Efficiency analysis comparing the average cost and number of agent interaction rounds required by different methods. The cost-efficiency of each method is evaluated using the ratio of function-level Acc@10 to average cost." + }, + { + "type": "table", + "bbox": [ + 0.512, + 0.302, + 0.879, + 0.442 + ], + "angle": 0, + "content": "
Model SettingFile Acc@5Module Acc@10Function Acc@10
Ours88.3282.8571.53
w/o TraverseGraph86.1378.4766.06
Relation Types: contain86.5079.5666.42
Traverse Hops: 186.8680.2966.79
w/o RetrieveEntity87.5981.3969.34
w/o SearchEntity68.9861.3153.28
w/o BM25 index75.1868.9860.22
" + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.452, + 0.884, + 0.525 + ], + "angle": 0, + "content": "Table 6: The ablation study of our model. The metrics used here are file-level Acc@5, module-level Acc@10, and function-level Acc@10. The impact of removing or fixing components is analyzed to observe how each component contributes to the overall accuracy." + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.548, + 0.885, + 0.821 + ], + "angle": 0, + "content": "dense embeddings it uses make it difficult and slow to adapt to fast-evolving codebases. SWE-agent and Openhands also show moderate costs but still do not match the efficiency of LOCAGENT. For LOCAGENT with Claude-3.5, although more rounds of interaction are required, the cost remains lower than that of Openhands, illustrating the token efficiency of our tool's outputs. LOCAGENT with fine-tuned Qwen models stands out for its superior efficiency1. Qwen2.5-7B(ft) is the most cost-efficient option, requiring only $0.05 per example, while Qwen2.5-32B(ft) offers a more cost-effective alternative to Claude-3.5. These results highlight the potential of fine-tuned open-source models as efficient alternatives, providing an optimal balance of cost-effectiveness and performance that surpasses other methods." + }, + { + "type": "page_footnote", + "bbox": [ + 0.508, + 0.848, + 0.883, + 0.922 + ], + "angle": 0, + "content": "1We calculate the cost based on the prices from AI inference providers (Hyperbolic, 2025; artificialanalysis.ai, 2025). Specifically, for the Qwen2.5-32B(ft) model, the cost is $0.20/1M tokens for both input and output. For the Qwen2.5-7B(ft) model, the cost is $0.14/1M tokens for input and $0.28/1M tokens for output." 
+ } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.085, + 0.279, + 0.1 + ], + "angle": 0, + "content": "5.6 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.108, + 0.489, + 0.186 + ], + "angle": 0, + "content": "We conduct an ablation study to evaluate the effectiveness of each component of our toolsets. Due to budget constraints, we use the fine-tuned Qwen-2.5-7B as the localization model for these experiments." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.189, + 0.489, + 0.317 + ], + "angle": 0, + "content": "(1) Each tool in our toolset plays a critical role in code localization performance. As shown in Table 6, removing any tool, especially the SearchEntity tool, leads to varying degrees of accuracy degradation, particularly in module and function level localization. This highlights the critical role each tool plays in identifying relevant modules and functions." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.32, + 0.49, + 0.526 + ], + "angle": 0, + "content": "(2) The graph structure provides essential information for accurate code localization. Removing TraverseGraph tool decreases module and function level performance since the agent cannot obtain any structure information about the codebase and relies on reasoning capability to identify call relationship or directory structure. Adding contain relationship provides only marginal improvements compared to fully removing TraverseGraph, emphasizing the importance of the other three relationship types and explaining why our method surpasses others relying only on the repository structure." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.529, + 0.489, + 0.641 + ], + "angle": 0, + "content": "(3) Multi-hop exploration is crucial for deep code understanding. 
When compared to the full setting, fixing \\( Hops = 1 \\) leads to a moderate decline in file and module-level accuracy, but it causes a more significant decrease in function-level accuracy, underscoring the importance of multi-hop exploration for identifying relevant entities." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.643, + 0.489, + 0.755 + ], + "angle": 0, + "content": "(4) Sparse indexing significantly enhances localization performance. Removing SearchEntity tool, or even partial removal of its index, causes a substantial drop in performance across all metrics. This demonstrates the effectiveness of building a sparse index on our code graph for improving localization performance." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.189, + 0.49, + 0.755 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.77, + 0.421, + 0.785 + ], + "angle": 0, + "content": "5.7 Evaluation Results on Loc-Bench" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.793, + 0.489, + 0.922 + ], + "angle": 0, + "content": "To ensure the robustness and generalization of our methods and fine-tuned Qwen models, and to eliminate potential data leakage, we evaluate our new dataset. Since Loc-Bench includes examples that edit 1 to 5 files, we assess file localization at top-5 and top-10 ranks, and function/module localization at top-10 and top-15 ranks. Table 7 shows that our fine-tuned Qwen2.5-7B model exhibits strong gen" + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.08, + 0.872, + 0.196 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.2, + 0.872, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.325, + 0.883, + 0.382 + ], + "angle": 0, + "content": "Figure 5: Performance analysis at different difficulty category for file- and function-level localization. All agent-based baselines and Agentless use Claude-3.5 as the localization model." 
+ }, + { + "type": "text", + "bbox": [ + 0.507, + 0.4, + 0.884, + 0.544 + ], + "angle": 0, + "content": "eralization capabilities, maintaining competitive performance compared to SWE-agent using more expensive and strong model. These results highlight the practicality of the fine-tuned Qwen2.5-7B model for real-world applications. Despite being an open-source alternative, it achieves a performance comparable to Claude-3.5, supporting its feasibility as a cost-effective substitute for commercial models in practical scenarios." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.546, + 0.884, + 0.803 + ], + "angle": 0, + "content": "Additionally, we evaluate the performance across four different difficulty categories. Figure 5 clearly shows that our method outperforms other methods in almost all categories of code localization. However, it also highlights a noticeable decrease in performance across the other three categories compared to the Bug Report category. This performance gap likely reflects our training data distribution, which contained more bug report examples, potentially leading to scaffolds better optimized for bug localization tasks. This trend suggests that while our method is highly effective for bug report localization, there is still room for improvement in handling the other categories through more balanced training data and category-specific optimization strategies." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.818, + 0.881, + 0.849 + ], + "angle": 0, + "content": "5.8 Application: Better Localization Leads to More Solved GitHub Issues" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.857, + 0.882, + 0.922 + ], + "angle": 0, + "content": "To assess the impact of localization methods on downstream tasks, we evaluated their effectiveness in solving GitHub issues. 
We choose Agentless as the baseline, ranking among the top-performing" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.149, + 0.082, + 0.847, + 0.226 + ], + "angle": 0, + "content": "
MethodLoc ModelFile (%)Module (%)Function (%)
Acc@5Acc@10Acc@10Acc@15Acc@10Acc@15
IR-BasedCodeRankEmbed74.2980.8963.2167.5043.3946.61
AgentlessClaude-3.567.5067.5053.3953.3942.6842.68
OpenHandsClaude-3.579.8280.0068.9369.1159.1159.29
SWE-agentClaude-3.577.6877.6863.5763.7551.9651.96
LocAgent (Ours)Qwen2.5-7B(ft)78.5779.6463.0463.0451.4351.79
Claude-3.583.3986.0770.8971.0759.2960.71
" + }, + { + "type": "table_caption", + "bbox": [ + 0.263, + 0.236, + 0.733, + 0.25 + ], + "angle": 0, + "content": "Table 7: Performance evaluation on the real-world LocBench dataset." + }, + { + "type": "table", + "bbox": [ + 0.115, + 0.274, + 0.484, + 0.345 + ], + "angle": 0, + "content": "
MethodLocalization LMAcc@5Pass@1Pass@10
AgentlessClaude-3.558.3926.3133.58
OursQwen2.5-32B(ft)69.3426.7936.13
Claude-3.573.3627.9237.59
" + }, + { + "type": "table_caption", + "bbox": [ + 0.114, + 0.354, + 0.486, + 0.385 + ], + "angle": 0, + "content": "Table 8: Impact of localization accuracy on downstream bug repair tasks." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.398, + 0.489, + 0.495 + ], + "angle": 0, + "content": "open-source submissions on SWE-Bench-Lite. For consistency, we utilized Claude-3.5 as the editing model in conjunction with the Agentless editing method. Table 8 shows that the success rate for solving GitHub issues improves significantly with better code localization accuracy." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.506, + 0.248, + 0.52 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.531, + 0.49, + 0.692 + ], + "angle": 0, + "content": "In conclusion, LOCAGENT enhances code localization by structuring codebases as graphs, enabling efficient repository-level exploration for LLM agents. With fine-tuned open-source models, our method achieves high localization accuracy while significantly reducing costs compared to larger proprietary models. Experimental results demonstrate the effectiveness of LOCAGENT in identifying relevant code components and improving downstream tasks." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.704, + 0.22, + 0.718 + ], + "angle": 0, + "content": "Limitations" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.729, + 0.489, + 0.873 + ], + "angle": 0, + "content": "First, our study primarily focused on fine-tuning Qwen-2.5-Coder models. Exploring a broader range of base models, including other open-source LLMs like CodeLlama, Mistral, or Yi, could provide valuable insights into model selection trade-offs. Additionally, investigating different finetuning approaches beyond LoRA, such as full finetuning or other parameter-efficient methods, could potentially yield better performance." 
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.874, + 0.489, + 0.923 + ], + "angle": 0, + "content": "Second, though we demonstrated improved bug repair performance with better localization, we only scratched the surface of potential downstream" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.276, + 0.885, + 0.372 + ], + "angle": 0, + "content": "applications. Future work should evaluate LocAgent's impact on other software engineering tasks like refactoring, feature addition, security vulnerability patching, and performance optimization. This would provide a more comprehensive understanding of the framework's practical utility." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.373, + 0.885, + 0.534 + ], + "angle": 0, + "content": "Moreover, our fine-tuning process relied heavily on trajectories generated by Claude-3.5 and the fine-tuned Qwen2.5-32B model. A more diverse training dataset incorporating examples from different models, tasks, and repositories could improve the robustness and generalization of fine-tuned models. Additionally, analyzing the impact of different dataset compositions and filtering strategies on model performance could yield valuable insights." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.535, + 0.886, + 0.664 + ], + "angle": 0, + "content": "Finally, the current evaluation focuses primarily on Python codebases. Extending LOCAGENT to support other programming languages and evaluating its performance across different language paradigms would better demonstrate its generalizability. Further, our evaluation metrics could be expanded to include more nuanced measures of localization quality beyond accuracy and NDCG." + }, + { + "type": "title", + "bbox": [ + 0.511, + 0.691, + 0.61, + 0.706 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.714, + 0.882, + 0.742 + ], + "angle": 0, + "content": "Aider. 2023. Building a better repository map with tree sitter. 
Accessed: April 15, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.752, + 0.884, + 0.78 + ], + "angle": 0, + "content": "Anthropic. 2023. Claude: Conversational ai by anthropic. Accessed: January 21, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.79, + 0.885, + 0.83 + ], + "angle": 0, + "content": "artificialanalysis.ai. 2025. Artificial analysis. https://artificialanalysis.ai/models/. Accessed: 2025-04-28." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.842, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Marcel Böhme, Ezekiel O Soremekun, Sudipta Chattopadhyay, Emamurho Ugherughe, and Andreas Zeller. 2017. Where is the bug and how is it fixed? an experiment with practitioners. In Proceedings of the 2017 11th joint meeting on foundations of software engineering, pages 117-128." + }, + { + "type": "list", + "bbox": [ + 0.51, + 0.714, + 0.885, + 0.922 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.191 + ], + "angle": 0, + "content": "Zehui Chen, Weihua Du, Wenwei Zhang, Kuikun Liu, Jiangning Liu, Miao Zheng, Jingming Zhuo, Songyang Zhang, Dahua Lin, Kai Chen, et al. 2024. T-eval: Evaluating the tool utilization capability of large language models step by step. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9510-9529." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.201, + 0.487, + 0.227 + ], + "angle": 0, + "content": "Cognition.ai. 2024. Introducing devin, the first ai software engineer." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.238, + 0.487, + 0.316 + ], + "angle": 0, + "content": "John Ellson, Emden Gansner, Lefteris Koutsofios, Stephen C North, and Gordon Woodhull. 2002. Graphviz—open source graph drawing tools. In Graph Drawing: 9th International Symposium, GD 2001 Vienna, Austria, September 23–26, 2001 Revised Papers 9, pages 483–484. Springer." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.327, + 0.487, + 0.366 + ], + "angle": 0, + "content": "Bahare Fatemi, Jonathan Halcrow, and Bryan Perozzi. 2023. Talk like a graph: Encoding graphs for large language models. arXiv preprint arXiv:2310.04560." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.377, + 0.486, + 0.403 + ], + "angle": 0, + "content": "Paul Gauthier. 2024. How aider scored sota \\(26.3\\%\\) on swe bench lite | aider. Accessed: January 21, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.413, + 0.487, + 0.479 + ], + "angle": 0, + "content": "Jiafeng Guo, Yixing Fan, Qingyao Ai, and W Bruce Croft. 2016. A deep relevance matching model for ad-hoc retrieval. In Proceedings of the 25th ACM international on conference on information and knowledge management, pages 55-64." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.489, + 0.487, + 0.554 + ], + "angle": 0, + "content": "Jiafeng Guo, Yixing Fan, Liang Pang, Liu Yang, Qingyao Ai, Hamed Zamani, Chen Wu, W Bruce Croft, and Xueqi Cheng. 2020. A deep look into neural ranking models for information retrieval. Information Processing & Management, 57(6):102067." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.565, + 0.487, + 0.617 + ], + "angle": 0, + "content": "Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In International conference on machine learning, pages 3929-3938. PMLR." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.627, + 0.487, + 0.692 + ], + "angle": 0, + "content": "Michael Gunther, Louis Milliken, Jonathan Geuter, Georgios Mastrupas, Bo Wang, and Han Xiao. 2023. Jina embeddings: A novel set of high-performance sentence embedding models. Preprint, arXiv:2307.11224." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.703, + 0.487, + 0.768 + ], + "angle": 0, + "content": "Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.779, + 0.487, + 0.883 + ], + "angle": 0, + "content": "Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, Kai Dang, Yang Fan, Yichang Zhang, An Yang, Rui Men, Fei Huang, Bo Zheng, Yibo Miao, Shanghaoran Quan, Yunlong Feng, Xingzhang Ren, Xuancheng Ren, Jingren Zhou, and Junyang Lin. 2024. Qwen2.5-coder technical report. Preprint, arXiv:2409.12186." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.894, + 0.487, + 0.92 + ], + "angle": 0, + "content": "Hyperbolic. 2025. Hyperbolic website. https:// hyperbolic.xyz/. Accessed: 2025-04-15." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.086, + 0.882, + 0.15 + ], + "angle": 0, + "content": "Carlos E Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik Narasimhan. 2023. Swe-bench: Can language models resolve real-world github issues? arXiv preprint arXiv:2310.06770." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.162, + 0.882, + 0.201 + ], + "angle": 0, + "content": "Sungmin Kang, Gabin An, and Shin Yoo. 2023. A preliminary evaluation of llm-based fault localization. arXiv preprint arXiv:2308.05487." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.212, + 0.882, + 0.264 + ], + "angle": 0, + "content": "Sungmin Kang, Gabin An, and Shin Yoo. 2024. A quantitative and qualitative evaluation of llm-based explainable fault localization. Proceedings of the ACM on Software Engineering, 1(FSE):1424-1446." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.274, + 0.882, + 0.352 + ], + "angle": 0, + "content": "Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, et al. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems, 33:9459-9474." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.363, + 0.882, + 0.428 + ], + "angle": 0, + "content": "Xiangyan Liu, Bo Lan, Zhiyuan Hu, Yang Liu, Zhicheng Zhang, Fei Wang, Michael Shieh, and Wenmeng Zhou. 2024. Codexgraph: Bridging large language models and code repositories via code graph databases. Preprint, arXiv:2408.03910." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.439, + 0.882, + 0.543 + ], + "angle": 0, + "content": "Qingsong Lv, Ming Ding, Qiang Liu, Yuxiang Chen, Wenzheng Feng, Siming He, Chang Zhou, Jianguo Jiang, Yuxiao Dong, and Jie Tang. 2021. Are we really making much progress? revisiting, benchmarking and refining heterogeneous graph neural networks. In Proceedings of the 27th ACM SIGKDD conference on knowledge discovery & data mining, pages 1150-1160." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.554, + 0.882, + 0.607 + ], + "angle": 0, + "content": "Yingwei Ma, Qingping Yang, Rongyu Cao, Binhua Li, Fei Huang, and Yongbin Li. 2024. How to understand whole software repository? arXiv e-prints, pages arXiv-2406." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.616, + 0.882, + 0.681 + ], + "angle": 0, + "content": "Niels Mündler, Mark Müller, Jingxuan He, and Martin Vechev. 2024. Swt-bench: Testing and validating real-world bug-fixes with code agents. Advances in Neural Information Processing Systems, 37:81857-81887." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.692, + 0.882, + 0.719 + ], + "angle": 0, + "content": "OpenAI. 2023. Chatgpt: Language model by openai. Accessed: January 21, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.729, + 0.882, + 0.807 + ], + "angle": 0, + "content": "Siru Ouyang, Wenhao Yu, Kaixin Ma, Zilin Xiao, Zhihan Zhang, Mengzhao Jia, Jiawei Han, Hongming Zhang, and Dong Yu. 2025. Repograph: Enhancing AI software engineering with repository-level code graph. In The Thirteenth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.818, + 0.882, + 0.845 + ], + "angle": 0, + "content": "PerplexityAI. 2023. Perplexity ai: An ai-powered search engine. Accessed: January 21, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.854, + 0.882, + 0.919 + ], + "angle": 0, + "content": "Yihao Qin, Shangwen Wang, Yiling Lou, Jinhao Dong, Kaixin Wang, Xiaoling Li, and Xiaoguang Mao. 2024. Agentfl: Scaling llm-based fault localization to project-level context. arXiv preprint arXiv:2403.16362." + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.086, + 0.882, + 0.919 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.166 + ], + "angle": 0, + "content": "Chen Qu, Liu Yang, Cen Chen, Minghui Qiu, W Bruce Croft, and Mohit Iyyer. 2020. Open-retrieval conversational question answering. In Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval, pages 539-548." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.176, + 0.489, + 0.229 + ], + "angle": 0, + "content": "Stephen Robertson, Hugo Zaragoza, et al. 2009. The probabilistic relevance framework: Bm25 and beyond. Foundations and Trends® in Information Retrieval, 3(4):333-389." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.24, + 0.489, + 0.28 + ], + "angle": 0, + "content": "Stephen E. Robertson, Steve Walker, Susan Jones, Micheline Hancock-Beaulieu, and Mike Gatford. 1994. Okapi at trec-3. In Text Retrieval Conference." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.291, + 0.489, + 0.356 + ], + "angle": 0, + "content": "Tarun Suresh, Revanth Gangi Reddy, Yifei Xu, Zach Nussbaum, Andriy Mulyar, Brandon Duderstadt, and Heng Ji. 2024. Cornstack: High-quality contrastive data for better code ranking. arXiv preprint arXiv:2412.01007." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.368, + 0.489, + 0.445 + ], + "angle": 0, + "content": "David A. Tomassi, Naji Dmeiri, Yichen Wang, Antara Bhowmick, Yen-Chuan Liu, Premkumar Devanbu, Bogdan Vasilescu, and Cindy Rubio-Gonzalez. 2019. Bugswarm: Mining and continuously growing a dataset of reproducible failures and fixes. Preprint, arXiv:1903.06725." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.457, + 0.486, + 0.483 + ], + "angle": 0, + "content": "VoyageAI. 2024. Voyage-code-2: Elevate your code retrieval. Accessed: 2024-02-02." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.495, + 0.489, + 0.56 + ], + "angle": 0, + "content": "Liang Wang, Nan Yang, Xiaolong Huang, Bixing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, and Furu Wei. 2022. Text embeddings by weakly-supervised contrastive pre-training. arXiv preprint arXiv:2212.03533." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.572, + 0.489, + 0.703 + ], + "angle": 0, + "content": "Xingyao Wang, Boxuan Li, Yufan Song, Frank F. Xu, Xiangru Tang, Mingchen Zhuge, Jiayi Pan, Yueqi Song, Bowen Li, Jaskirat Singh, Hoang H. Tran, Fuqiang Li, Ren Ma, Mingzhang Zheng, Bill Qian, Yanjun Shao, Niklas Muennighoff, Yizhe Zhang, Binyuan Hui, Junyang Lin, Robert Brennan, Hao Peng, Heng Ji, and Graham Neubig. 2025. Open hands: An open platform for AI software developers as generalist agents. In The Thirteenth International Conference on Learning Representations." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.714, + 0.489, + 0.78 + ], + "angle": 0, + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2023a. Self-consistency improves chain of thought reasoning in language models. Preprint, arXiv:2203.11171." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.791, + 0.489, + 0.856 + ], + "angle": 0, + "content": "Yue Wang, Hung Le, Akhilesh Deepak Gotmare, Nghi D. Q. Bui, Junnan Li, and Steven C. H. Hoi. 2023b. Codet5+: Open code large language models for code understanding and generation. Preprint, arXiv:2305.07922." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.868, + 0.489, + 0.92 + ], + "angle": 0, + "content": "Zora Zhiruo Wang, Akari Asai, Xinyan Velocity Yu, Frank F. Xu, Yiqing Xie, Graham Neubig, and Daniel Fried. 2024. Coderag-bench: Can retrieval augment code generation? Preprint, arXiv:2406.14497." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.086, + 0.883, + 0.138 + ], + "angle": 0, + "content": "Yonghao Wu, Zheng Li, Jie M Zhang, Mike Papadakis, Mark Harman, and Yong Liu. 2023. Large language models in fault localisation. arXiv preprint arXiv:2308.15276." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.149, + 0.883, + 0.201 + ], + "angle": 0, + "content": "Chunqiu Steven Xia, Yinlin Deng, Soren Dunn, and Lingming Zhang. 2024. Agentless: Demystifying llm-based software engineering agents. arXiv preprint arXiv:2407.01489." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.212, + 0.883, + 0.278 + ], + "angle": 0, + "content": "John Yang, Carlos E Jimenez, Alexander Wettig, Kili-ian Lieret, Shunyu Yao, Karthik Narasimhan, and Ofir Press. 2024. Swe-agent: Agent-computer interfaces enable automated software engineering. arXiv preprint arXiv:2405.15793." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.287, + 0.883, + 0.34 + ], + "angle": 0, + "content": "Zhongming Yu, Hejia Zhang, Yujie Zhao, Hanxian Huang, Matrix Yao, Ke Ding, and Jishen Zhao. 2025. Ocaloca: An llm agent framework for software issue localization. arXiv preprint arXiv:2502.00350." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.35, + 0.883, + 0.416 + ], + "angle": 0, + "content": "Dejiao Zhang, Wasi Uddin Ahmad, Ming Tan, Hantian Ding, Ramesh Nallapati, Dan Roth, Xiaofei Ma, and Bing Xiang. 2024. CODE REPRESENTATION LEARNING AT SCALE. In The Twelfth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.426, + 0.761, + 0.439 + ], + "angle": 0, + "content": "Albert Örwall. 2024. Moatless tools." + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.086, + 0.883, + 0.439 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.084, + 0.388, + 0.101 + ], + "angle": 0, + "content": "A LOCAGENT Design Details" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.11, + 0.32, + 0.126 + ], + "angle": 0, + "content": "A.1 Tool Output Design" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.131, + 0.468, + 0.163 + ], + "angle": 0, + "content": "A.1.1 Three-level format for SearchEntity output" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.167, + 0.489, + 0.31 + ], + "angle": 0, + "content": "Once invoked by the LLM agent, the retrieval APIs search for files, classes, methods, and code snippets in the codebase, and return the results back to the agent. To avoid forming very lengthy code context that may containing noisy information to LLM, we return only necessary information as API outputs. To achieve this, we desgined four granular standard output formats (Figure 6): fold, preview, full code." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.321, + 0.469, + 0.353 + ], + "angle": 0, + "content": "A.1.2 Tree-based Subgraph Formatting for TraverseGraph Output" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.357, + 0.489, + 0.517 + ], + "angle": 0, + "content": "The TraverseGraph tool traverses the code graph and returns a local subgraph for each input entity. The agent reasons about these subgraphs to understand each entity's complex dependencies. However, reasoning about graphs remains challenging for LLMs. Research by (Fatemi et al., 2023) demonstrates that LLM performance varies significantly based on graph formatting (how graphs are encoded as text). This makes the format design for output subgraphs crucial." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.518, + 0.491, + 0.677 + ], + "angle": 0, + "content": "We have developed a new tree-based format, shown in Figure 7, with several features that enhance LLM reasoning: (1) We represent subgraphs as trees, allowing LLMs to use indentation to determine a node's distance from the root, (2) We display complete entity IDs for each node (e.g., django/core-validators.py:RegexValidator) to help LLMs locate nodes easily, and (3) We explicitly specify relation types for each edge, including reversed relations" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.679, + 0.49, + 0.838 + ], + "angle": 0, + "content": "To evaluate how different graph formats impact code localization performance, we conducted an experiment using 37 challenging samples from SWEBench-Lite. These samples were considered \"challenging\" because they could not be solved by any baseline agent methods. Using Claude-3.5 as the Localization Model across all settings, we compared various output formats. Table 9 presents our findings. 
The baseline output formats we tested are described below:" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.852, + 0.489, + 0.884 + ], + "angle": 0, + "content": "- row: For each line, list one row of the adjacency matrix. For example," + }, + { + "type": "text", + "bbox": [ + 0.148, + 0.89, + 0.488, + 0.922 + ], + "angle": 0, + "content": "function \"fileA.py:funcA\" invokes function \"fileA.py:funcB\", \"fileA.py:funcC\"" + }, + { + "type": "text", + "bbox": [ + 0.531, + 0.085, + 0.883, + 0.116 + ], + "angle": 0, + "content": "- row (w/ entity attributes): Additionally include entity attributes for format row." + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.127, + 0.883, + 0.191 + ], + "angle": 0, + "content": "- incident: The incident format mentioned in (Fatemi et al., 2023). An integer instead of entity ID is used to represent each node. For example," + }, + { + "type": "list", + "bbox": [ + 0.531, + 0.085, + 0.883, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.542, + 0.197, + 0.882, + 0.244 + ], + "angle": 0, + "content": "Map function \"fileA.py:funcA\" to index 0. Map function \"fileA.py:funcB\" to index 1. Map function \"fileA.py:funcC\" to index 2." + }, + { + "type": "text", + "bbox": [ + 0.543, + 0.25, + 0.78, + 0.266 + ], + "angle": 0, + "content": "function \\(O\\) invokes function 1,2." + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.275, + 0.882, + 0.307 + ], + "angle": 0, + "content": "Graphviz DOT: Represent graph in Graphviz DOT language (Ellson et al., 2002)." + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.318, + 0.882, + 0.349 + ], + "angle": 0, + "content": "- JSON: Expand the subgraph as a tree, and convert it to JSON format." 
+ }, + { + "type": "list", + "bbox": [ + 0.532, + 0.275, + 0.882, + 0.349 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.36, + 0.885, + 0.552 + ], + "angle": 0, + "content": "As shown in Table 9, expanding subgraphs as trees (i.e., JSON, tree-based) can significantly improve the performance. Our tree-based format achieves the best overall performance across different levels of localization tasks. We also test returning entity attributes along with subgraphs. We notice that row (w/ entity attributes) consistently underperforms row, indicating the attributes for all the nodes may be very noisy. Besides, although using incident format can simplify the output and show improvements in file-level localization, it degradation the module- and file-level localization." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.564, + 0.687, + 0.579 + ], + "angle": 0, + "content": "A.2 Implementation" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.584, + 0.884, + 0.825 + ], + "angle": 0, + "content": "To enable the LLM agent to invoke the Code Localization APIs, we handle the interaction differently based on the LLM's capabilities. For LLMs that support tool-calling features, we define the tools as a list of JSON objects, which are then used as parameters for the API calls. For LLMs that do not support tool-calling (such as Qwen), we provide the description of the API and the expected output as part of the LLM's prompt. When the agent decides to invoke a set of retrieval APIs, it responds with a list of API call names and their corresponding arguments. These retrieval API requests are processed locally by searching over the built code graph. The results from executing these APIs locally are returned to the agent." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.826, + 0.884, + 0.922 + ], + "angle": 0, + "content": "By default, we query the LLM with a temperature setting of 1.0. 
We conduct two interactions, after which we rerank the results based on mean reciprocal rank (MRR) scores. We also leverage multiprocessing execution to speed up the process. Since all our tools are read-only, LOCAGENT does" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.129, + 0.086, + 0.368, + 0.344 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.375, + 0.087, + 0.622, + 0.344 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.626, + 0.087, + 0.873, + 0.344 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.354, + 0.883, + 0.397 + ], + "angle": 0, + "content": "Figure 6: Different output formats designed for efficient agent-code interaction. Left: Full code output when matched entities \\(\\leq 3\\). Middle: Preview output showing module skeleton for large files. Right: Fold output showing only entity IDs when matches \\(>3\\)." + }, + { + "type": "image", + "bbox": [ + 0.27, + 0.411, + 0.729, + 0.637 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.646, + 0.881, + 0.662 + ], + "angle": 0, + "content": "Figure 7: A truncated example of the expanded tree-based format for the output subgraph of tool TraverseGraph." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.687, + 0.486, + 0.718 + ], + "angle": 0, + "content": "not require a specialized Docker environment to operate." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.731, + 0.447, + 0.745 + ], + "angle": 0, + "content": "B Dataset construction and statistics" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.756, + 0.382, + 0.77 + ], + "angle": 0, + "content": "B.1 Dataset construction details" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.777, + 0.489, + 0.889 + ], + "angle": 0, + "content": "Example collection. We collected examples on popular Python repositories on Github follow (Jimenez et al., 2023). 
To gather issues related to performance and security, we searched for the keywords listed in Table 10 using the GitHub Search APIs. We then used GPT-4o-2024-0513 as the classifier based on the issue descriptions." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.89, + 0.489, + 0.922 + ], + "angle": 0, + "content": "Ground Truth Locations. The affected files or functions in the original codebase, as identified in" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.687, + 0.885, + 0.894 + ], + "angle": 0, + "content": "the patches, are considered the target locations for the given issue. While it is possible to fix a bug in a location different from the ground truth, the extracted ground-truth locations still serve as approximate targets for localization. Additionally, edited code such as documents, import statements, and comments are excluded from the localization target. These elements are not considered relevant for bug localization, as they do not directly impact the functionality of the code or its execution. By filtering out these elements, the focus is maintained on the core code changes that are relevant for localization." + } + ], + [ + { + "type": "table", + "bbox": [ + 0.159, + 0.082, + 0.842, + 0.226 + ], + "angle": 0, + "content": "
Output FormatFile(%)Module(%)Function(%)
Acc@1Acc@3Acc@5Acc@5Acc@10Acc@5Acc@10
row41.1867.6570.5961.7661.7635.2938.24
row (w/ entity attributes)41.1864.7164.7150.0050.0032.3532.35
incident41.1870.5973.5355.8855.8829.4132.35
Graphviz DOT41.1873.5382.3564.7164.7135.2935.29
JSON41.1867.6576.4767.6570.5938.2441.18
tree-based (Ours)47.0679.4179.4164.7164.7138.2441.18
" + }, + { + "type": "table_caption", + "bbox": [ + 0.218, + 0.233, + 0.778, + 0.248 + ], + "angle": 0, + "content": "Table 9: Localization performance under different TraverseGraph output formats." + }, + { + "type": "table", + "bbox": [ + 0.159, + 0.261, + 0.842, + 0.441 + ], + "angle": 0, + "content": "
CategoryKeywords
Performancebottleneck, performance improvement, memory usage optimization, time complexity reduction, latency improvement, scalability improvement, CPU usage reduction, caching improvement, concurrency optimization
SecurityOut-of-bounds Write, Out-of-bounds Read, NULL Pointer Dereference, Missing Authorization, memory leak fix, security vulnerability, security issue, authentication bypass, authentication issue, better maintained, buffer overflow, denial of service, security hardening, security patch, unsafe deserialization, Use After Free, Integer Overflow or Wraparound, Uncontrolled Resource Consumption, Missing Authentication for Critical Function
" + }, + { + "type": "table_caption", + "bbox": [ + 0.114, + 0.449, + 0.882, + 0.465 + ], + "angle": 0, + "content": "Table 10: We use these Keywords to search for Performance and Security related issues with Github Search APIs." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.489, + 0.361, + 0.506 + ], + "angle": 0, + "content": "C Additional Experiments" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.515, + 0.35, + 0.531 + ], + "angle": 0, + "content": "C.1 Implementation Details" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.536, + 0.382, + 0.552 + ], + "angle": 0, + "content": "C.1.1 Baselines Implementation" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.555, + 0.49, + 0.732 + ], + "angle": 0, + "content": "Regarding the embedding-based methods in our evaluation, these approaches operate primarily at the function level, where each function is embedded as a separate unit. The function's context (its containing file and class) is appended to the function representation before embedding, rather than being embedded separately. While theoretically these methods could employ hierarchical indexing, the standard implementations we evaluated use flat indexing structures where each function is embedded as a single unit." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.733, + 0.49, + 0.797 + ], + "angle": 0, + "content": "We use OpenHands's remote runtime feature to parallelize evaluation on OpenHands and SWEagent. We use Openhands version 0.12.0 released on Oct 31, 2024." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.806, + 0.471, + 0.838 + ], + "angle": 0, + "content": "C.1.2 Quantifying Task Difficulty Based on Code Graph Distance" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.842, + 0.49, + 0.922 + ], + "angle": 0, + "content": "We measure task difficulty by computing the average shortest hop distance between the functions mentioned in the issue descriptions and the patched functions within our code graph. 
Specifically, we first extract potential function names from each" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.49, + 0.885, + 0.651 + ], + "angle": 0, + "content": "issue description using GPT-4o-2024-0513, and identify their corresponding nodes in the code graph using the global dictionary. These identified nodes form the set of predicted nodes, denoted as \\(\\mathcal{C}\\). Similarly, we link the ground truth functions from the patch to their corresponding nodes in the code graph, forming the set of target nodes, denoted as \\(\\mathcal{T}\\). To quantify the difficulty \\(\\delta\\), we calculate the average shortest hop distance between the predicted nodes \\(\\mathcal{C}\\) and the target nodes \\(\\mathcal{T}\\), defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.577, + 0.657, + 0.816, + 0.694 + ], + "angle": 0, + "content": "\\[\n\\delta = \\frac {1}{| \\mathcal {C} |} \\sum_ {c \\in \\mathcal {C}} \\frac {1}{m i n _ {t \\in \\mathcal {T}} d (c , t) + 1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.702, + 0.885, + 0.798 + ], + "angle": 0, + "content": "where \\( d(c, t) \\) represents the shortest hop distance between nodes \\( c \\) and \\( t \\) in the graph. For performance analysis stratified by difficulty, we round \\( \\delta \\) down to \\( \\lfloor \\delta \\rfloor \\) to group samples by difficulty levels, and we exclude samples where the LLM fails to extract any valid function names." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.806, + 0.704, + 0.822 + ], + "angle": 0, + "content": "C.1.3 Training details." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.826, + 0.884, + 0.922 + ], + "angle": 0, + "content": "Fine-tuning Settings. We use Qwen-2.5-Coder-Instruct (Hui et al., 2024) 7B and 32B variants as our base models. 
We fine-tuned Qwen-2.5-Coder-Instruct 7B and 32B models on 768 training samples from the SWE-Bench training dataset, leveraging LoRA" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.12, + 0.083, + 0.88, + 0.285 + ], + "angle": 0, + "content": "
TypeMethodLoc-ModelFile (%)Module (%)Function (%)
NDCG@1NDCG@3NDCG@5NDCG@5NDCG@10NDCG@5NDCG@10
Embedding-BasedBM25 (Robertson et al., 2009)38.6946.550.6137.3139.8626.1527.92
E5-base-v2 (Wang et al., 2022)49.6464.1966.653.1554.4531.3935.3
Jina-Code-v2 (Günther et al., 2023)43.4359.9363.751.0254.1333.2836.44
Codesage-large-v2 (Zhang et al., 2024)47.8160.8264.3949.3852.2227.0330.74
CodeRankEmbed (Suresh et al., 2024)52.5567.5470.3957.5159.7640.2842.55
Procedure-BasedAgentless (Xia et al., 2024)GPT-4o67.1571.7671.7664.3164.3153.8153.81
Claude-3.572.6376.7276.8767.3667.3657.5557.55
Agent-BasedMoatlessTools (Örwall, 2024)GPT-4o73.3680.0380.3368.5769.0949.7750.62
Claude-3.572.6380.7380.8869.1169.1153.0353.16
SWE-agent (Yang et al., 2024)GPT-4o57.363.9664.1253.9553.9542.3242.44
Claude-3.577.3784.3284.9372.7772.959.6759.79
Openshands (Wang et al., 2025)GPT-4o60.9567.6268.3958.1858.644.3444.66
Claude-3.576.2884.2784.4375.7975.9263.1363.8
LocAgent (Ours)Qwen2.5-7B(ft)70.8079.3680.970.9971.6855.6258.09
Qwen2.5-32B(ft)75.9184.7485.6476.2876.7764.2765.93
Claude-3.577.7486.1987.1477.7378.164.3465.57
" + }, + { + "type": "table_caption", + "bbox": [ + 0.219, + 0.296, + 0.778, + 0.31 + ], + "angle": 0, + "content": "Table 11: NDCG scores comparison showing ranking quality of different methods." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.336, + 0.489, + 0.448 + ], + "angle": 0, + "content": "for efficient adaptation. The training set included 447 samples generated by Claude-3.5, while the remaining samples were iteratively generated using the fine-tuned Qwen2.5-32B model. The fine-tuning process was conducted over 5 epochs with max_token set to \\(128k\\) and a learning rate of \\(2 \\times 10^{-4}\\)." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.46, + 0.221, + 0.476 + ], + "angle": 0, + "content": "D Prompt" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.486, + 0.486, + 0.518 + ], + "angle": 0, + "content": "In this section, we go through the prompt template that make up the agent's history." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.129, + 0.255, + 0.173, + 0.266 + ], + "angle": 0, + "content": "Prompt" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.269, + 0.872, + 0.288 + ], + "angle": 0, + "content": "Given the following GitHub problem description, your objective is to localize the specific files, classes or functions, and lines of code that need modification or contain key information to resolve the issue." 
+ }, + { + "type": "text", + "bbox": [ + 0.129, + 0.293, + 0.367, + 0.303 + ], + "angle": 0, + "content": "Follow these steps to localize the issue:" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.303, + 0.46, + 0.311 + ], + "angle": 0, + "content": "Step 1: Categorize and Extract Key Problem Information" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.312, + 0.491, + 0.32 + ], + "angle": 0, + "content": "- Classify the problem statement into the following categories:" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.32, + 0.624, + 0.327 + ], + "angle": 0, + "content": "Problem description, error trace, code to reproduce the bug, and additional context." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.328, + 0.589, + 0.335 + ], + "angle": 0, + "content": "- Identify modules in the '{package_name}' package mentioned in each category." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.335, + 0.726, + 0.343 + ], + "angle": 0, + "content": "- Use extracted keywords and line numbers to search for relevant code references for additional context." + }, + { + "type": "list", + "bbox": [ + 0.129, + 0.312, + 0.726, + 0.343 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.35, + 0.339, + 0.359 + ], + "angle": 0, + "content": "Step 2: Locate Referenced Modules" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.36, + 0.356, + 0.366 + ], + "angle": 0, + "content": "Accurately determine specific modules" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.367, + 0.497, + 0.374 + ], + "angle": 0, + "content": "- Explore the repo to familiarize yourself with its structure." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.375, + 0.709, + 0.382 + ], + "angle": 0, + "content": "- Analyze the described execution flow to identify specific modules or components being referenced." 
+ }, + { + "type": "text", + "bbox": [ + 0.129, + 0.383, + 0.817, + 0.39 + ], + "angle": 0, + "content": "- Pay special attention to distinguishing between modules with similar names using context and described execution flow." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.391, + 0.4, + 0.398 + ], + "angle": 0, + "content": "- Output Format for collected relevant modules:" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.399, + 0.384, + 0.406 + ], + "angle": 0, + "content": "- Use the format: 'file path:QualifiedName'" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.407, + 0.841, + 0.414 + ], + "angle": 0, + "content": "- E.q., for a function `calculate_sum` in the `MathUtilities` class located in `src/helpers/mathHelpers.py`, represent it as:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.415, + 0.463, + 0.423 + ], + "angle": 0, + "content": "'src/helpers/mathHelpers.py:MathUtil calculator_sum'." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.429, + 0.396, + 0.438 + ], + "angle": 0, + "content": "## Step 3: Analyze and Reproducing the Problem" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.439, + 0.326, + 0.446 + ], + "angle": 0, + "content": "Clarify the Purpose of the Issue" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.447, + 0.715, + 0.455 + ], + "angle": 0, + "content": "- If expanding capabilities: Identify where and how to incorporate new behavior, fields, or modules." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.456, + 0.664, + 0.463 + ], + "angle": 0, + "content": "- If addressing unexpected behavior: Focus on localizing modules containing potential bugs." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.464, + 0.317, + 0.471 + ], + "angle": 0, + "content": "- Reconstruct the execution flow" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.472, + 0.428, + 0.48 + ], + "angle": 0, + "content": "- Identify main entry points triggering the issue." 
+ }, + { + "type": "text", + "bbox": [ + 0.14, + 0.48, + 0.532, + 0.487 + ], + "angle": 0, + "content": "- Trace function calls, class interactions, and sequences of events." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.488, + 0.434, + 0.495 + ], + "angle": 0, + "content": "- Identify potential breakpoints causing the issue." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.496, + 0.663, + 0.504 + ], + "angle": 0, + "content": "Important: Keep the reconstructed flow focused on the problem, avoiding irrelevant details." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.509, + 0.362, + 0.518 + ], + "angle": 0, + "content": "## Step 4: Locate Areas for Modification" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.519, + 0.875, + 0.527 + ], + "angle": 0, + "content": "- Locate specific files, functions, or lines of code requiring changes or containing critical information for resolving the issue." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.528, + 0.658, + 0.535 + ], + "angle": 0, + "content": "- Consider upstream and downstream dependencies that may affect or be affected by the issue." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.536, + 0.595, + 0.543 + ], + "angle": 0, + "content": "- If applicable, identify where to introduce new fields, functions, or variables." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.544, + 0.767, + 0.551 + ], + "angle": 0, + "content": "- Think Thoroughly: List multiple potential solutions and consider edge cases that could impact the resolution." 
+ }, + { + "type": "text", + "bbox": [ + 0.129, + 0.558, + 0.331, + 0.567 + ], + "angle": 0, + "content": "Output Format for Final Results:" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.568, + 0.687, + 0.575 + ], + "angle": 0, + "content": "Your final output should list the locations requiring modification, wrapped with triple back ticks" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.576, + 0.852, + 0.583 + ], + "angle": 0, + "content": "Each location should include the file path, class name (if applicable), function name, or line numbers, ordered by importance." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.584, + 0.4, + 0.591 + ], + "angle": 0, + "content": "Your answer would better include about 5 files." + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.598, + 0.206, + 0.608 + ], + "angle": 0, + "content": "Examples:" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.614, + 0.241, + 0.623 + ], + "angle": 0, + "content": "full_path1/file1.py" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.624, + 0.178, + 0.63 + ], + "angle": 0, + "content": "line: 10" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.631, + 0.218, + 0.639 + ], + "angle": 0, + "content": "class: MyClass1" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.639, + 0.258, + 0.647 + ], + "angle": 0, + "content": "function: my_function1" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.655, + 0.241, + 0.663 + ], + "angle": 0, + "content": "full path2/file2.py" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.664, + 0.178, + 0.671 + ], + "angle": 0, + "content": "line:76" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.671, + 0.309, + 0.68 + ], + "angle": 0, + "content": "function: MyClass2.my_function2" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.686, + 0.241, + 0.695 + ], + "angle": 0, + "content": "full_path3/file3.py" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.696, + 0.178, + 0.703 + ], + "angle": 0, + "content": "line: 24" + }, + { + "type": 
"text", + "bbox": [ + 0.13, + 0.703, + 0.184, + 0.71 + ], + "angle": 0, + "content": "line: 156" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.711, + 0.258, + 0.719 + ], + "angle": 0, + "content": "function: my_function3" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.719, + 0.146, + 0.725 + ], + "angle": 0, + "content": "#" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.734, + 0.285, + 0.743 + ], + "angle": 0, + "content": "Return just the location(s)" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.744, + 0.555, + 0.752 + ], + "angle": 0, + "content": "Note: Your thinking should be thorough and so it's fine if it's very long." + }, + { + "type": "image_caption", + "bbox": [ + 0.312, + 0.768, + 0.684, + 0.782 + ], + "angle": 0, + "content": "Figure 8: The task instruction prompt for LOCAGENT." + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_origin.pdf b/data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4eaed77dbafd09cd07de04f914d2425241f50f1c --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fcd0ff4b8f624de14b00fe1f8de43f88471336de8b18bb06c84278e17c1d2e4 +size 777937 diff --git a/data/2025/2503_09xxx/2503.09089/full.md b/data/2025/2503_09xxx/2503.09089/full.md new file mode 100644 index 0000000000000000000000000000000000000000..ca210656d90190b6b90bc732771a0494b937c4f7 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/full.md @@ -0,0 +1,504 @@ +# LocAgent: Graph-Guided LLM Agents for Code Localization + +Zhaoling Chen\*, Xiangru Tang\*, Gangda Deng\*, Fang Wu\*, Jialong Wu\*, Zhiwei Jiang, Viktor Prasanna\*, Arman Cohan\*, Xingyao Wang + +$^{\spadesuit}$ Yale University $^{\spadesuit}$ University of Southern California $^{\spadesuit}$ Stanford University 
$^{\spadesuit}$ All Hands AI xiangru.tang@yale.edu, gangdade@usc.edu, xingyao@all-hands.dev + +# Abstract + +Code localization—identifying precisely where in a codebase changes need to be made—is a fundamental yet challenging task in software maintenance. Existing approaches struggle to efficiently navigate complex codebases when identifying relevant code sections. The challenge lies in bridging natural language problem descriptions with the appropriate code elements, often requiring reasoning across hierarchical structures and multiple dependencies. We introduce LOCAGENT, a framework that addresses code localization through graph-based representation. By parsing codebases into directed heterogeneous graphs, LOCAGENT creates a lightweight representation that captures code structures (files, classes, functions) and their dependencies (imports, invocations, inheritance), enabling LLM agents to effectively search and locate relevant entities through powerful multi-hop reasoning. Experimental results on real-world benchmarks demonstrate that our approach significantly enhances accuracy in code localization. Notably, our method with the fine-tuned Qwen-2.5-Coder-Instruct-32B model achieves comparable results to SOTA proprietary models at greatly reduced cost (approximately $86\%$ reduction), reaching up to $92.7\%$ accuracy on file-level localization while improving downstream GitHub issue resolution success rates by $12\%$ for multiple attempts (Pass@10). Our code is available at https://github.com/gersteinlab/LocAgent. + +# 1 Introduction + +Code localization can be viewed as an information retrieval (IR) task that aims to identify relevant code snippets given natural language descriptions (Yu et al., 2025; Yang et al., 2024; Xia et al., 2024). 
Developers spend up to $66\%$ of their debugging time (Böhme et al., 2017) understanding code to + +![](images/c20269c8cebd98330e89168b5cf72fa71b25a1845eac8027149a6e7bbe018c9f.jpg) +Figure 1: Code localization across four common programming scenarios. Given a codebase and an issue description, the goal of code localization is to identify the relevant code snippets that require modification to resolve the issue. + +make changes, and automated tools often struggle with the same challenge. Poor code localization leads to incomplete fixes, introduces new bugs, and significantly extends development cycles. Unlike traditional retrieval tasks that primarily focus on lexical or semantic matching between queries and documents (Guo et al., 2016, 2020), code localization requires bridging the gap between natural language and programming languages. It also necessitates reasoning capabilities to analyze the issue, while considering the structural and semantic properties of code (Lewis et al., 2020; Guu et al., 2020; Qu et al., 2020). This capability has become fundamental to powerful AI assistants (OpenAI, 2023; Anthropic, 2023), code-aware search engines (PerplexityAI, 2023), and automated programming agents (Cognition.ai, 2024; Wang et al., 2025; Gauthier, 2024). In particular, accurate code localization is crucial for software maintenance and evolution, as it enables precise code modifications for bug fixes, refactoring, and feature additions (Wang et al., 2024), thereby streamlining the development workflow. + +Existing approaches to code localization face + +significant limitations. Dense retrieval methods require maintaining and continuously updating vector representations of the entire codebase (Wang et al., 2023b; Günther et al., 2023), creating engineering challenges for large, evolving repositories where code changes frequently. 
While LLMs demonstrate strong code understanding capabilities (Kang et al., 2023; Wu et al., 2023), models with large context windows cannot process entire codebases at once, necessitating strategic navigation through relevant parts. Moreover, issue descriptions often mention only symptoms rather than underlying causes. For instance, a report of 'XSS vulnerability in user profile' might require changes to a shared validation utility used throughout the codebase but not explicitly referenced in the issue. This disconnect between issue descriptions and affected code components presents a substantial challenge for traditional retrieval approaches, which struggle to trace implicit dependencies across the codebase structure. Recent agent-based methods attempt to address these limitations through iterative exploration (Yang et al., 2024; Qin et al., 2024) but still struggle to efficiently navigate and comprehend complex code structures and dependencies, particularly when multi-hop reasoning is required to trace from issue descriptions to affected code regions that aren't directly mentioned. + +This raises a key question: How can we design efficient indexing as intermediate representations that are structure-aware and both easy and performant for LLM agents to consume? It is intuitive to design an agentic retrieval system that carefully combines traditional IR methods and LLM agent's reasoning ability to achieve accurate, efficient, and cost-effective code localization in codebases. + +To address this challenge, we propose LOCAGENT, a framework that builds directed heterogeneous graph indexing to unify code structures, dependencies, and contents. Our approach leverages a structured graph representation that enables powerful multi-hop reasoning capabilities, allowing agents to navigate complex dependency relationships between code elements even when target code isn't explicitly mentioned in issue descriptions. 
This graph-based approach significantly outperforms previous methods on challenging localization tasks that require traversing multiple code relationships. Our lightweight representation, coupled with sparse indexing techniques, enables efficient entity search while maintaining rich structural information. The indexing process typically + +takes only a few seconds per codebase, making it highly practical for real-time use. The framework integrates a set of unified tools that guide the agent through a systematic exploration of the codebase, allowing autonomous navigation based on contextual needs. Furthermore, by fine-tuning Qwen-2.5-Coder-Instruct (Hui et al., 2024) 7B and 32B models(abbr. as Qwen-2.5-7B and Qwen-2.5-32B respectively), our system achieves performance comparable to state-of-the-art models like Claude-3-5-sonnet-20241022 (Anthropic, 2023) (abbr. as Claude-3.5) while significantly reducing API costs by over $80\%$ (from \ $0.66 to \$ 0.09 per example), making it practical for real-world deployment. + +Additionally, to facilitate a comprehensive evaluation of code localization methods, we introduce LOC-BENCH, a new benchmark specifically designed for this task. Existing benchmarks like SWE-Bench present significant limitations: (1) they risk contamination through data overlap with LLM training sets (Mündler et al., 2024), and (2) they primarily focus on bug fixing, lacking diversity in maintenance scenarios such as feature requests, performance optimizations, and security fixes. In contrast, LOC-BENCH covers diverse scenarios and mitigates potential contamination concerns by incorporating more recent examples from popular Python repositories collected after known LLM training cutoff dates. Additionally, we provide tooling to continuously update the benchmark with new examples, allowing researchers to maintain a fresh evaluation dataset as models evolve and training data cutoffs advance. 
+ +Our contributions address critical gaps in existing approaches: + +- We introduce a heterogeneous graph representation that captures both explicit and implicit code relationships, enabling efficient multi-hop reasoning. Our lightweight graph-based indexing process takes only seconds per repository and requires minimal storage. +- We design unified tools for agent-based code exploration that leverage our graph representation, allowing LLM agents to perform complex multi-hop navigation and reasoning across code dependencies even when target code isn't explicitly mentioned in issue descriptions. +- We introduce Loc-Bench, a new benchmark + +specifically designed for code localization that addresses limitations in existing datasets. Unlike previous benchmarks dominated by bug reports, Loc-Bench offers a balanced distribution across bug fixes, feature requests, security patches, and performance optimizations. + +- By fine-tuning open-source models on this task, we reduce the cost of code localization by $86\%$ while maintaining competitive performance. + +# 2 Related Work + +# 2.1 Traditional Retrieval-based Methods + +Traditional IR methods rely on lexical or semantic matching to return ranked lists of code snippets. Sparse retrievers, such as BM25 (Robertson et al., 1994, 2009), have demonstrated robustness to domain adaptation. Dense retrievers utilize embeddings for improved semantic searching, including models with open checkpoints such as general text embedding models E5-base-v2 (Wang et al., 2022) and proprietary APIs (VoyageAI, 2024). Code embedding models such as Jina-Code-v2 (Günther et al., 2023), Codesage-large-v2 (Zhang et al., 2024), and CodeRankEmbed (Suresh et al., 2024), trained specifically for code related tasks, showing significant performance in Code2Code and NL2Code semantic search tasks. 
However, while the embedding models themselves are small, the engineering challenges of maintaining these indexing systems (e.g., storage requirements, update mechanisms, and infrastructure maintenance) make them difficult to adapt to fast-evolving codebases. + +# 2.2 LLM-based Generative Retrieval Methods + +Recently, LLMs with advanced code reasoning capabilities have demonstrated superior performance by directly processing queries and raw code for code localization (Kang et al., 2023; Wu et al., 2023; Xia et al., 2024; Kang et al., 2024). For example, Agentless (Xia et al., 2024), initially designed for automated program repair, uses a simplistic hierarchical localization process powered by LLM. It employs a straightforward three-phase approach that first localizes relevant code sections before attempting to fix the identified issues, challenging the assumption that complex agent architectures are necessary for effective code understanding and modification tasks. + +Expanding on these techniques, agent-based methods utilize multi-step reasoning to enable automated codebase traversal. Specifically, OpenHands (Wang et al., 2025) implements a generalist coding agent that supports bash commands like grep and tools for viewing files. SWE-Agent (Yang et al., 2024) integrates a custom Agent-Computer Interface to support agents to navigate entire repositories. MoatlessTools (Örwall, 2024) combines an agentic searching loop and semantic search to obtain code locations. However, existing agent-based methods face two critical limitations: (a) they primarily navigate codebases through directory traversal rather than understanding semantic relationships, (b) and they struggle to extract and reason about complex cross-file dependencies when these relationships aren't explicitly represented in the repository structure. 
This significantly impairs their ability to locate code that requires modification when the issue involves interactions between structurally distant components in the codebase. + +# 2.3 Graph-based Code Representation Methods + +Due to the inherent structure of code, several works have employed graph-based representations to improve code understanding by capturing key relationships between components. Aider (2023) constructs a RepoMap and uses a graph ranking algorithm to identify the most significant contextual elements. Similarly, as a plugin, RepoGraph (Ouyang et al., 2025) performs subgraph retrieval – extracting an ego-network of relevant lines and their neighbors – to provide structured context. CodexGraph (Liu et al., 2024) indexes the repository into a Neo4j graph database, where LLM agents query the database precisely using Cypher. The efficiency of its retrieval process depends heavily on the querying capabilities of the LLM. These methods focus primarily on providing relevant context but do not enhance the traversal process itself, as they do not explicitly model directory structure or file hierarchies. + +In contrast, RepoUnderstander (Ma et al., 2024) builds hierarchical and function-call graphs, using Monte Carlo Tree Search (MCTS) guided by an LLM for exploration. While thorough, MCTS introduces extra computational overhead, making it less efficient than simpler traversal methods like BFS, particularly in large repositories. OrcaLoca (Yu et al., 2025) uses a simplified graph + +![](images/68d78298f4ef4a0462b402972796e7a35e59e5ec5b78ccafd6cd08b74dd8ad0d.jpg) +Figure 2: Overview of LOCAGENT framework. LOCAGENT first parses the given codebase to build a graph-based code representation with various types of entities and relations. It then constructs sparse indexes for exploring structures and searching content. Using these indexes, it performs agent-guided searches that combine the graph and tools. + +
MethodRelation TypesNode TypesSearch/Traversal Strategy
ContainImportInheritInvokeDirectoryFileClassFunction
CodexGraph(Liu et al., 2024)XXXCypher queries
RepoGraph(Ouyang et al., 2025)XXXEgo-graph retrieval
RepoUnderstander(Ma et al., 2024)XMCTS
OrcaLoca(Yu et al., 2025)XXSimple search tools
LOCAGENT(Ours)Unified retrieval tools
+ +Table 1: Comparison of Graph-Based Code Representation Methods. + +enhanced by priority scheduling and context pruning. It maintains efficient search but may miss complex invocation dependencies. Table 1 summarizes the differences between these methods and LOCAGENT. Compared to these approaches, LOCAGENT offers a more comprehensive and unified representation of the repository, along with efficient, unified retrieval tools specifically designed for LLM consumption. + +# 3 The LOCAGENT Framework + +We introduce LOCAGENT, a graph-oriented LLM-agent framework for code localization. Figure 2 illustrates the overall framework. When given a repository, LOCAGENT can locate all the relevant code sections at various granularities (file, class, function, or line level) for different types of GitHub issues (such as bug reports, feature requests, performance bottlenecks, and security vulnerabilities) through automated in-depth exploration and analysis of the codebase. Section 3.1 proposes a novel graph-based indexing approach as an intermediate + +representation for codebases. Section 3.2 presents our agent-based code search on the indexes and Section 3.3 describes our model fine-tuning and distillation process. + +# 3.1 Graph-based Code Representation + +Codebases contain rich structural information, both explicit and implicit, that is essential for agent reasoning. Building on this insight, we develop a graph-based indexing that comprehensively captures codebase relationships while maintaining a granularity suitable for LLM-agents to retrieve. + +Code Graph Construction. We construct a heterogeneous directed graph $\mathcal{G}(\mathcal{V},\mathcal{E},\mathcal{A},\mathcal{R})$ to index the codebase, where $\nu = \{v_{i}\}_{i = 1}^{n}$ is the node set and $\mathcal{E}\subseteq \mathcal{V}\times \mathcal{V}$ is the edge set. Each node $v\in \mathcal{V}$ and edge $e\in \mathcal{E}$ has an associated type mapping function. 
For nodes, $\tau (v):\mathcal{V}\to \mathcal{A}$ maps to types $\mathcal{A} = \{\mathrm{directory},\mathrm{file},\mathrm{class},\mathrm{function}\}$ . For edges, $\phi (e):\mathcal{E}\rightarrow \mathcal{R}$ maps to relationships $\mathcal{R} = \{\mathrm{contain},\mathrm{import},\mathrm{invoke},\mathrm{inherit}\}$ . In this paper, we focus our study on Python reposito + +ries and leave codebases with other programming languages as future work. + +First, we include all directories and Python files as nodes. Then, we parse each Python file using the abstract syntax tree (AST) to identify inner functions and classes recursively as nodes. We set the function level as the smallest node granularity and use each function's code content as the document for agent retrieval. This approach creates a good balance of information density between the index and documents, allowing LLMs to reason effectively within their context window limitations. + +As shown in Figure 2, all nodes with different types can be connected as a single tree using the contain relationship. This structure supports standard codebase-navigation operations from existing works. Our code graph further incorporates more advanced codebase relationships as edges: (1) the invoke relationship from function/class to function/class, where an invoke to a class represents class instantiation; (2) the import relationship from file to function/class; and (3) the inherit relationship between classes. + +Sparse Hierarchical Entity Indexing. We treat nodes in our code graph as entities and build hierarchical indexing based on their contents. For each keyword, we lookup the indexes from top to bottom: (1) We build an entity ID index as a unique identifier for each node using its fully qualified name. For example, a function calculate_sum in the MathUtils class located in src/utils.py would be represented as: src/utils.py:MathUtilscalculate_sum. 
(2) We construct a global dictionary to map the entity name (e.g., calculate_sum) to all nodes that share the same name. (3) We index entity IDs through an inverted index (i.e., BM25) to handle keyword searches that don't exactly match the IDs or names of entities. (4) For cases where input keywords aren't part of the entities' IDs (e.g., when a keyword refers to a global variable), we build an inverted index that maps code chunk(s) to each entity to cover all possible matches. + +Remark. Rather than relying solely on directory structures or hierarchical module indexing, our approach captures module dependencies that transcend directory boundaries. Two modules in distant directories (A and B) may appear unrelated in traditional navigation, but if they invoke each other or share inheritance, they're syntactically close in our graph representation. This syntactic + +
Tool NameInput ParamsOutput
SearchEntityKeywordsRelated Entities with Code Snippets
TraverseGraphStart Entity IDs Direction Traverse Hops Entity Types Relation TypesTraversed Subgraph, including Entities and Relations
RetrieveEntityEntity IDsComplete Code of Specified Entities
+ +Table 2: List of unified APIs provided by LocAgent for code search and exploration. + +proximity is essential for code localization because issues typically manifest through call relationships rather than directory structure. By capturing these functional dependencies, our approach efficiently identifies related components even when physically distant in the codebase. + +# 3.2 Agent-guided Code Search + +We develop tools based on the indexes built offline. During runtime, LOCAGENT takes issue statements as input and launches agents that autonomously use tools to localize target code sections. While the agent may iteratively invoke multiple tools internally to explore the codebase, LOCAGENT presents a simplified interface to users, requiring only a single-turn interaction—users submit an issue statement and receive localization results without additional input. This autonomous, self-contained workflow makes LOCAGENT both easy to deploy and highly practical for real-world use. + +Tool Design for Codebase Exploration. Recent works (Örwall, 2024; Wang et al., 2025), inspired by GUI-based IDEs, have developed numerous specialized tools for agents to explore codebases. However, these tools are initially designed for human readability, which sacrifices the compactness and efficiency that LLM agents prefer (Yang et al., 2024). Building upon our graph-based code representation, we can develop tools that support efficient higher-order codebase exploration to address these challenges. We unify all codebase navigation, search, and view operations into three tools (Table 2), introduced as follows. + +SearchEntity: This tool searches codebases using keywords to locate relevant entities through our Hierarchical Entity Index. When an exact match isn't found in the upper index, the system performs a fuzzy search using the lower index. For each entity found, we return its code snippet in three detail + +levels: fold, preview, and full code (Figure 6). 
This effectively prevents lengthy code context and reduces noise fed into agents. + +**TraverseGraph:** This tool performs a type-aware breadth-first search (BFS) on the code graph, starting from input entities and allowing control over both traversal direction and number of hops. This supports agents to perform arbitrary multi-hop codebase navigation through only one action, significantly improving the efficiency compared with existing agent systems. Note that by allowing agents to select entity types and relation types for each traversal, this tool effectively leverages the LLM agents' coding expertise to generate proper meta paths—a crucial element for heterogeneous graph analysis (Lv et al., 2021). For example, by specifying entity types to {class, function} and relation types to {contain, inherit}, this tool returns the UML diagram. Additionally, we design an expanded tree-based format for the output subgraph that encodes both relation types and directions (Figure 7). (Fatemi et al., 2023) demonstrates that LLM performance on graph reasoning depends on the input graph format. Converting a graph into a tree structure encodes topology through the spatial distance between entity names, thereby deriving better performance. For detailed comparisons with alternative graph formats, please see Appendix A.1.2. + +RetrieveEntity: This tool retrieves complete entity attributes for each input entity ID, including essential information such as file path, line number, and code content. + +Chain-of-Thought Agent Planning. We use chain-of-thought (CoT) prompting (shown in Appendix D) to guide the agent in solving code localization problems step by step. The agent systematically follows these steps: (1) Keyword extraction. The agent begins by breaking down the issue statement into different categories and then extracts relevant keywords that are closely related to the problem. (2) Linking keywords to code entities.
The agent invokes SearchEntity to complete and clarify each extracted keyword. + +(3) Generate the logical flow from fault to failure. The agent first identifies the entry points that trigger the problem. Then, it iteratively traverse the codebase with TraverseGraph, retrieves code contents with RetrieveEntity, and searches new keywords with SearchEntity. Finally, it generates the logic flow based on the issue and additional context. (4) Locate the target entities. The agent pinpoints all suspicious code entities that need modification + +based on the logic flow. Then, it ranks these entities based on their relevance. + +Confidence Estimation Based on Consistency. After generating a complete ranked list of candidate entities, to obtain a more consistent ranking, we measure the consistency (Wang et al., 2023a) of the LLM's predictions across multiple iterations. Specifically, we use the Reciprocal Rank as the initial confidence score for each predicted location. We then aggregate the scores for each entity across iterations to compute its final confidence score. The intuition behind this approach is that if the LLM consistently ranks a location higher in multiple iterations, it is more likely to be relevant. + +# 3.3 Open-source Model Fine-tuning + +Given the high costs of proprietary LLM APIs and data security concerns, we fine-tuned open-source models to improve their code localization capabilities and enable local deployment. We collect 433 successful trajectories generated with Claude-3.5, where the agent completed tasks from the SWEBench training set. Due to budget constraints, we sample an additional 335 trajectories generated by the initially fine-tuned Qwen2.5-32B model. Importantly, we only select successful trajectories where the model correctly localized the issues, creating a high-quality dataset of correct reasoning paths. 
These successful examples are then used to refine the same 32B model further, reinforcing effective reasoning patterns through this self-improvement loop. The entire dataset, combining both Claude-3.5 trajectories and successful Qwen2.5-32B samples, was then used to distill knowledge to a smaller 7B model. + +To fine-tune the smaller model, we employ Supervised Fine-Tuning (SFT) with LoRA (Hu et al., 2021). Our experiments show that this straightforward distillation method significantly enhances the performance of smaller models. See Appendix C.1.3 for more training details. + +# 4 LOC-BENCH: A New Benchmark for Code Localization + +# 4.1 Revisiting Existing Benchmark + +SWE-Bench(Jimenez et al., 2023) is a widely used benchmark that collects GitHub issues and corresponding code patches that resolve them. Xia et al. (2024); Suresh et al. (2024) adapt its subset, SWE-Bench-Lite, for code localization, treating the patched files and functions as the targets. + +However, existing datasets, including SWE-Bench, present challenges for effectively evaluating code localization methods. First, they are at risk of contamination, as they may include data overlapping with the repositories or issues used by modern models during pre-training. Second, existing datasets are not specifically designed for code localization (Tomassi et al., 2019). SWE-Bench, for instance, was created primarily to evaluate end-to-end bug-fixing capabilities, with localization being only an implicit intermediate step. This focus results in datasets dominated by bug reports (85% of SWE-Bench-Lite examples) while severely underrepresenting other common software maintenance tasks such as feature requests (14%), security vulnerabilities (1%), and performance optimizations (0%). This imbalance fails to capture the diverse localization challenges faced in real-world software development. 
+ +# 4.2 Dataset Construction + +To address the limitations of existing benchmarks, we introduce LOC-BENCH, a new dataset specifically designed for code localization. This dataset collects up-to-date issues from Python repositories to mitigate the influence of pre-training bias in the latest LLMs. Additionally, LOC-BENCH covers wider categories, including bug reports, feature requests, security, and performance issues, enabling a more comprehensive evaluation of code localization methods. The statistics of LOC-BENCH are shown in Table 3. + +For the Bug Report category, we collect GitHub issues created after October 2024, which is later than the release dates of most modern LLMs. To enrich the dataset with more instances of security and performance issues, we use the GitHub Search API to search for relevant keywords, such as "latency improvement" for performance-related issues. We exclude instances that involve modifying more than five Python files or more than ten functions in the corresponding patch. For further details, see Appendix B.1. + +# 5 Experiments + +Our experiments aim to evaluate four key aspects of LOCAGENT: (1) the effectiveness of our graph-based representation and tooling for code localization compared to existing methods, (2) the performance of fine-tuned open-source models as cost-effective alternatives to proprietary LLMs, (3) a detailed analysis of how performance varies across + +
DatasetCategory#Sample
SWE-Bench-Lite (Total = 300)Bug Report254
Feature Request43
Security Issue3
Performance Issue0
Loc-Bench (Total = 560)Bug Report242
Feature Request150
Security Issue29
Performance Issue139
+ +Table 3: Distribution of samples across different categories in the SWE-Bench-Lite and Loc-Bench datasets. + +task categories, and (4) the contribution of each component in our framework through comprehensive ablation studies. We evaluate on both SWE-Bench-Lite and our introduced Loc-Bench dataset. Additionally, we examine the impact of improved localization on downstream software maintenance tasks. + +# 5.1 Experimental Settings + +Datasets. We first conduct experiments on SWEBench-Lite, treating the patched files and functions as the targets for localization. Following Suresh et al. (2024), we excluded examples where no existing functions were modified by the patch, ultimately retaining 274 out of the original 300 examples. + +Metrics. To assess performance, we use a modified accuracy metric inspired by R-Precision from information retrieval, following Agentless(Xia et al., 2024). To assess performance, we use Acc@k (Accuracy at k) as our evaluation metric, following Agentless(Xia et al., 2024). For each example, we select the top-k predicted locations and consider a localization attempt successful only if all relevant locations are correctly identified within these top-k predictions. This approach measures the ability to fully identify all necessary code sections that require modification. We report results across multiple $k$ values: file localization at Acc@1, Acc@3, and Acc@5, and function localization at Acc@5 and Acc@10. Additionally, to provide a more relaxed evaluation criteria, we assess module localization, which only requires finding any function within the patched class. + +# 5.2 Baselines + +We evaluate LOCAGENT against three categories of competitive baselines: (a) Retrieval-based meth + +
TypeMethodLoc-ModelFile (%)Module (%)Function (%)
Acc@1Acc@3Acc@5Acc@5Acc@10Acc@5Acc@10
Embedding-BasedBM25 (Robertson et al., 1994)38.6951.8261.6845.2652.9231.7536.86
E5-base-v2 (Wang et al., 2022)49.6474.4580.2967.8872.2639.4251.09
Jina-Code-v2 (Günther et al., 2023)43.4371.1780.2963.5072.6342.3452.19
Codesage-large-v2 (Zhang et al., 2024)47.8169.3478.1060.5869.7133.9444.53
CodeRankEmbed (Suresh et al., 2024)52.5577.7484.6771.9078.8351.8258.76
Procedure-BasedAgentless (Xia et al., 2024)GPT-4o67.1574.4574.4567.1567.1555.4755.47
Claude-3.572.6379.2079.5668.9868.9858.7658.76
Agent-BasedMoutlessTools (Örwall, 2024)GPT-4o73.3684.3185.0474.8276.2857.3059.49
Claude-3.572.6385.7786.1376.2876.2864.6064.96
SWE-agent (Yang et al., 2024)GPT-4o57.3064.9668.9858.0358.0345.9946.35
Claude-3.577.3787.2390.1577.7478.1064.2364.60
Openhands (Wang et al., 2025)GPT-4o60.9571.9073.7262.4163.8749.6450.36
Claude-3.576.2889.7890.1583.2183.5868.2570.07
LOCAGENT (Ours)Qwen2.5-7B(ft)70.8084.6788.3281.0282.8564.2371.53
Qwen2.5-32B(ft)75.9190.5192.7085.7787.2371.9077.01
Claude-3.577.7491.9794.1686.5087.5973.3677.37
+ +Table 4: Performance comparison with baseline methods on code localization on SWE-bench lite. Results show the accuracy at file, module, and function levels. For Agent-Based methods, we use GPT-4o-2024-0513 (abbr. as GPT-4o) and Claude-3-5-sonnet-20241022 (abbr. as Claude-3.5) as the localization model. Additionally, the performance of our fine-tuned open-source models, Qwen2.5-7B(ft) and Qwen2.5-32B(ft), are included for comparison. + +ods: We include the sparse retrieval approach BM25 (Robertson et al., 1994) and several state-of-the-art embedding models, including the general-purpose E5-base-v2 (Wang et al., 2022) and specialized code embedding models such as JinaCode-v2 (Günther et al., 2023), Codesage-large-v2 (Zhang et al., 2024), and the current SOTA code embedding model CodeRankEmbed (Suresh et al., 2024). Proprietary embedding solutions were excluded due to API costs. (b) Procedure-based methods: We compare against Agentless (Xia et al., 2024), which employs a structured hierarchical approach to code localization without complex agent architectures. (c) Agent-based methods: We include several advanced agent frameworks designed for code exploration and modification, specifically OpenHands (Wang et al., 2025) (using its default CodeActAgent implementation), SWE-Agent (Yang et al., 2024), and MoatlessTools (Örwall, 2024). For implementation details, please refer to Appendix C.1.1. + +# 5.3 Evaluation Results on SWE-Bench-Lite + +As shown in Table 4, Agent-Based methods consistently outperform other approaches, and our method demonstrates competitive performance by achieving the best results across all levels of code localization. Unlike traditional retrieval-based methods, Agentless identifies only a limited number of locations due to its narrow repository scope, which hinders performance gains when considering a broader set of candidates. The results of the NDCG are presented in Table 11 in the Appendix. 
+ +![](images/f929dd53f4adf74c47eab0524e9c10d9df2c8753a81e68bd756d7c86d5d4876b.jpg) + +![](images/dd0a043ec99e4aa7d3cdd9cab21f0f8c8ff70d258cc383c3b7961f81be3880dc.jpg) +Figure 3: Performance analysis at different difficulty levels for file- and function-level localization. All agent-based methods and Agentless use Claude-3.5 as the localization model. Hop $N$ refers to the distances between functions mentioned in the issue description and the ground truth patch on our code graph. + +To further analyze the results, we examine performance across different task difficulty levels. We measure the task difficulty by calculating the shortest hops between the functions mentioned in the issue descriptions and the patched functions on our code graph (See Appendix C.1.2 for more details). As shown in Figure 3, performance decreases for all methods as the task becomes more challenging. However, Agent-based methods demonstrate better robustness as the difficulty increases, with + +![](images/594e4e848668151fa0f0d585dcf3a37fcc51677d8a82c5fdceaf97947d73c7bb.jpg) +Figure 4: Comparison of performance between the original and fine-tuned Qwen models. The metrics used are file-level Acc@5 and module/function-level Acc@10. Dashed lines represent the performance of the Claude-3.5 model for reference. + +our method maintaining competitive performance across various difficulty levels. Retrieval-based methods, such as E5-Base-v2 and CodeRankEmbed, perform poorly at the function level, even when the patched functions are explicitly mentioned in the query. This is because they treat the query as a whole, failing to capture fine-grained details. Agentless performs even worse than retrieval-based methods when exploration beyond the query is needed ( $hop \geq 0$ ) due to its simplistic localization process and limited view focused only on the repository structure. 
+ +# 5.4 Fine-tuned Open-source Models + +Figure 4 demonstrates that after fine-tuning, both the 7B and 32B models show significant improvements on this task. LOCAGENT with finetuned Qwen-2.5-Coder-Instruct-32B (abbreviated as Qwen2.5-32B(ft)) achieves performance comparable to Claude-3.5, and LOCAGENT with Qwen2.5-7B(ft) also delivers results on par with that obtained using GPT-4o. As shown in Table 4, our method with Qwen2.5-32B(ft) outperforms nearly all baselines, including those that use larger and more powerful LLMs. The original 7B model performs poorly due to its limited tool-use capability (Chen et al., 2024). These results validate the feasibility of deploying our fine-tuned open-source models as promising alternatives to proprietary APIs, especially in resource-constrained applications. + +# 5.5 Efficiency Analysis + +Table 5 presents an efficiency analysis comparing agent-based methods in terms of cost and the number of agent interactions required. MoatlessTools demonstrates good cost-efficiency and requires relatively fewer rounds of interaction. However, the + +
MethodLM#RoundCost($)Acc@10
Cost
MoatlessToolsGPT-4o50.461.3
Claude-3.550.461.4
SWE-agentGPT-4o80.560.8
Claude-3.590.671.0
OpenhandsGPT-4o150.830.6
Claude-3.5130.790.9
OursClaude-3.570.661.2
Qwen2.5-7B(ft)60.0513.2
Qwen2.5-32B(ft)90.098.6
+ +Table 5: Efficiency analysis comparing the average cost and number of agent interaction rounds required by different methods. The cost-efficiency of each method is evaluated using the ratio of function-level Acc@10 to average cost. + +
Model SettingFile Acc@5Module Acc@10Function Acc@10
Ours88.3282.8571.53
w/o TraverseGraph86.1378.4766.06
Relation Types: contain86.5079.5666.42
Traverse Hops: 186.8680.2966.79
w/o RetrieveEntity87.5981.3969.34
w/o SearchEntity68.9861.3153.28
w/o BM25 index75.1868.9860.22
+ +Table 6: The ablation study of our model. The metrics used here are file-level Acc@5, module-level Acc@10, and function-level Acc@10. The impact of removing or fixing components is analyzed to observe how each component contributes to the overall accuracy. + +dense embeddings it uses make it difficult and slow to adapt to fast-evolving codebases. SWE-agent and Openhands also show moderate costs but still do not match the efficiency of LOCAGENT. For LOCAGENT with Claude-3.5, although more rounds of interaction are required, the cost remains lower than that of Openhands, illustrating the token efficiency of our tool's outputs. LOCAGENT with fine-tuned Qwen models stands out for its superior efficiency1. Qwen2.5-7B(ft) is the most cost-efficient option, requiring only $0.05 per example, while Qwen2.5-32B(ft) offers a more cost-effective alternative to Claude-3.5. These results highlight the potential of fine-tuned open-source models as efficient alternatives, providing an optimal balance of cost-effectiveness and performance that surpasses other methods. + +# 5.6 Ablation Study + +We conduct an ablation study to evaluate the effectiveness of each component of our toolsets. Due to budget constraints, we use the fine-tuned Qwen-2.5-7B as the localization model for these experiments. + +(1) Each tool in our toolset plays a critical role in code localization performance. As shown in Table 6, removing any tool, especially the SearchEntity tool, leads to varying degrees of accuracy degradation, particularly in module and function level localization. This highlights the critical role each tool plays in identifying relevant modules and functions. +(2) The graph structure provides essential information for accurate code localization. Removing TraverseGraph tool decreases module and function level performance since the agent cannot obtain any structure information about the codebase and relies on reasoning capability to identify call relationship or directory structure. 
Adding contain relationship provides only marginal improvements compared to fully removing TraverseGraph, emphasizing the importance of the other three relationship types and explaining why our method surpasses others relying only on the repository structure.
(3) Multi-hop exploration is crucial for deep code understanding. When compared to the full setting, fixing $Hops = 1$ leads to a moderate decline in file and module-level accuracy, but it causes a more significant decrease in function-level accuracy, underscoring the importance of multi-hop exploration for identifying relevant entities.
(4) Sparse indexing significantly enhances localization performance. Removing SearchEntity tool, or even partial removal of its index, causes a substantial drop in performance across all metrics. This demonstrates the effectiveness of building a sparse index on our code graph for improving localization performance.

# 5.7 Evaluation Results on Loc-Bench

To ensure the robustness and generalization of our methods and fine-tuned Qwen models, and to eliminate potential data leakage, we evaluate on our new dataset. Since Loc-Bench includes examples that edit 1 to 5 files, we assess file localization at top-5 and top-10 ranks, and function/module localization at top-10 and top-15 ranks. Table 7 shows that our fine-tuned Qwen2.5-7B model exhibits strong gen

![](images/40113cdec1e1feca7000cb77d3a5294a6135d63a0d343288699aecc05a7b7a58.jpg)

![](images/f3ff500f60d006d9ab2100b2e34cdaaed024cc944a6d5f1861937bb334d1a3cf.jpg)
Figure 5: Performance analysis at different difficulty categories for file- and function-level localization. All agent-based baselines and Agentless use Claude-3.5 as the localization model.

eralization capabilities, maintaining competitive performance compared to SWE-agent using a more expensive and stronger model. These results highlight the practicality of the fine-tuned Qwen2.5-7B model for real-world applications.
Despite being an open-source alternative, it achieves a performance comparable to Claude-3.5, supporting its feasibility as a cost-effective substitute for commercial models in practical scenarios. + +Additionally, we evaluate the performance across four different difficulty categories. Figure 5 clearly shows that our method outperforms other methods in almost all categories of code localization. However, it also highlights a noticeable decrease in performance across the other three categories compared to the Bug Report category. This performance gap likely reflects our training data distribution, which contained more bug report examples, potentially leading to scaffolds better optimized for bug localization tasks. This trend suggests that while our method is highly effective for bug report localization, there is still room for improvement in handling the other categories through more balanced training data and category-specific optimization strategies. + +# 5.8 Application: Better Localization Leads to More Solved GitHub Issues + +To assess the impact of localization methods on downstream tasks, we evaluated their effectiveness in solving GitHub issues. We choose Agentless as the baseline, ranking among the top-performing + +
MethodLoc ModelFile (%)Module (%)Function (%)
Acc@5Acc@10Acc@10Acc@15Acc@10Acc@15
IR-BasedCodeRankEmbed74.2980.8963.2167.5043.3946.61
AgentlessClaude-3.567.5067.5053.3953.3942.6842.68
OpenHandsClaude-3.579.8280.0068.9369.1159.1159.29
SWE-agentClaude-3.577.6877.6863.5763.7551.9651.96
LocAgent (Ours)Qwen2.5-7B(ft)78.5779.6463.0463.0451.4351.79
Claude-3.583.3986.0770.8971.0759.2960.71
+ +Table 7: Performance evaluation on the real-world LocBench dataset. + +
MethodLocalization LMAcc@5Pass@1Pass@10
AgentlessClaude-3.558.3926.3133.58
OursQwen2.5-32B(ft)69.3426.7936.13
Claude-3.573.3627.9237.59
+ +Table 8: Impact of localization accuracy on downstream bug repair tasks. + +open-source submissions on SWE-Bench-Lite. For consistency, we utilized Claude-3.5 as the editing model in conjunction with the Agentless editing method. Table 8 shows that the success rate for solving GitHub issues improves significantly with better code localization accuracy. + +# 6 Conclusion + +In conclusion, LOCAGENT enhances code localization by structuring codebases as graphs, enabling efficient repository-level exploration for LLM agents. With fine-tuned open-source models, our method achieves high localization accuracy while significantly reducing costs compared to larger proprietary models. Experimental results demonstrate the effectiveness of LOCAGENT in identifying relevant code components and improving downstream tasks. + +# Limitations + +First, our study primarily focused on fine-tuning Qwen-2.5-Coder models. Exploring a broader range of base models, including other open-source LLMs like CodeLlama, Mistral, or Yi, could provide valuable insights into model selection trade-offs. Additionally, investigating different finetuning approaches beyond LoRA, such as full finetuning or other parameter-efficient methods, could potentially yield better performance. + +Second, though we demonstrated improved bug repair performance with better localization, we only scratched the surface of potential downstream + +applications. Future work should evaluate LocAgent's impact on other software engineering tasks like refactoring, feature addition, security vulnerability patching, and performance optimization. This would provide a more comprehensive understanding of the framework's practical utility. + +Moreover, our fine-tuning process relied heavily on trajectories generated by Claude-3.5 and the fine-tuned Qwen2.5-32B model. 
A more diverse training dataset incorporating examples from different models, tasks, and repositories could improve the robustness and generalization of fine-tuned models. Additionally, analyzing the impact of different dataset compositions and filtering strategies on model performance could yield valuable insights. + +Finally, the current evaluation focuses primarily on Python codebases. Extending LOCAGENT to support other programming languages and evaluating its performance across different language paradigms would better demonstrate its generalizability. Further, our evaluation metrics could be expanded to include more nuanced measures of localization quality beyond accuracy and NDCG. + +# References + +Aider. 2023. Building a better repository map with tree sitter. Accessed: April 15, 2025. +Anthropic. 2023. Claude: Conversational ai by anthropic. Accessed: January 21, 2025. +artificialanalysis.ai. 2025. Artificial analysis. https://artificialanalysis.ai/models/. Accessed: 2025-04-28. +Marcel Böhme, Ezekiel O Soremekun, Sudipta Chattopadhyay, Emamurho Ugherughe, and Andreas Zeller. 2017. Where is the bug and how is it fixed? an experiment with practitioners. In Proceedings of the 2017 11th joint meeting on foundations of software engineering, pages 117-128. + +Zehui Chen, Weihua Du, Wenwei Zhang, Kuikun Liu, Jiangning Liu, Miao Zheng, Jingming Zhuo, Songyang Zhang, Dahua Lin, Kai Chen, et al. 2024. T-eval: Evaluating the tool utilization capability of large language models step by step. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9510-9529. +Cognition.ai. 2024. Introducing devin, the first ai software engineer. +John Ellson, Emden Gansner, Lefteris Koutsofios, Stephen C North, and Gordon Woodhull. 2002. Graphviz—open source graph drawing tools. In Graph Drawing: 9th International Symposium, GD 2001 Vienna, Austria, September 23–26, 2001 Revised Papers 9, pages 483–484. Springer. 
+Bahare Fatemi, Jonathan Halcrow, and Bryan Perozzi. 2023. Talk like a graph: Encoding graphs for large language models. arXiv preprint arXiv:2310.04560. +Paul Gauthier. 2024. How aider scored sota $26.3\%$ on swe bench lite | aider. Accessed: January 21, 2025. +Jiafeng Guo, Yixing Fan, Qingyao Ai, and W Bruce Croft. 2016. A deep relevance matching model for ad-hoc retrieval. In Proceedings of the 25th ACM international on conference on information and knowledge management, pages 55-64. +Jiafeng Guo, Yixing Fan, Liang Pang, Liu Yang, Qingyao Ai, Hamed Zamani, Chen Wu, W Bruce Croft, and Xueqi Cheng. 2020. A deep look into neural ranking models for information retrieval. Information Processing & Management, 57(6):102067. +Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In International conference on machine learning, pages 3929-3938. PMLR. +Michael Gunther, Louis Milliken, Jonathan Geuter, Georgios Mastrupas, Bo Wang, and Han Xiao. 2023. Jina embeddings: A novel set of high-performance sentence embedding models. Preprint, arXiv:2307.11224. +Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685. +Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, Kai Dang, Yang Fan, Yichang Zhang, An Yang, Rui Men, Fei Huang, Bo Zheng, Yibo Miao, Shanghaoran Quan, Yunlong Feng, Xingzhang Ren, Xuancheng Ren, Jingren Zhou, and Junyang Lin. 2024. Qwen2.5-coder technical report. Preprint, arXiv:2409.12186. +Hyperbolic. 2025. Hyperbolic website. https:// hyperbolic.xyz/. Accessed: 2025-04-15. + +Carlos E Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik Narasimhan. 2023. Swe-bench: Can language models resolve real-world github issues? arXiv preprint arXiv:2310.06770. 
+Sungmin Kang, Gabin An, and Shin Yoo. 2023. A preliminary evaluation of llm-based fault localization. arXiv preprint arXiv:2308.05487. +Sungmin Kang, Gabin An, and Shin Yoo. 2024. A quantitative and qualitative evaluation of llm-based explainable fault localization. Proceedings of the ACM on Software Engineering, 1(FSE):1424-1446. +Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, et al. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems, 33:9459-9474. +Xiangyan Liu, Bo Lan, Zhiyuan Hu, Yang Liu, Zhicheng Zhang, Fei Wang, Michael Shieh, and Wenmeng Zhou. 2024. Codexgraph: Bridging large language models and code repositories via code graph databases. Preprint, arXiv:2408.03910. +Qingsong Lv, Ming Ding, Qiang Liu, Yuxiang Chen, Wenzheng Feng, Siming He, Chang Zhou, Jianguo Jiang, Yuxiao Dong, and Jie Tang. 2021. Are we really making much progress? revisiting, benchmarking and refining heterogeneous graph neural networks. In Proceedings of the 27th ACM SIGKDD conference on knowledge discovery & data mining, pages 1150-1160. +Yingwei Ma, Qingping Yang, Rongyu Cao, Binhua Li, Fei Huang, and Yongbin Li. 2024. How to understand whole software repository? arXiv e-prints, pages arXiv-2406. +Niels Mündler, Mark Müller, Jingxuan He, and Martin Vechev. 2024. Swt-bench: Testing and validating real-world bug-fixes with code agents. Advances in Neural Information Processing Systems, 37:81857-81887. +OpenAI. 2023. Chatgpt: Language model by openai. Accessed: January 21, 2025. +Siru Ouyang, Wenhao Yu, Kaixin Ma, Zilin Xiao, Zhihan Zhang, Mengzhao Jia, Jiawei Han, Hongming Zhang, and Dong Yu. 2025. Repograph: Enhancing AI software engineering with repository-level code graph. In The Thirteenth International Conference on Learning Representations. +PerplexityAI. 2023. 
Perplexity ai: An ai-powered search engine. Accessed: January 21, 2025. +Yihao Qin, Shangwen Wang, Yiling Lou, Jinhao Dong, Kaixin Wang, Xiaoling Li, and Xiaoguang Mao. 2024. Agentfl: Scaling llm-based fault localization to project-level context. arXiv preprint arXiv:2403.16362. + +Chen Qu, Liu Yang, Cen Chen, Minghui Qiu, W Bruce Croft, and Mohit Iyyer. 2020. Open-retrieval conversational question answering. In Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval, pages 539-548. +Stephen Robertson, Hugo Zaragoza, et al. 2009. The probabilistic relevance framework: Bm25 and beyond. Foundations and Trends® in Information Retrieval, 3(4):333-389. +Stephen E. Robertson, Steve Walker, Susan Jones, Micheline Hancock-Beaulieu, and Mike Gatford. 1994. Okapi at trec-3. In Text Retrieval Conference. +Tarun Suresh, Revanth Gangi Reddy, Yifei Xu, Zach Nussbaum, Andriy Mulyar, Brandon Duderstadt, and Heng Ji. 2024. Cornstack: High-quality contrastive data for better code ranking. arXiv preprint arXiv:2412.01007. +David A. Tomassi, Naji Dmeiri, Yichen Wang, Antara Bhowmick, Yen-Chuan Liu, Premkumar Devanbu, Bogdan Vasilescu, and Cindy Rubio-Gonzalez. 2019. Bugswarm: Mining and continuously growing a dataset of reproducible failures and fixes. Preprint, arXiv:1903.06725. +VoyageAI. 2024. Voyage-code-2: Elevate your code retrieval. Accessed: 2024-02-02. +Liang Wang, Nan Yang, Xiaolong Huang, Bixing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, and Furu Wei. 2022. Text embeddings by weakly-supervised contrastive pre-training. arXiv preprint arXiv:2212.03533. +Xingyao Wang, Boxuan Li, Yufan Song, Frank F. Xu, Xiangru Tang, Mingchen Zhuge, Jiayi Pan, Yueqi Song, Bowen Li, Jaskirat Singh, Hoang H. Tran, Fuqiang Li, Ren Ma, Mingzhang Zheng, Bill Qian, Yanjun Shao, Niklas Muennighoff, Yizhe Zhang, Binyuan Hui, Junyang Lin, Robert Brennan, Hao Peng, Heng Ji, and Graham Neubig. 2025. 
Open hands: An open platform for AI software developers as generalist agents. In The Thirteenth International Conference on Learning Representations. +Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2023a. Self-consistency improves chain of thought reasoning in language models. Preprint, arXiv:2203.11171. +Yue Wang, Hung Le, Akhilesh Deepak Gotmare, Nghi D. Q. Bui, Junnan Li, and Steven C. H. Hoi. 2023b. Codet5+: Open code large language models for code understanding and generation. Preprint, arXiv:2305.07922. +Zora Zhiruo Wang, Akari Asai, Xinyan Velocity Yu, Frank F. Xu, Yiqing Xie, Graham Neubig, and Daniel Fried. 2024. Coderag-bench: Can retrieval augment code generation? Preprint, arXiv:2406.14497. + +Yonghao Wu, Zheng Li, Jie M Zhang, Mike Papadakis, Mark Harman, and Yong Liu. 2023. Large language models in fault localisation. arXiv preprint arXiv:2308.15276. +Chunqiu Steven Xia, Yinlin Deng, Soren Dunn, and Lingming Zhang. 2024. Agentless: Demystifying llm-based software engineering agents. arXiv preprint arXiv:2407.01489. +John Yang, Carlos E Jimenez, Alexander Wettig, Kili-ian Lieret, Shunyu Yao, Karthik Narasimhan, and Ofir Press. 2024. Swe-agent: Agent-computer interfaces enable automated software engineering. arXiv preprint arXiv:2405.15793. +Zhongming Yu, Hejia Zhang, Yujie Zhao, Hanxian Huang, Matrix Yao, Ke Ding, and Jishen Zhao. 2025. Ocaloca: An llm agent framework for software issue localization. arXiv preprint arXiv:2502.00350. +Dejiao Zhang, Wasi Uddin Ahmad, Ming Tan, Hantian Ding, Ramesh Nallapati, Dan Roth, Xiaofei Ma, and Bing Xiang. 2024. CODE REPRESENTATION LEARNING AT SCALE. In The Twelfth International Conference on Learning Representations. +Albert Örwall. 2024. Moatless tools. 
# A LOCAGENT Design Details

# A.1 Tool Output Design

# A.1.1 Three-level format for SearchEntity output

Once invoked by the LLM agent, the retrieval APIs search for files, classes, methods, and code snippets in the codebase, and return the results back to the agent. To avoid forming very lengthy code context that may contain noisy information for the LLM, we return only necessary information as API outputs. To achieve this, we designed three granular standard output formats (Figure 6): fold, preview, and full code.

# A.1.2 Tree-based Subgraph Formatting for TraverseGraph Output

The TraverseGraph tool traverses the code graph and returns a local subgraph for each input entity. The agent reasons about these subgraphs to understand each entity's complex dependencies. However, reasoning about graphs remains challenging for LLMs. Research by (Fatemi et al., 2023) demonstrates that LLM performance varies significantly based on graph formatting (how graphs are encoded as text). This makes the format design for output subgraphs crucial.

We have developed a new tree-based format, shown in Figure 7, with several features that enhance LLM reasoning: (1) We represent subgraphs as trees, allowing LLMs to use indentation to determine a node's distance from the root, (2) We display complete entity IDs for each node (e.g., django/core/validators.py:RegexValidator) to help LLMs locate nodes easily, and (3) We explicitly specify relation types for each edge, including reversed relations.

To evaluate how different graph formats impact code localization performance, we conducted an experiment using 37 challenging samples from SWE-Bench-Lite. These samples were considered "challenging" because they could not be solved by any baseline agent methods. Using Claude-3.5 as the Localization Model across all settings, we compared various output formats. Table 9 presents our findings.
The baseline output formats we tested are described below:

- row: For each line, list one row of the adjacency matrix. For example,

function "fileA.py:funcA" invokes function "fileA.py:funcB", "fileA.py:funcC"

- row (w/ entity attributes): Additionally include entity attributes for format row.
- incident: The incident format mentioned in (Fatemi et al., 2023). An integer instead of entity ID is used to represent each node. For example,

Map function "fileA.py:funcA" to index 0. Map function "fileA.py:funcB" to index 1. Map function "fileA.py:funcC" to index 2.

function $0$ invokes function $1, 2$.

- Graphviz DOT: Represent graph in Graphviz DOT language (Ellson et al., 2002).
- JSON: Expand the subgraph as a tree, and convert it to JSON format.

As shown in Table 9, expanding subgraphs as trees (i.e., JSON, tree-based) can significantly improve the performance. Our tree-based format achieves the best overall performance across different levels of localization tasks. We also test returning entity attributes along with subgraphs. We notice that row (w/ entity attributes) consistently underperforms row, indicating the attributes for all the nodes may be very noisy. Besides, although using incident format can simplify the output and show improvements in file-level localization, it degrades the module- and function-level localization.

# A.2 Implementation

To enable the LLM agent to invoke the Code Localization APIs, we handle the interaction differently based on the LLM's capabilities. For LLMs that support tool-calling features, we define the tools as a list of JSON objects, which are then used as parameters for the API calls. For LLMs that do not support tool-calling (such as Qwen), we provide the description of the API and the expected output as part of the LLM's prompt. When the agent decides to invoke a set of retrieval APIs, it responds with a list of API call names and their corresponding arguments.
These retrieval API requests are processed locally by searching over the built code graph. The results from executing these APIs locally are returned to the agent. + +By default, we query the LLM with a temperature setting of 1.0. We conduct two interactions, after which we rerank the results based on mean reciprocal rank (MRR) scores. We also leverage multiprocessing execution to speed up the process. Since all our tools are read-only, LOCAGENT does + +![](images/e3da4d4339e99b1a14d6f8d73ea975c643cc44ad95a8dc1803fff42294d4f99a.jpg) +Figure 6: Different output formats designed for efficient agent-code interaction. Left: Full code output when matched entities $\leq 3$ . Middle: Preview output showing module skeleton for large files. Right: Fold output showing only entity IDs when matches $>3$ . + +![](images/003dcde246a439f5b9b36cc33df7c37daaa5e4eaf0478ed30be80b30cbe72965.jpg) + +![](images/86b464c56ef20eb2e4a58b1077f46f88aa113910e37ea427df9e03b94670489d.jpg) + +![](images/503072355158697ea6cb6ac7e2712ec0ffc412f1cd70207465e4cb36e21f3c9b.jpg) +Figure 7: A truncated example of the expanded tree-based format for the output subgraph of tool TraverseGraph. + +not require a specialized Docker environment to operate. + +# B Dataset construction and statistics + +# B.1 Dataset construction details + +Example collection. We collected examples on popular Python repositories on Github follow (Jimenez et al., 2023). To gather issues related to performance and security, we searched for the keywords listed in Table 10 using the GitHub Search APIs. We then used GPT-4o-2024-0513 as the classifier based on the issue descriptions. + +Ground Truth Locations. The affected files or functions in the original codebase, as identified in + +the patches, are considered the target locations for the given issue. While it is possible to fix a bug in a location different from the ground truth, the extracted ground-truth locations still serve as approximate targets for localization. 
Additionally, edited code such as documents, import statements, and comments are excluded from the localization target. These elements are not considered relevant for bug localization, as they do not directly impact the functionality of the code or its execution. By filtering out these elements, the focus is maintained on the core code changes that are relevant for localization. + +
Output FormatFile(%)Module(%)Function(%)
Acc@1Acc@3Acc@5Acc@5Acc@10Acc@5Acc@10
row41.1867.6570.5961.7661.7635.2938.24
row (w/ entity attributes)41.1864.7164.7150.0050.0032.3532.35
incident41.1870.5973.5355.8855.8829.4132.35
Graphviz DOT41.1873.5382.3564.7164.7135.2935.29
JSON41.1867.6576.4767.6570.5938.2441.18
tree-based (Ours)47.0679.4179.4164.7164.7138.2441.18
+ +Table 9: Localization performance under different TraverseGraph output formats. + +
CategoryKeywords
Performancebottleneck, performance improvement, memory usage optimization, time complexity reduction, latency improvement, scalability improvement, CPU usage reduction, caching improvement, concurrency optimization
SecurityOut-of-bounds Write, Out-of-bounds Read, NULL Pointer Dereference, Missing Authorization, memory leak fix, security vulnerability, security issue, authentication bypass, authentication issue, better maintained, buffer overflow, denial of service, security hardening, security patch, unsafe deserialization, Use After Free, Integer Overflow or Wraparound, Uncontrolled Resource Consumption, Missing Authentication for Critical Function
+ +Table 10: We use these Keywords to search for Performance and Security related issues with Github Search APIs. + +# C Additional Experiments + +# C.1 Implementation Details + +# C.1.1 Baselines Implementation + +Regarding the embedding-based methods in our evaluation, these approaches operate primarily at the function level, where each function is embedded as a separate unit. The function's context (its containing file and class) is appended to the function representation before embedding, rather than being embedded separately. While theoretically these methods could employ hierarchical indexing, the standard implementations we evaluated use flat indexing structures where each function is embedded as a single unit. + +We use OpenHands's remote runtime feature to parallelize evaluation on OpenHands and SWEagent. We use Openhands version 0.12.0 released on Oct 31, 2024. + +# C.1.2 Quantifying Task Difficulty Based on Code Graph Distance + +We measure task difficulty by computing the average shortest hop distance between the functions mentioned in the issue descriptions and the patched functions within our code graph. Specifically, we first extract potential function names from each + +issue description using GPT-4o-2024-0513, and identify their corresponding nodes in the code graph using the global dictionary. These identified nodes form the set of predicted nodes, denoted as $\mathcal{C}$ . Similarly, we link the ground truth functions from the patch to their corresponding nodes in the code graph, forming the set of target nodes, denoted as $\mathcal{T}$ . To quantify the difficulty $\delta$ , we calculate the average shortest hop distance between the predicted nodes $\mathcal{C}$ and the target nodes $\mathcal{T}$ , defined as: + +$$ +\delta = \frac {1}{| \mathcal {C} |} \sum_ {c \in \mathcal {C}} \frac {1}{m i n _ {t \in \mathcal {T}} d (c , t) + 1} +$$ + +where $d(c, t)$ represents the shortest hop distance between nodes $c$ and $t$ in the graph. 
For performance analysis stratified by difficulty, we round $\delta$ down to $\lfloor \delta \rfloor$ to group samples by difficulty levels, and we exclude samples where the LLM fails to extract any valid function names. + +# C.1.3 Training details. + +Fine-tuning Settings. We use Qwen-2.5-Coder-Instruct (Hui et al., 2024) 7B and 32B variants as our base models. We fine-tuned Qwen-2.5-Coder-Instruct 7B and 32B models on 768 training samples from the SWE-Bench training dataset, leveraging LoRA + +
TypeMethodLoc-ModelFile (%)Module (%)Function (%)
NDCG@1NDCG@3NDCG@5NDCG@5NDCG@10NDCG@5NDCG@10
Embedding-BasedBM25 (Robertson et al., 2009)38.6946.550.6137.3139.8626.1527.92
E5-base-v2 (Wang et al., 2022)49.6464.1966.653.1554.4531.3935.3
Jina-Code-v2 (Günther et al., 2023)43.4359.9363.751.0254.1333.2836.44
Codesage-large-v2 (Zhang et al., 2024)47.8160.8264.3949.3852.2227.0330.74
CodeRankEmbed (Suresh et al., 2024)52.5567.5470.3957.5159.7640.2842.55
Procedure-BasedAgentless (Xia et al., 2024)GPT-4o67.1571.7671.7664.3164.3153.8153.81
Claude-3.572.6376.7276.8767.3667.3657.5557.55
Agent-BasedMoatlessTools (Örwall, 2024)GPT-4o73.3680.0380.3368.5769.0949.7750.62
Claude-3.572.6380.7380.8869.1169.1153.0353.16
SWE-agent (Yang et al., 2024)GPT-4o57.363.9664.1253.9553.9542.3242.44
Claude-3.577.3784.3284.9372.7772.959.6759.79
OpenHands (Wang et al., 2025)GPT-4o60.9567.6268.3958.1858.644.3444.66
Claude-3.576.2884.2784.4375.7975.9263.1363.8
LocAgent (Ours)Qwen2.5-7B(ft)70.8079.3680.970.9971.6855.6258.09
Qwen2.5-32B(ft)75.9184.7485.6476.2876.7764.2765.93
Claude-3.577.7486.1987.1477.7378.164.3465.57
+ +Table 11: NDCG scores comparison showing ranking quality of different methods. + +for efficient adaptation. The training set included 447 samples generated by Claude-3.5, while the remaining samples were iteratively generated using the fine-tuned Qwen2.5-32B model. The fine-tuning process was conducted over 5 epochs with max_token set to $128k$ and a learning rate of $2 \times 10^{-4}$ . + +# D Prompt + +In this section, we go through the prompt template that make up the agent's history. + +# Prompt + +Given the following GitHub problem description, your objective is to localize the specific files, classes or functions, and lines of code that need modification or contain key information to resolve the issue. + +Follow these steps to localize the issue: + +Step 1: Categorize and Extract Key Problem Information + +- Classify the problem statement into the following categories: +Problem description, error trace, code to reproduce the bug, and additional context. +- Identify modules in the '{package_name}' package mentioned in each category. +- Use extracted keywords and line numbers to search for relevant code references for additional context. + +Step 2: Locate Referenced Modules + +Accurately determine specific modules + +- Explore the repo to familiarize yourself with its structure. + +- Analyze the described execution flow to identify specific modules or components being referenced. + +- Pay special attention to distinguishing between modules with similar names using context and described execution flow. + +- Output Format for collected relevant modules: + +- Use the format: 'file path:QualifiedName' + +- E.q., for a function `calculate_sum` in the `MathUtilities` class located in `src/helpers/mathHelpers.py`, represent it as: + +'src/helpers/mathHelpers.py:MathUtil calculator_sum'. 
+ +## Step 3: Analyze and Reproducing the Problem + +Clarify the Purpose of the Issue + +- If expanding capabilities: Identify where and how to incorporate new behavior, fields, or modules. + +- If addressing unexpected behavior: Focus on localizing modules containing potential bugs. + +- Reconstruct the execution flow + +- Identify main entry points triggering the issue. + +- Trace function calls, class interactions, and sequences of events. + +- Identify potential breakpoints causing the issue. + +Important: Keep the reconstructed flow focused on the problem, avoiding irrelevant details. + +## Step 4: Locate Areas for Modification + +- Locate specific files, functions, or lines of code requiring changes or containing critical information for resolving the issue. + +- Consider upstream and downstream dependencies that may affect or be affected by the issue. + +- If applicable, identify where to introduce new fields, functions, or variables. + +- Think Thoroughly: List multiple potential solutions and consider edge cases that could impact the resolution. + +Output Format for Final Results: + +Your final output should list the locations requiring modification, wrapped with triple back ticks + +Each location should include the file path, class name (if applicable), function name, or line numbers, ordered by importance. + +Your answer would better include about 5 files. + +Examples: + +full_path1/file1.py + +line: 10 + +class: MyClass1 + +function: my_function1 + +full path2/file2.py + +line:76 + +function: MyClass2.my_function2 + +full_path3/file3.py + +line: 24 + +line: 156 + +function: my_function3 + +# + +Return just the location(s) + +Note: Your thinking should be thorough and so it's fine if it's very long. + +Figure 8: The task instruction prompt for LOCAGENT. 
\ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09089/images/003dcde246a439f5b9b36cc33df7c37daaa5e4eaf0478ed30be80b30cbe72965.jpg b/data/2025/2503_09xxx/2503.09089/images/003dcde246a439f5b9b36cc33df7c37daaa5e4eaf0478ed30be80b30cbe72965.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bfcdb2fc52cc9aa8815b925891732ba1de98f474 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/003dcde246a439f5b9b36cc33df7c37daaa5e4eaf0478ed30be80b30cbe72965.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc575a586cf7fdf9f989df73df02bb6cd8e953c6c02e2ce64d1b157f26b26dca +size 45844 diff --git a/data/2025/2503_09xxx/2503.09089/images/2976dea7e0ff07dfee13b430fc8f8efa94af68a9c7230eb416ed229809e5c751.jpg b/data/2025/2503_09xxx/2503.09089/images/2976dea7e0ff07dfee13b430fc8f8efa94af68a9c7230eb416ed229809e5c751.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e9bb096e161e2ee1eaed7b68054d00aab74bbb8 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/2976dea7e0ff07dfee13b430fc8f8efa94af68a9c7230eb416ed229809e5c751.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fc65166d01b05e39ed05ae94db0571500b8a687cd11f45dbfae5f3e6d50600a +size 100879 diff --git a/data/2025/2503_09xxx/2503.09089/images/3501fac23dcf5fe773840a66ad8f7737fdc4bb7268e280936b833deb06905a3d.jpg b/data/2025/2503_09xxx/2503.09089/images/3501fac23dcf5fe773840a66ad8f7737fdc4bb7268e280936b833deb06905a3d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bea23f6850f1c29ceb241633c5c02c9ea1d59724 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/3501fac23dcf5fe773840a66ad8f7737fdc4bb7268e280936b833deb06905a3d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:996240c866386bbb2a14f6dcf1a8dd75e2e2d48f5516ef6098abf61c92d68aea +size 5647 diff --git 
a/data/2025/2503_09xxx/2503.09089/images/3ee9190f79ad7623fb4b180523bd53a7f66d2934c043f7c4febdadae07f9b9c0.jpg b/data/2025/2503_09xxx/2503.09089/images/3ee9190f79ad7623fb4b180523bd53a7f66d2934c043f7c4febdadae07f9b9c0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e83c1c2b73cff18c75300b603dc8113bada2e41f --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/3ee9190f79ad7623fb4b180523bd53a7f66d2934c043f7c4febdadae07f9b9c0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1afe305bac7a028fd1116040e965efcb130d12ad80aac94aab20085dfb18a113 +size 37757 diff --git a/data/2025/2503_09xxx/2503.09089/images/40113cdec1e1feca7000cb77d3a5294a6135d63a0d343288699aecc05a7b7a58.jpg b/data/2025/2503_09xxx/2503.09089/images/40113cdec1e1feca7000cb77d3a5294a6135d63a0d343288699aecc05a7b7a58.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d04587655199c0e6d3aedd8cf7b186a6e196fd14 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/40113cdec1e1feca7000cb77d3a5294a6135d63a0d343288699aecc05a7b7a58.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b9bae937c708ee8867f37f7e8296f16bfb7c850df5abf00ec951f457f89f5e5 +size 19964 diff --git a/data/2025/2503_09xxx/2503.09089/images/503072355158697ea6cb6ac7e2712ec0ffc412f1cd70207465e4cb36e21f3c9b.jpg b/data/2025/2503_09xxx/2503.09089/images/503072355158697ea6cb6ac7e2712ec0ffc412f1cd70207465e4cb36e21f3c9b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ad84ddcbf8ca490863ee406c35aeceb90495176 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/503072355158697ea6cb6ac7e2712ec0ffc412f1cd70207465e4cb36e21f3c9b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a702baf6f4f227b62302398031623083a8f6ed6e13aef49abd0dc6056e7283c9 +size 71435 diff --git a/data/2025/2503_09xxx/2503.09089/images/594e4e848668151fa0f0d585dcf3a37fcc51677d8a82c5fdceaf97947d73c7bb.jpg 
b/data/2025/2503_09xxx/2503.09089/images/594e4e848668151fa0f0d585dcf3a37fcc51677d8a82c5fdceaf97947d73c7bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ab3405884e1ee9cb1ae83824d9c56bc36cddcddb --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/594e4e848668151fa0f0d585dcf3a37fcc51677d8a82c5fdceaf97947d73c7bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6633f57d2efd165672f11cd9e50d5a24e64159dbc0865ee6fbe80a128274465e +size 25426 diff --git a/data/2025/2503_09xxx/2503.09089/images/65aa2b242676fd4ce7ba9e60ceadb537140f50dff384a8c6651710edad591512.jpg b/data/2025/2503_09xxx/2503.09089/images/65aa2b242676fd4ce7ba9e60ceadb537140f50dff384a8c6651710edad591512.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f79e87900fc0b1115f3a06b1e0b5d659f5715b9 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/65aa2b242676fd4ce7ba9e60ceadb537140f50dff384a8c6651710edad591512.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5f93ec1c7a16b1fb4956035928c140561e786ea3f15e2011d31516f1871c6e1 +size 67474 diff --git a/data/2025/2503_09xxx/2503.09089/images/68d78298f4ef4a0462b402972796e7a35e59e5ec5b78ccafd6cd08b74dd8ad0d.jpg b/data/2025/2503_09xxx/2503.09089/images/68d78298f4ef4a0462b402972796e7a35e59e5ec5b78ccafd6cd08b74dd8ad0d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a163baf861756cd345b96722da6fba8fcd6eff7b --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/68d78298f4ef4a0462b402972796e7a35e59e5ec5b78ccafd6cd08b74dd8ad0d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7c9e062dfc053cf334aec50c371c4118e5a6209f429e47f0e09aa500c7b3f23 +size 110996 diff --git a/data/2025/2503_09xxx/2503.09089/images/6c9737e3fa48ebc0fa6a563806676ce4a098be8490a88f39f510258c9a12539f.jpg b/data/2025/2503_09xxx/2503.09089/images/6c9737e3fa48ebc0fa6a563806676ce4a098be8490a88f39f510258c9a12539f.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..3fbbd61040e8bf90d0e5249191d201fa1ee4fe91 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/6c9737e3fa48ebc0fa6a563806676ce4a098be8490a88f39f510258c9a12539f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0ee1b030040da1256498b06305feb45019c604f662dadf93cf3f6a28c71b95e +size 73598 diff --git a/data/2025/2503_09xxx/2503.09089/images/86b464c56ef20eb2e4a58b1077f46f88aa113910e37ea427df9e03b94670489d.jpg b/data/2025/2503_09xxx/2503.09089/images/86b464c56ef20eb2e4a58b1077f46f88aa113910e37ea427df9e03b94670489d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..066c41d9195083f80ff956a62d9a6695d9243122 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/86b464c56ef20eb2e4a58b1077f46f88aa113910e37ea427df9e03b94670489d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2630e3da3e65110f2706bb9b1663351032e23b1637dd6ef1776b3ecd0f77555e +size 44110 diff --git a/data/2025/2503_09xxx/2503.09089/images/ad9888427e4959ea8df866a9f22f434e24d97aa01a98851cc1f0d57d84fed76f.jpg b/data/2025/2503_09xxx/2503.09089/images/ad9888427e4959ea8df866a9f22f434e24d97aa01a98851cc1f0d57d84fed76f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..442e486fdb6d1abea3738a8ebb21880197b942c0 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/ad9888427e4959ea8df866a9f22f434e24d97aa01a98851cc1f0d57d84fed76f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:291403f680dd739a4972468085376d6491c23ede05a006fbdab3d6c4d6d88509 +size 143607 diff --git a/data/2025/2503_09xxx/2503.09089/images/c20269c8cebd98330e89168b5cf72fa71b25a1845eac8027149a6e7bbe018c9f.jpg b/data/2025/2503_09xxx/2503.09089/images/c20269c8cebd98330e89168b5cf72fa71b25a1845eac8027149a6e7bbe018c9f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ca74a8228eb7f118d1c7447c14b148581ce2b884 --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09089/images/c20269c8cebd98330e89168b5cf72fa71b25a1845eac8027149a6e7bbe018c9f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7f197658a0015312cda5fac0a76d44b49aa8ddc2eb17ac4445bb1bc7ef20007 +size 46192 diff --git a/data/2025/2503_09xxx/2503.09089/images/ca58188c6fe4c2ffec34aeccdb6ee5a73bdf63c458fea192f02d6865ca76db6a.jpg b/data/2025/2503_09xxx/2503.09089/images/ca58188c6fe4c2ffec34aeccdb6ee5a73bdf63c458fea192f02d6865ca76db6a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ae1ac2578c8fd322958fb5d5fb2b915ab7022b1 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/ca58188c6fe4c2ffec34aeccdb6ee5a73bdf63c458fea192f02d6865ca76db6a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a16181fe8e00966c752d3258670373f7e4708d0b0bc2829647e1135945d8dcc7 +size 31899 diff --git a/data/2025/2503_09xxx/2503.09089/images/cbbf57fce918f5d0e7521a35c3e24e9a0612bad18584b2403a531e9d40562501.jpg b/data/2025/2503_09xxx/2503.09089/images/cbbf57fce918f5d0e7521a35c3e24e9a0612bad18584b2403a531e9d40562501.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0a8218a11bf823528d33153e91a4a64b2300347 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/cbbf57fce918f5d0e7521a35c3e24e9a0612bad18584b2403a531e9d40562501.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bd8526a7331d1be7a18846bf93a395028b4f82729d7036334793253fccf7a8e +size 41001 diff --git a/data/2025/2503_09xxx/2503.09089/images/cd90b427caeb94e68300cb0963a3939e90034375ecc1b70e22bb7ba12b53914b.jpg b/data/2025/2503_09xxx/2503.09089/images/cd90b427caeb94e68300cb0963a3939e90034375ecc1b70e22bb7ba12b53914b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..64ff9f75a18dd1e086d913d42744e9d58cf7f18b --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/cd90b427caeb94e68300cb0963a3939e90034375ecc1b70e22bb7ba12b53914b.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:1336048e02d816dd47e3c18f0e0a3980c102306abcaba58fad85b20d4bc1b1dd +size 22870 diff --git a/data/2025/2503_09xxx/2503.09089/images/d58f8658622664aa6fdd9f0ba4233e824e52755bf040929bc8346fe186a5d5e3.jpg b/data/2025/2503_09xxx/2503.09089/images/d58f8658622664aa6fdd9f0ba4233e824e52755bf040929bc8346fe186a5d5e3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ce60ed004b54693ebd3994c345173a018bcb18d --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/d58f8658622664aa6fdd9f0ba4233e824e52755bf040929bc8346fe186a5d5e3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6757c07f1143f174872373a9e409aad2711a39065a1a4a31530f79b87de90d45 +size 116702 diff --git a/data/2025/2503_09xxx/2503.09089/images/dd0a043ec99e4aa7d3cdd9cab21f0f8c8ff70d258cc383c3b7961f81be3880dc.jpg b/data/2025/2503_09xxx/2503.09089/images/dd0a043ec99e4aa7d3cdd9cab21f0f8c8ff70d258cc383c3b7961f81be3880dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f881d8d0205ae47dbaea99e553ac2b45e6ad4fba --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/dd0a043ec99e4aa7d3cdd9cab21f0f8c8ff70d258cc383c3b7961f81be3880dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8867e6c8d590feed919d7a251a6d03c93c61020a77129f4d0011713354f47a6d +size 27240 diff --git a/data/2025/2503_09xxx/2503.09089/images/dffede0cb6e1140d33ef35874b6f55be329c72cec08ada3fd8a7e8684a261136.jpg b/data/2025/2503_09xxx/2503.09089/images/dffede0cb6e1140d33ef35874b6f55be329c72cec08ada3fd8a7e8684a261136.jpg new file mode 100644 index 0000000000000000000000000000000000000000..37f2731989c46867b8ec51c01ca494c30a19b0c6 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/dffede0cb6e1140d33ef35874b6f55be329c72cec08ada3fd8a7e8684a261136.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b71f5480e0149300917298cb40123a44d5e30ee5ffe2d4f0eaffc7383c4c5998 +size 37436 diff --git 
a/data/2025/2503_09xxx/2503.09089/images/e3da4d4339e99b1a14d6f8d73ea975c643cc44ad95a8dc1803fff42294d4f99a.jpg b/data/2025/2503_09xxx/2503.09089/images/e3da4d4339e99b1a14d6f8d73ea975c643cc44ad95a8dc1803fff42294d4f99a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..26b0469e960e76d4e8c7ac5e3632a0d4deffbbf2 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/e3da4d4339e99b1a14d6f8d73ea975c643cc44ad95a8dc1803fff42294d4f99a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b62d73dcdf3d355424bfc175e4c72ec1168b141a0358cdb8f918fb2646996e4e +size 44580 diff --git a/data/2025/2503_09xxx/2503.09089/images/eff607d6667dc8fa01afca421fe5518165076b8c38bf5f9855a1411d560992d1.jpg b/data/2025/2503_09xxx/2503.09089/images/eff607d6667dc8fa01afca421fe5518165076b8c38bf5f9855a1411d560992d1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5fa9fab95abb8e81a77212667f7f6ee8aa391417 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/eff607d6667dc8fa01afca421fe5518165076b8c38bf5f9855a1411d560992d1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6a1b69ea0fbad50753f055ac04c162b0c32e4a1b74f8b321bf3f2db8b9cbdae +size 57871 diff --git a/data/2025/2503_09xxx/2503.09089/images/f3ff500f60d006d9ab2100b2e34cdaaed024cc944a6d5f1861937bb334d1a3cf.jpg b/data/2025/2503_09xxx/2503.09089/images/f3ff500f60d006d9ab2100b2e34cdaaed024cc944a6d5f1861937bb334d1a3cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68d65a5ac9b3e4bfe912865adc2c938d32f76949 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/f3ff500f60d006d9ab2100b2e34cdaaed024cc944a6d5f1861937bb334d1a3cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69686cb696cf22b75f9ae0a4956471984930e534a992795935f1d8b4304b1fac +size 25224 diff --git a/data/2025/2503_09xxx/2503.09089/images/f929dd53f4adf74c47eab0524e9c10d9df2c8753a81e68bd756d7c86d5d4876b.jpg 
b/data/2025/2503_09xxx/2503.09089/images/f929dd53f4adf74c47eab0524e9c10d9df2c8753a81e68bd756d7c86d5d4876b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..269d5041048795b02f4a9885a23a773208d03ecf --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/images/f929dd53f4adf74c47eab0524e9c10d9df2c8753a81e68bd756d7c86d5d4876b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:452e68c1be55d2c8514908d04a61ae7685f84dff9395401100eb5e374a78f03b +size 29884 diff --git a/data/2025/2503_09xxx/2503.09089/layout.json b/data/2025/2503_09xxx/2503.09089/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..0e3c6e0a7e5999649dbeadf375888a8c35cfa24e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09089/layout.json @@ -0,0 +1,10733 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 76, + 487, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 76, + 487, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 76, + 487, + 95 + ], + "type": "text", + "content": "LocAgent: Graph-Guided LLM Agents for Code Localization" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 114, + 534, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 114, + 534, + 148 + ], + "spans": [ + { + "bbox": [ + 61, + 114, + 534, + 148 + ], + "type": "text", + "content": "Zhaoling Chen\\*, Xiangru Tang\\*, Gangda Deng\\*, Fang Wu\\*, Jialong Wu\\*, Zhiwei Jiang, Viktor Prasanna\\*, Arman Cohan\\*, Xingyao Wang" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 62, + 148, + 531, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 148, + 531, + 179 + ], + "spans": [ + { + "bbox": [ + 62, + 148, + 531, + 179 + ], + "type": "inline_equation", + "content": "^{\\spadesuit}" + }, + { + "bbox": [ + 62, + 148, + 531, + 179 + ], + "type": "text", + "content": "Yale University " + }, + { + "bbox": [ + 62, + 148, + 531, + 179 + ], + "type": 
"inline_equation", + "content": "^{\\spadesuit}" + }, + { + "bbox": [ + 62, + 148, + 531, + 179 + ], + "type": "text", + "content": "University of Southern California " + }, + { + "bbox": [ + 62, + 148, + 531, + 179 + ], + "type": "inline_equation", + "content": "^{\\spadesuit}" + }, + { + "bbox": [ + 62, + 148, + 531, + 179 + ], + "type": "text", + "content": "Stanford University " + }, + { + "bbox": [ + 62, + 148, + 531, + 179 + ], + "type": "inline_equation", + "content": "^{\\spadesuit}" + }, + { + "bbox": [ + 62, + 148, + 531, + 179 + ], + "type": "text", + "content": "All Hands AI xiangru.tang@yale.edu, gangdade@usc.edu, xingyao@all-hands.dev" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "spans": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 84, + 242, + 274, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 242, + 274, + 624 + ], + "spans": [ + { + "bbox": [ + 84, + 242, + 274, + 624 + ], + "type": "text", + "content": "Code localization—identifying precisely where in a codebase changes need to be made—is a fundamental yet challenging task in software maintenance. Existing approaches struggle to efficiently navigate complex codebases when identifying relevant code sections. The challenge lies in bridging natural language problem descriptions with the appropriate code elements, often requiring reasoning across hierarchical structures and multiple dependencies. We introduce LOCAGENT, a framework that addresses code localization through graph-based representation. 
By parsing codebases into directed heterogeneous graphs, LOCAGENT creates a lightweight representation that captures code structures (files, classes, functions) and their dependencies (imports, invocations, inheritance), enabling LLM agents to effectively search and locate relevant entities through powerful multi-hop reasoning. Experimental results on real-world benchmarks demonstrate that our approach significantly enhances accuracy in code localization. Notably, our method with the fine-tuned Qwen-2.5-Coder-Instruct-32B model achieves comparable results to SOTA proprietary models at greatly reduced cost (approximately " + }, + { + "bbox": [ + 84, + 242, + 274, + 624 + ], + "type": "inline_equation", + "content": "86\\%" + }, + { + "bbox": [ + 84, + 242, + 274, + 624 + ], + "type": "text", + "content": " reduction), reaching up to " + }, + { + "bbox": [ + 84, + 242, + 274, + 624 + ], + "type": "inline_equation", + "content": "92.7\\%" + }, + { + "bbox": [ + 84, + 242, + 274, + 624 + ], + "type": "text", + "content": " accuracy on file-level localization while improving downstream GitHub issue resolution success rates by " + }, + { + "bbox": [ + 84, + 242, + 274, + 624 + ], + "type": "inline_equation", + "content": "12\\%" + }, + { + "bbox": [ + 84, + 242, + 274, + 624 + ], + "type": "text", + "content": " for multiple attempts (Pass@10). Our code is available at https://github.com/gersteinlab/LocAgent." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 644, + 154, + 656 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 644, + 154, + 656 + ], + "spans": [ + { + "bbox": [ + 68, + 644, + 154, + 656 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 665, + 290, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 665, + 290, + 746 + ], + "spans": [ + { + "bbox": [ + 67, + 665, + 290, + 746 + ], + "type": "text", + "content": "Code localization can be viewed as an information retrieval (IR) task that aims to identify relevant code snippets given natural language descriptions (Yu et al., 2025; Yang et al., 2024; Xia et al., 2024). Developers spend up to " + }, + { + "bbox": [ + 67, + 665, + 290, + 746 + ], + "type": "inline_equation", + "content": "66\\%" + }, + { + "bbox": [ + 67, + 665, + 290, + 746 + ], + "type": "text", + "content": " of their debugging time (Böhme et al., 2017) understanding code to" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 304, + 215, + 525, + 374 + ], + "blocks": [ + { + "bbox": [ + 304, + 215, + 525, + 374 + ], + "lines": [ + { + "bbox": [ + 304, + 215, + 525, + 374 + ], + "spans": [ + { + "bbox": [ + 304, + 215, + 525, + 374 + ], + "type": "image", + "image_path": "c20269c8cebd98330e89168b5cf72fa71b25a1845eac8027149a6e7bbe018c9f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 382, + 525, + 442 + ], + "lines": [ + { + "bbox": [ + 302, + 382, + 525, + 442 + ], + "spans": [ + { + "bbox": [ + 302, + 382, + 525, + 442 + ], + "type": "text", + "content": "Figure 1: Code localization across four common programming scenarios. Given a codebase and an issue description, the goal of code localization is to identify the relevant code snippets that require modification to resolve the issue." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 449, + 526, + 760 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 449, + 526, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 449, + 526, + 760 + ], + "type": "text", + "content": "make changes, and automated tools often struggle with the same challenge. Poor code localization leads to incomplete fixes, introduces new bugs, and significantly extends development cycles. Unlike traditional retrieval tasks that primarily focus on lexical or semantic matching between queries and documents (Guo et al., 2016, 2020), code localization requires bridging the gap between natural language and programming languages. It also necessitates reasoning capabilities to analyze the issue, while considering the structural and semantic properties of code (Lewis et al., 2020; Guu et al., 2020; Qu et al., 2020). This capability has become fundamental to powerful AI assistants (OpenAI, 2023; Anthropic, 2023), code-aware search engines (PerplexityAI, 2023), and automated programming agents (Cognition.ai, 2024; Wang et al., 2025; Gauthier, 2024). In particular, accurate code localization is crucial for software maintenance and evolution, as it enables precise code modifications for bug fixes, refactoring, and feature additions (Wang et al., 2024), thereby streamlining the development workflow." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 761, + 524, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 761, + 524, + 775 + ], + "spans": [ + { + "bbox": [ + 314, + 761, + 524, + 775 + ], + "type": "text", + "content": "Existing approaches to code localization face" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 13, + 261, + 36, + 608 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 13, + 261, + 36, + 608 + ], + "spans": [ + { + "bbox": [ + 13, + 261, + 36, + 608 + ], + "type": "text", + "content": "arXiv:2503.09089v2 [cs.SE] 29 Apr 2025" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 752, + 290, + 775 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 752, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 752, + 290, + 775 + ], + "type": "text", + "content": "* Equal contribution. This work was done during Zhaoling's time at Yale." + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 289, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 289, + 449 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 289, + 449 + ], + "type": "text", + "content": "significant limitations. Dense retrieval methods require maintaining and continuously updating vector representations of the entire codebase (Wang et al., 2023b; Günther et al., 2023), creating engineering challenges for large, evolving repositories where code changes frequently. While LLMs demonstrate strong code understanding capabilities (Kang et al., 2023; Wu et al., 2023), models with large context windows cannot process entire codebases at once, necessitating strategic navigation through relevant parts. Moreover, issue descriptions often mention only symptoms rather than underlying causes. 
For instance, a report of 'XSS vulnerability in user profile' might require changes to a shared validation utility used throughout the codebase but not explicitly referenced in the issue. This disconnect between issue descriptions and affected code components presents a substantial challenge for traditional retrieval approaches, which struggle to trace implicit dependencies across the codebase structure. Recent agent-based methods attempt to address these limitations through iterative exploration (Yang et al., 2024; Qin et al., 2024) but still struggle to efficiently navigate and comprehend complex code structures and dependencies, particularly when multi-hop reasoning is required to trace from issue descriptions to affected code regions that aren't directly mentioned." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 450, + 289, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 450, + 289, + 557 + ], + "spans": [ + { + "bbox": [ + 69, + 450, + 289, + 557 + ], + "type": "text", + "content": "This raises a key question: How can we design efficient indexing as intermediate representations that are structure-aware and both easy and performant for LLM agents to consume? It is intuitive to design an agentic retrieval system that carefully combines traditional IR methods and LLM agent's reasoning ability to achieve accurate, efficient, and cost-effective code localization in codebases." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 559, + 289, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 559, + 289, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 559, + 289, + 774 + ], + "type": "text", + "content": "To address this challenge, we propose LOCAGENT, a framework that builds directed heterogeneous graph indexing to unify code structures, dependencies, and contents. 
Our approach leverages a structured graph representation that enables powerful multi-hop reasoning capabilities, allowing agents to navigate complex dependency relationships between code elements even when target code isn't explicitly mentioned in issue descriptions. This graph-based approach significantly outperforms previous methods on challenging localization tasks that require traversing multiple code relationships. Our lightweight representation, coupled with sparse indexing techniques, enables efficient entity search while maintaining rich structural information. The indexing process typically" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 305, + 71, + 524, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 71, + 524, + 273 + ], + "spans": [ + { + "bbox": [ + 305, + 71, + 524, + 273 + ], + "type": "text", + "content": "takes only a few seconds per codebase, making it highly practical for real-time use. The framework integrates a set of unified tools that guide the agent through a systematic exploration of the codebase, allowing autonomous navigation based on contextual needs. Furthermore, by fine-tuning Qwen-2.5-Coder-Instruct (Hui et al., 2024) 7B and 32B models(abbr. as Qwen-2.5-7B and Qwen-2.5-32B respectively), our system achieves performance comparable to state-of-the-art models like Claude-3-5-sonnet-20241022 (Anthropic, 2023) (abbr. as Claude-3.5) while significantly reducing API costs by over " + }, + { + "bbox": [ + 305, + 71, + 524, + 273 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 305, + 71, + 524, + 273 + ], + "type": "text", + "content": " (from \\" + }, + { + "bbox": [ + 305, + 71, + 524, + 273 + ], + "type": "inline_equation", + "content": "0.66 to \\" + }, + { + "bbox": [ + 305, + 71, + 524, + 273 + ], + "type": "text", + "content": "0.09 per example), making it practical for real-world deployment." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 305, + 275, + 524, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 275, + 524, + 529 + ], + "spans": [ + { + "bbox": [ + 305, + 275, + 524, + 529 + ], + "type": "text", + "content": "Additionally, to facilitate a comprehensive evaluation of code localization methods, we introduce LOC-BENCH, a new benchmark specifically designed for this task. Existing benchmarks like SWE-Bench present significant limitations: (1) they risk contamination through data overlap with LLM training sets (Mündler et al., 2024), and (2) they primarily focus on bug fixing, lacking diversity in maintenance scenarios such as feature requests, performance optimizations, and security fixes. In contrast, LOC-BENCH covers diverse scenarios and mitigates potential contamination concerns by incorporating more recent examples from popular Python repositories collected after known LLM training cutoff dates. Additionally, we provide tooling to continuously update the benchmark with new examples, allowing researchers to maintain a fresh evaluation dataset as models evolve and training data cutoffs advance." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 532, + 524, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 532, + 524, + 558 + ], + "spans": [ + { + "bbox": [ + 305, + 532, + 524, + 558 + ], + "type": "text", + "content": "Our contributions address critical gaps in existing approaches:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 318, + 568, + 524, + 773 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 318, + 568, + 524, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 568, + 524, + 648 + ], + "spans": [ + { + "bbox": [ + 318, + 568, + 524, + 648 + ], + "type": "text", + "content": "- We introduce a heterogeneous graph representation that captures both explicit and implicit code relationships, enabling efficient multi-hop reasoning. Our lightweight graph-based indexing process takes only seconds per repository and requires minimal storage." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 318, + 658, + 524, + 751 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 658, + 524, + 751 + ], + "spans": [ + { + "bbox": [ + 318, + 658, + 524, + 751 + ], + "type": "text", + "content": "- We design unified tools for agent-based code exploration that leverage our graph representation, allowing LLM agents to perform complex multi-hop navigation and reasoning across code dependencies even when target code isn't explicitly mentioned in issue descriptions." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 318, + 761, + 524, + 773 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 761, + 524, + 773 + ], + "spans": [ + { + "bbox": [ + 318, + 761, + 524, + 773 + ], + "type": "text", + "content": "- We introduce Loc-Bench, a new benchmark" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 89, + 71, + 290, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 71, + 290, + 153 + ], + "spans": [ + { + "bbox": [ + 89, + 71, + 290, + 153 + ], + "type": "text", + "content": "specifically designed for code localization that addresses limitations in existing datasets. Unlike previous benchmarks dominated by bug reports, Loc-Bench offers a balanced distribution across bug fixes, feature requests, security patches, and performance optimizations." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 81, + 164, + 291, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 164, + 291, + 217 + ], + "spans": [ + { + "bbox": [ + 81, + 164, + 291, + 217 + ], + "type": "text", + "content": "- By fine-tuning open-source models on this task, we reduce the cost of code localization by " + }, + { + "bbox": [ + 81, + 164, + 291, + 217 + ], + "type": "inline_equation", + "content": "86\\%" + }, + { + "bbox": [ + 81, + 164, + 291, + 217 + ], + "type": "text", + "content": " while maintaining competitive performance." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 243, + 160, + 256 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 243, + 160, + 256 + ], + "spans": [ + { + "bbox": [ + 67, + 243, + 160, + 256 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 266, + 267, + 279 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 266, + 267, + 279 + ], + "spans": [ + { + "bbox": [ + 67, + 266, + 267, + 279 + ], + "type": "text", + "content": "2.1 Traditional Retrieval-based Methods" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 285, + 291, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 285, + 291, + 555 + ], + "spans": [ + { + "bbox": [ + 67, + 285, + 291, + 555 + ], + "type": "text", + "content": "Traditional IR methods rely on lexical or semantic matching to return ranked lists of code snippets. Sparse retrievers, such as BM25 (Robertson et al., 1994, 2009), have demonstrated robustness to domain adaptation. Dense retrievers utilize embeddings for improved semantic searching, including models with open checkpoints such as general text embedding models E5-base-v2 (Wang et al., 2022) and proprietary APIs (VoyageAI, 2024). Code embedding models such as Jina-Code-v2 (Günther et al., 2023), Codesage-large-v2 (Zhang et al., 2024), and CodeRankEmbed (Suresh et al., 2024), trained specifically for code related tasks, showing significant performance in Code2Code and NL2Code semantic search tasks. However, while the embedding models themselves are small, the engineering challenges of maintaining these indexing systems (e.g., storage requirements, update mechanisms, and infrastructure maintenance) make them difficult to adapt to fast-evolving codebases." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 566, + 289, + 579 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 566, + 289, + 579 + ], + "spans": [ + { + "bbox": [ + 67, + 566, + 289, + 579 + ], + "type": "text", + "content": "2.2 LLM-based Generative Retrieval Methods" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 586, + 291, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 586, + 291, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 586, + 291, + 774 + ], + "type": "text", + "content": "Recently, LLMs with advanced code reasoning capabilities have demonstrated superior performance by directly processing queries and raw code for code localization (Kang et al., 2023; Wu et al., 2023; Xia et al., 2024; Kang et al., 2024). For example, Agentless (Xia et al., 2024), initially designed for automated program repair, uses a simplistic hierarchical localization process powered by LLM. It employs a straightforward three-phase approach that first localizes relevant code sections before attempting to fix the identified issues, challenging the assumption that complex agent architectures are necessary for effective code understanding and modification tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 71, + 526, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 368 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 368 + ], + "type": "text", + "content": "Expanding on these techniques, agent-based methods utilize multi-step reasoning to enable automated codebase traversal. Specifically, OpenHands (Wang et al., 2025) implements a generalist coding agent that supports bash commands like grep and tools for viewing files. SWE-Agent (Yang et al., 2024) integrates a custom Agent-Computer Interface to support agents to navigate entire repositories. 
MoatlessTools (Örwall, 2024) combines an agentic searching loop and semantic search to obtain code locations. However, existing agent-based methods face two critical limitations: (a) they primarily navigate codebases through directory traversal rather than understanding semantic relationships, (b) and they struggle to extract and reason about complex cross-file dependencies when these relationships aren't explicitly represented in the repository structure. This significantly impairs their ability to locate code that requires modification when the issue involves interactions between structurally distant components in the codebase." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 378, + 493, + 404 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 378, + 493, + 404 + ], + "spans": [ + { + "bbox": [ + 302, + 378, + 493, + 404 + ], + "type": "text", + "content": "2.3 Graph-based Code Representation Methods" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 409, + 526, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 409, + 526, + 665 + ], + "spans": [ + { + "bbox": [ + 302, + 409, + 526, + 665 + ], + "type": "text", + "content": "Due to the inherent structure of code, several works have employed graph-based representations to improve code understanding by capturing key relationships between components. Aider (2023) constructs a RepoMap and uses a graph ranking algorithm to identify the most significant contextual elements. Similarly, as a plugin, RepoGraph (Ouyang et al., 2025) performs subgraph retrieval – extracting an ego-network of relevant lines and their neighbors – to provide structured context. CodexGraph (Liu et al., 2024) indexes the repository into a Neo4j graph database, where LLM agents query the database precisely using Cypher. The efficiency of its retrieval process depends heavily on the querying capabilities of the LLM. 
These methods focus primarily on providing relevant context but do not enhance the traversal process itself, as they do not explicitly model directory structure or file hierarchies." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 667, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 667, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 667, + 526, + 775 + ], + "type": "text", + "content": "In contrast, RepoUnderstander (Ma et al., 2024) builds hierarchical and function-call graphs, using Monte Carlo Tree Search (MCTS) guided by an LLM for exploration. While thorough, MCTS introduces extra computational overhead, making it less efficient than simpler traversal methods like BFS, particularly in large repositories. OrcaLoca (Yu et al., 2025) uses a simplified graph" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 68, + 69, + 526, + 269 + ], + "blocks": [ + { + "bbox": [ + 68, + 69, + 526, + 269 + ], + "lines": [ + { + "bbox": [ + 68, + 69, + 526, + 269 + ], + "spans": [ + { + "bbox": [ + 68, + 69, + 526, + 269 + ], + "type": "image", + "image_path": "68d78298f4ef4a0462b402972796e7a35e59e5ec5b78ccafd6cd08b74dd8ad0d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 66, + 291, + 525, + 339 + ], + "lines": [ + { + "bbox": [ + 66, + 291, + 525, + 339 + ], + "spans": [ + { + "bbox": [ + 66, + 291, + 525, + 339 + ], + "type": "text", + "content": "Figure 2: Overview of LOCAGENT framework. LOCAGENT first parses the given codebase to build a graph-based code representation with various types of entities and relations. It then constructs sparse indexes for exploring structures and searching content. Using these indexes, it performs agent-guided searches that combine the graph and tools." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 70, + 351, + 524, + 441 + ], + "blocks": [ + { + "bbox": [ + 70, + 351, + 524, + 441 + ], + "lines": [ + { + "bbox": [ + 70, + 351, + 524, + 441 + ], + "spans": [ + { + "bbox": [ + 70, + 351, + 524, + 441 + ], + "type": "table", + "html": "
MethodRelation TypesNode TypesSearch/Traversal Strategy
ContainImportInheritInvokeDirectoryFileClassFunction
CodexGraph(Liu et al., 2024)XXXCypher queries
RepoGraph(Ouyang et al., 2025)XXXEgo-graph retrieval
RepoUnderstander(Ma et al., 2024)XMCTS
OrcaLoca(Yu et al., 2025)XXSimple search tools
LOCAGENT(Ours)Unified retrieval tools
", + "image_path": "eff607d6667dc8fa01afca421fe5518165076b8c38bf5f9855a1411d560992d1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 156, + 448, + 436, + 460 + ], + "lines": [ + { + "bbox": [ + 156, + 448, + 436, + 460 + ], + "spans": [ + { + "bbox": [ + 156, + 448, + 436, + 460 + ], + "type": "text", + "content": "Table 1: Comparison of Graph-Based Code Representation Methods." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 473, + 291, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 473, + 291, + 595 + ], + "spans": [ + { + "bbox": [ + 67, + 473, + 291, + 595 + ], + "type": "text", + "content": "enhanced by priority scheduling and context pruning. It maintains efficient search but may miss complex invocation dependencies. Table 1 summarizes the differences between these methods and LOCAGENT. Compared to these approaches, LOCAGENT offers a more comprehensive and unified representation of the repository, along with efficient, unified retrieval tools specifically designed for LLM consumption." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 604, + 238, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 604, + 238, + 618 + ], + "spans": [ + { + "bbox": [ + 67, + 604, + 238, + 618 + ], + "type": "text", + "content": "3 The LOCAGENT Framework" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 626, + 292, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 626, + 292, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 626, + 292, + 775 + ], + "type": "text", + "content": "We introduce LOCAGENT, a graph-oriented LLM-agent framework for code localization. Figure 2 illustrates the overall framework. 
When given a repository, LOCAGENT can locate all the relevant code sections at various granularities (file, class, function, or line level) for different types of GitHub issues (such as bug reports, feature requests, performance bottlenecks, and security vulnerabilities) through automated in-depth exploration and analysis of the codebase. Section 3.1 proposes a novel graph-based indexing approach as an intermediate" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 473, + 525, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 473, + 525, + 527 + ], + "spans": [ + { + "bbox": [ + 302, + 473, + 525, + 527 + ], + "type": "text", + "content": "representation for codebases. Section 3.2 presents our agent-based code search on the indexes and Section 3.3 describes our model fine-tuning and distillation process." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 539, + 493, + 553 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 539, + 493, + 553 + ], + "spans": [ + { + "bbox": [ + 302, + 539, + 493, + 553 + ], + "type": "text", + "content": "3.1 Graph-based Code Representation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 301, + 557, + 526, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 557, + 526, + 638 + ], + "spans": [ + { + "bbox": [ + 301, + 557, + 526, + 638 + ], + "type": "text", + "content": "Codebases contain rich structural information, both explicit and implicit, that is essential for agent reasoning. Building on this insight, we develop a graph-based indexing that comprehensively captures codebase relationships while maintaining a granularity suitable for LLM-agents to retrieve." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "content": "Code Graph Construction. We construct a heterogeneous directed graph " + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "inline_equation", + "content": "\\mathcal{G}(\\mathcal{V},\\mathcal{E},\\mathcal{A},\\mathcal{R})" + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "content": " to index the codebase, where " + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "inline_equation", + "content": "\\nu = \\{v_{i}\\}_{i = 1}^{n}" + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "content": " is the node set and " + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "inline_equation", + "content": "\\mathcal{E}\\subseteq \\mathcal{V}\\times \\mathcal{V}" + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "content": " is the edge set. Each node " + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "inline_equation", + "content": "v\\in \\mathcal{V}" + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "content": " and edge " + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "inline_equation", + "content": "e\\in \\mathcal{E}" + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "content": " has an associated type mapping function. 
For nodes, " + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "inline_equation", + "content": "\\tau (v):\\mathcal{V}\\to \\mathcal{A}" + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "content": " maps to types " + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "inline_equation", + "content": "\\mathcal{A} = \\{\\mathrm{directory},\\mathrm{file},\\mathrm{class},\\mathrm{function}\\}" + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "content": ". For edges, " + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "inline_equation", + "content": "\\phi (e):\\mathcal{E}\\rightarrow \\mathcal{R}" + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "content": " maps to relationships " + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "inline_equation", + "content": "\\mathcal{R} = \\{\\mathrm{contain},\\mathrm{import},\\mathrm{invoke},\\mathrm{inherit}\\}" + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "content": ". In this paper, we focus our study on Python reposito" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 289, + 98 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 289, + 98 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 289, + 98 + ], + "type": "text", + "content": "ries and leave codebases with other programming languages as future work." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 100, + 290, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 100, + 290, + 235 + ], + "spans": [ + { + "bbox": [ + 67, + 100, + 290, + 235 + ], + "type": "text", + "content": "First, we include all directories and Python files as nodes. 
Then, we parse each Python file using the abstract syntax tree (AST) to identify inner functions and classes recursively as nodes. We set the function level as the smallest node granularity and use each function's code content as the document for agent retrieval. This approach creates a good balance of information density between the index and documents, allowing LLMs to reason effectively within their context window limitations." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 238, + 291, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 238, + 291, + 386 + ], + "spans": [ + { + "bbox": [ + 69, + 238, + 291, + 386 + ], + "type": "text", + "content": "As shown in Figure 2, all nodes with different types can be connected as a single tree using the contain relationship. This structure supports standard codebase-navigation operations from existing works. Our code graph further incorporates more advanced codebase relationships as edges: (1) the invoke relationship from function/class to function/class, where an invoke to a class represents class instantiation; (2) the import relationship from file to function/class; and (3) the inherit relationship between classes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 388, + 291, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 388, + 291, + 659 + ], + "spans": [ + { + "bbox": [ + 69, + 388, + 291, + 659 + ], + "type": "text", + "content": "Sparse Hierarchical Entity Indexing. We treat nodes in our code graph as entities and build hierarchical indexing based on their contents. For each keyword, we lookup the indexes from top to bottom: (1) We build an entity ID index as a unique identifier for each node using its fully qualified name. For example, a function calculate_sum in the MathUtils class located in src/utils.py would be represented as: src/utils.py:MathUtilscalculate_sum. 
(2) We construct a global dictionary to map the entity name (e.g., calculate_sum) to all nodes that share the same name. (3) We index entity IDs through an inverted index (i.e., BM25) to handle keyword searches that don't exactly match the IDs or names of entities. (4) For cases where input keywords aren't part of the entities' IDs (e.g., when a keyword refers to a global variable), we build an inverted index that maps code chunk(s) to each entity to cover all possible matches." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "text", + "content": "Remark. Rather than relying solely on directory structures or hierarchical module indexing, our approach captures module dependencies that transcend directory boundaries. Two modules in distant directories (A and B) may appear unrelated in traditional navigation, but if they invoke each other or share inheritance, they're syntactically close in our graph representation. This syntactic" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 306, + 68, + 524, + 200 + ], + "blocks": [ + { + "bbox": [ + 306, + 68, + 524, + 200 + ], + "lines": [ + { + "bbox": [ + 306, + 68, + 524, + 200 + ], + "spans": [ + { + "bbox": [ + 306, + 68, + 524, + 200 + ], + "type": "table", + "html": "
Tool NameInput ParamsOutput
SearchEntityKeywordsRelated Entities with Code Snippets
TraverseGraphStart Entity IDs Direction Traverse Hops Entity Types Relation TypesTraversed Subgraph, including Entities and Relations
RetrieveEntityEntity IDsComplete Code of Specified Entities
", + "image_path": "3ee9190f79ad7623fb4b180523bd53a7f66d2934c043f7c4febdadae07f9b9c0.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 206, + 525, + 232 + ], + "lines": [ + { + "bbox": [ + 302, + 206, + 525, + 232 + ], + "spans": [ + { + "bbox": [ + 302, + 206, + 525, + 232 + ], + "type": "text", + "content": "Table 2: List of unified APIs provided by LocAgent for code search and exploration." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 301, + 241, + 525, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 241, + 525, + 321 + ], + "spans": [ + { + "bbox": [ + 301, + 241, + 525, + 321 + ], + "type": "text", + "content": "proximity is essential for code localization because issues typically manifest through call relationships rather than directory structure. By capturing these functional dependencies, our approach efficiently identifies related components even when physically distant in the codebase." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 334, + 456, + 347 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 334, + 456, + 347 + ], + "spans": [ + { + "bbox": [ + 302, + 334, + 456, + 347 + ], + "type": "text", + "content": "3.2 Agent-guided Code Search" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 301, + 353, + 525, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 353, + 525, + 514 + ], + "spans": [ + { + "bbox": [ + 301, + 353, + 525, + 514 + ], + "type": "text", + "content": "We develop tools based on the indexes built offline. During runtime, LOCAGENT takes issue statements as input and launches agents that autonomously use tools to localize target code sections. 
While the agent may iteratively invoke multiple tools internally to explore the codebase, LOCAGENT presents a simplified interface to users, requiring only a single-turn interaction—users submit an issue statement and receive localization results without additional input. This autonomous, self-contained workflow makes LOCAGENT both easy to deploy and highly practical for real-world use." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 301, + 517, + 526, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 517, + 526, + 692 + ], + "spans": [ + { + "bbox": [ + 301, + 517, + 526, + 692 + ], + "type": "text", + "content": "Tool Design for Codebase Exploration. Recent works (Örwall, 2024; Wang et al., 2025), inspired by GUI-based IDEs, have developed numerous specialized tools for agents to explore codebases. However, these tools are initially designed for human readability, which sacrifices the compactness and efficiency that LLM agents prefer (Yang et al., 2024). Building upon our graph-based code representation, we can develop tools that support efficient higher-order codebase exploration to address these challenges. We unify all codebase navigation, search, and view operations into three tools (Table 2), introduced as follows." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 693, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 693, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 693, + 525, + 775 + ], + "type": "text", + "content": "SearchEntity: This tool searches codebases using keywords to locate relevant entities through our Hierarchical Entity Index. When an exact match isn't found in the upper index, the system performs a fuzzy search using the lower index. 
For each entity found, we return its code snippet in three detail" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 290, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 290, + 111 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 290, + 111 + ], + "type": "text", + "content": "levels: fold, preview, and full code (Figure 6). This effectively prevents lengthy code context and reduces noise fed into agents." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 112, + 291, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 112, + 291, + 449 + ], + "spans": [ + { + "bbox": [ + 69, + 112, + 291, + 449 + ], + "type": "text", + "content": "**TraverseGraph:** This tool performs a type-aware breadth-first search (BFS) on the code graph, starting from input entities and allowing control over both traversal direction and number of hops. This supports agents to perform arbitrary multi-hop codebase navigation through only one action, significantly improving the efficiency compared with existing agent systems. Note that by allowing agents to select entity types and relation types for each traversal, this tool effectively leverages the LLM agents' coding expertise to generate proper meta paths—a crucial element for heterogeneous graph analysis (Lv et al., 2021). For example, by specifying entity types to {class, function} and relation types to {contain, inherit}, this tool returns the UML diagram. Additionally, we design an expanded tree-based format for the output subgraph that encodes both relation types and directions (Figure 7). (Fatemi et al., 2023) demonstrates that LLM performance on graph reasoning depends on the input graph format. Converting a graph into a tree structure encodes topology through the spatial distance between entity names, thereby deriving better performance. 
For detailed comparisons with alternative graph formats, please see Appendix A.1.2." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 450, + 290, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 450, + 290, + 503 + ], + "spans": [ + { + "bbox": [ + 67, + 450, + 290, + 503 + ], + "type": "text", + "content": "RetreiveEntity: This tool retrieves complete entity attributes for each input entity ID, including essential information such as file path, line number, and code content." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 504, + 291, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 504, + 291, + 652 + ], + "spans": [ + { + "bbox": [ + 69, + 504, + 291, + 652 + ], + "type": "text", + "content": "Chain-of-Thought Agent Planning. We use chain-of-thought (CoT) prompting (shown in Appendix D) to guide the agent in solving code localization problems step by step. The agent systematically follows these steps: (1) Keyword extraction. The agent begins by breaking down the issue statement into different categories and then extracts relevant keywords that are closely related to the problem. (2) Linking keywords to code entities. The agent invokes SearchEntity to complete and clarify each extracted keyword." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 654, + 290, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 654, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 654, + 290, + 774 + ], + "type": "text", + "content": "(3) Generate the logical flow from fault to failure. The agent first identifies the entry points that trigger the problem. Then, it iteratively traverse the codebase with TraverseGraph, retrieves code contents with RetrieveEntity, and searches new keywords with SearchEntity. Finally, it generates the logic flow based on the issue and additional context. (4) Locate the target entities. 
The agent pinpoints all suspicious code entities that need modification" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 71, + 524, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 524, + 97 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 524, + 97 + ], + "type": "text", + "content": "based on the logic flow. Then, it ranks these entities based on their relevance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 99, + 525, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 99, + 525, + 260 + ], + "spans": [ + { + "bbox": [ + 302, + 99, + 525, + 260 + ], + "type": "text", + "content": "Confidence Estimation Based on Consistency. After generating a complete ranked list of candidate entities, to obtain a more consistent ranking, we measure the consistency (Wang et al., 2023a) of the LLM's predictions across multiple iterations. Specifically, we use the Reciprocal Rank as the initial confidence score for each predicted location. We then aggregate the scores for each entity across iterations to compute its final confidence score. The intuition behind this approach is that if the LLM consistently ranks a location higher in multiple iterations, it is more likely to be relevant." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 271, + 481, + 285 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 271, + 481, + 285 + ], + "spans": [ + { + "bbox": [ + 302, + 271, + 481, + 285 + ], + "type": "text", + "content": "3.3 Open-source Model Fine-tuning" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 289, + 525, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 289, + 525, + 545 + ], + "spans": [ + { + "bbox": [ + 302, + 289, + 525, + 545 + ], + "type": "text", + "content": "Given the high costs of proprietary LLM APIs and data security concerns, we fine-tuned open-source models to improve their code localization capabilities and enable local deployment. We collect 433 successful trajectories generated with Claude-3.5, where the agent completed tasks from the SWEBench training set. Due to budget constraints, we sample an additional 335 trajectories generated by the initially fine-tuned Qwen2.5-32B model. Importantly, we only select successful trajectories where the model correctly localized the issues, creating a high-quality dataset of correct reasoning paths. These successful examples are then used to refine the same 32B model further, reinforcing effective reasoning patterns through this self-improvement loop. The entire dataset, combining both Claude-3.5 trajectories and successful Qwen2.5-32B samples, was then used to distill knowledge to a smaller 7B model." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 547, + 525, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 547, + 525, + 628 + ], + "spans": [ + { + "bbox": [ + 302, + 547, + 525, + 628 + ], + "type": "text", + "content": "To fine-tune the smaller model, we employ Supervised Fine-Tuning (SFT) with LoRA (Hu et al., 2021). Our experiments show that this straightforward distillation method significantly enhances the performance of smaller models. 
See Appendix C.1.3 for more training details." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 640, + 514, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 640, + 514, + 666 + ], + "spans": [ + { + "bbox": [ + 302, + 640, + 514, + 666 + ], + "type": "text", + "content": "4 LOC-BENCH: A New Benchmark for Code Localization" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 676, + 476, + 688 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 676, + 476, + 688 + ], + "spans": [ + { + "bbox": [ + 302, + 676, + 476, + 688 + ], + "type": "text", + "content": "4.1 Revisiting Existing Benchmark" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 693, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 693, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 693, + 525, + 775 + ], + "type": "text", + "content": "SWE-Bench(Jimenez et al., 2023) is a widely used benchmark that collects GitHub issues and corresponding code patches that resolve them. Xia et al. (2024); Suresh et al. (2024) adapt its subset, SWE-Bench-Lite, for code localization, treating the patched files and functions as the targets." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 291, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 291, + 327 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 291, + 327 + ], + "type": "text", + "content": "However, existing datasets, including SWE-Bench, present challenges for effectively evaluating code localization methods. First, they are at risk of contamination, as they may include data overlapping with the repositories or issues used by modern models during pre-training. Second, existing datasets are not specifically designed for code localization (Tomassi et al., 2019). 
SWE-Bench, for instance, was created primarily to evaluate end-to-end bug-fixing capabilities, with localization being only an implicit intermediate step. This focus results in datasets dominated by bug reports (85% of SWE-Bench-Lite examples) while severely underrepresenting other common software maintenance tasks such as feature requests (14%), security vulnerabilities (1%), and performance optimizations (0%). This imbalance fails to capture the diverse localization challenges faced in real-world software development." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 338, + 193, + 349 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 338, + 193, + 349 + ], + "spans": [ + { + "bbox": [ + 69, + 338, + 193, + 349 + ], + "type": "text", + "content": "4.2 Dataset Construction" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 354, + 290, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 354, + 290, + 502 + ], + "spans": [ + { + "bbox": [ + 69, + 354, + 290, + 502 + ], + "type": "text", + "content": "To address the limitations of existing benchmarks, we introduce LOC-BENCH, a new dataset specifically designed for code localization. This dataset collects up-to-date issues from Python repositories to mitigate the influence of pre-training bias in the latest LLMs. Additionally, LOC-BENCH covers wider categories, including bug reports, feature requests, security, and performance issues, enabling a more comprehensive evaluation of code localization methods. The statistics of LOC-BENCH are shown in Table 3." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 503, + 290, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 503, + 290, + 651 + ], + "spans": [ + { + "bbox": [ + 69, + 503, + 290, + 651 + ], + "type": "text", + "content": "For the Bug Report category, we collect GitHub issues created after October 2024, which is later than the release dates of most modern LLMs. To enrich the dataset with more instances of security and performance issues, we use the GitHub Search API to search for relevant keywords, such as \"latency improvement\" for performance-related issues. We exclude instances that involve modifying more than five Python files or more than ten functions in the corresponding patch. For further details, see Appendix B.1." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 661, + 153, + 675 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 661, + 153, + 675 + ], + "spans": [ + { + "bbox": [ + 69, + 661, + 153, + 675 + ], + "type": "text", + "content": "5 Experiments" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 681, + 290, + 773 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 681, + 290, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 681, + 290, + 773 + ], + "type": "text", + "content": "Our experiments aim to evaluate four key aspects of LOCAGENT: (1) the effectiveness of our graph-based representation and tooling for code localization compared to existing methods, (2) the performance of fine-tuned open-source models as cost-effective alternatives to proprietary LLMs, (3) a detailed analysis of how performance varies across" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 312, + 69, + 517, + 223 + ], + "blocks": [ + { + "bbox": [ + 312, + 69, + 517, + 223 + ], + "lines": [ + { + "bbox": [ + 312, + 69, + 517, + 223 + ], + "spans": [ + { + "bbox": [ + 312, + 69, + 517, + 223 + ], + "type": "table", + "html": "
DatasetCategory#Sample
SWE-Bench-Lite (Total = 300)Bug Report254
Feature Request43
Security Issue3
Performance Issue0
Loc-Bench (Totoal = 560)Bug Report242
Feature Request150
Security Issue29
Performance Issue139
", + "image_path": "dffede0cb6e1140d33ef35874b6f55be329c72cec08ada3fd8a7e8684a261136.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 232, + 524, + 256 + ], + "lines": [ + { + "bbox": [ + 304, + 232, + 524, + 256 + ], + "spans": [ + { + "bbox": [ + 304, + 232, + 524, + 256 + ], + "type": "text", + "content": "Table 3: Distribution of samples across different categories in the SWE-Bench-Lite and Loc-Bench datasets." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 267, + 524, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 267, + 524, + 359 + ], + "spans": [ + { + "bbox": [ + 304, + 267, + 524, + 359 + ], + "type": "text", + "content": "task categories, and (4) the contribution of each component in our framework through comprehensive ablation studies. We evaluate on both SWE-Bench-Lite and our introduced Loc-Bench dataset. Additionally, we examine the impact of improved localization on downstream software maintenance tasks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 364, + 433, + 378 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 364, + 433, + 378 + ], + "spans": [ + { + "bbox": [ + 304, + 364, + 433, + 378 + ], + "type": "text", + "content": "5.1 Experimental Settings" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 383, + 524, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 383, + 524, + 476 + ], + "spans": [ + { + "bbox": [ + 304, + 383, + 524, + 476 + ], + "type": "text", + "content": "Datasets. We first conduct experiments on SWEBench-Lite, treating the patched files and functions as the targets for localization. Following Suresh et al. (2024), we excluded examples where no existing functions were modified by the patch, ultimately retaining 274 out of the original 300 examples." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 478, + 524, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 478, + 524, + 719 + ], + "spans": [ + { + "bbox": [ + 304, + 478, + 524, + 719 + ], + "type": "text", + "content": "Metrics. To assess performance, we use a modified accuracy metric inspired by R-Precision from information retrieval, following Agentless(Xia et al., 2024). To assess performance, we use Acc@k (Accuracy at k) as our evaluation metric, following Agentless(Xia et al., 2024). For each example, we select the top-k predicted locations and consider a localization attempt successful only if all relevant locations are correctly identified within these top-k predictions. This approach measures the ability to fully identify all necessary code sections that require modification. We report results across multiple " + }, + { + "bbox": [ + 304, + 478, + 524, + 719 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 478, + 524, + 719 + ], + "type": "text", + "content": " values: file localization at Acc@1, Acc@3, and Acc@5, and function localization at Acc@5 and Acc@10. Additionally, to provide a more relaxed evaluation criteria, we assess module localization, which only requires finding any function within the patched class." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 730, + 373, + 742 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 730, + 373, + 742 + ], + "spans": [ + { + "bbox": [ + 304, + 730, + 373, + 742 + ], + "type": "text", + "content": "5.2 Baselines" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 749, + 524, + 773 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 749, + 524, + 773 + ], + "spans": [ + { + "bbox": [ + 304, + 749, + 524, + 773 + ], + "type": "text", + "content": "We evaluate LOCAGENT against three categories of competitive baselines: (a) Retrieval-based meth" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 71, + 54, + 525, + 253 + ], + "blocks": [ + { + "bbox": [ + 71, + 54, + 525, + 253 + ], + "lines": [ + { + "bbox": [ + 71, + 54, + 525, + 253 + ], + "spans": [ + { + "bbox": [ + 71, + 54, + 525, + 253 + ], + "type": "table", + "html": "
TypeMethodLoc-ModelFile (%)Module (%)Function (%)
Acc@1Acc@3Acc@5Acc@5Acc@10Acc@5Acc@10
Embedding-BasedBM25 (Robertson et al., 1994)38.6951.8261.6845.2652.9231.7536.86
E5-base-v2 (Wang et al., 2022)49.6474.4580.2967.8872.2639.4251.09
Jina-Code-v2 (Günther et al., 2023)43.4371.1780.2963.5072.6342.3452.19
Codesage-large-v2 (Zhang et al., 2024)47.8169.3478.1060.5869.7133.9444.53
CodeRankEmbed (Suresh et al., 2024)52.5577.7484.6771.9078.8351.8258.76
Procedure-BasedAgentless (Xia et al., 2024)GPT-4o67.1574.4574.4567.1567.1555.4755.47
Claude-3.572.6379.2079.5668.9868.9858.7658.76
Agent-BasedMoutlessTools (Örwall, 2024)GPT-4o73.3684.3185.0474.8276.2857.3059.49
Claude-3.572.6385.7786.1376.2876.2864.6064.96
SWE-agent (Yang et al., 2024)GPT-4o57.3064.9668.9858.0358.0345.9946.35
Claude-3.577.3787.2390.1577.7478.1064.2364.60
Openhands (Wang et al., 2025)GPT-4o60.9571.9073.7262.4163.8749.6450.36
Claude-3.576.2889.7890.1583.2183.5868.2570.07
LOCAGENT (Ours)Qwen2.5-7B(ft)70.8084.6788.3281.0282.8564.2371.53
Qwen2.5-32B(ft)75.9190.5192.7085.7787.2371.9077.01
Claude-3.577.7491.9794.1686.5087.5973.3677.37
", + "image_path": "ad9888427e4959ea8df866a9f22f434e24d97aa01a98851cc1f0d57d84fed76f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 260, + 526, + 321 + ], + "lines": [ + { + "bbox": [ + 67, + 260, + 526, + 321 + ], + "spans": [ + { + "bbox": [ + 67, + 260, + 526, + 321 + ], + "type": "text", + "content": "Table 4: Performance comparison with baseline methods on code localization on SWE-bench lite. Results show the accuracy at file, module, and function levels. For Agent-Based methods, we use GPT-4o-2024-0513 (abbr. as GPT-4o) and Claude-3-5-sonnet-20241022 (abbr. as Claude-3.5) as the localization model. Additionally, the performance of our fine-tuned open-source models, Qwen2.5-7B(ft) and Qwen2.5-32B(ft), are included for comparison." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 66, + 327, + 292, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 327, + 292, + 613 + ], + "spans": [ + { + "bbox": [ + 66, + 327, + 292, + 613 + ], + "type": "text", + "content": "ods: We include the sparse retrieval approach BM25 (Robertson et al., 1994) and several state-of-the-art embedding models, including the general-purpose E5-base-v2 (Wang et al., 2022) and specialized code embedding models such as JinaCode-v2 (Günther et al., 2023), Codesage-large-v2 (Zhang et al., 2024), and the current SOTA code embedding model CodeRankEmbed (Suresh et al., 2024). Proprietary embedding solutions were excluded due to API costs. (b) Procedure-based methods: We compare against Agentless (Xia et al., 2024), which employs a structured hierarchical approach to code localization without complex agent architectures. 
(c) Agent-based methods: We include several advanced agent frameworks designed for code exploration and modification, specifically OpenHands (Wang et al., 2025) (using its default CodeActAgent implementation), SWE-Agent (Yang et al., 2024), and MoatlessTools (Örwall, 2024). For implementation details, please refer to Appendix C.1.1." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 622, + 281, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 622, + 281, + 634 + ], + "spans": [ + { + "bbox": [ + 67, + 622, + 281, + 634 + ], + "type": "text", + "content": "5.3 Evaluation Results on SWE-Bench-Lite" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 640, + 292, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 640, + 292, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 640, + 292, + 775 + ], + "type": "text", + "content": "As shown in Table 4, Agent-Based methods consistently outperform other approaches, and our method demonstrates competitive performance by achieving the best results across all levels of code localization. Unlike traditional retrieval-based methods, Agentless identifies only a limited number of locations due to its narrow repository scope, which hinders performance gains when considering a broader set of candidates. The results of the NDCG are presented in Table 11 in the Appendix." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 310, + 323, + 518, + 430 + ], + "blocks": [ + { + "bbox": [ + 310, + 323, + 518, + 430 + ], + "lines": [ + { + "bbox": [ + 310, + 323, + 518, + 430 + ], + "spans": [ + { + "bbox": [ + 310, + 323, + 518, + 430 + ], + "type": "image", + "image_path": "f929dd53f4adf74c47eab0524e9c10d9df2c8753a81e68bd756d7c86d5d4876b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 311, + 433, + 518, + 540 + ], + "blocks": [ + { + "bbox": [ + 311, + 433, + 518, + 540 + ], + "lines": [ + { + "bbox": [ + 311, + 433, + 518, + 540 + ], + "spans": [ + { + "bbox": [ + 311, + 433, + 518, + 540 + ], + "type": "image", + "image_path": "dd0a043ec99e4aa7d3cdd9cab21f0f8c8ff70d258cc383c3b7961f81be3880dc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 549, + 526, + 623 + ], + "lines": [ + { + "bbox": [ + 302, + 549, + 526, + 623 + ], + "spans": [ + { + "bbox": [ + 302, + 549, + 526, + 623 + ], + "type": "text", + "content": "Figure 3: Performance analysis at different difficulty levels for file- and function-level localization. All agent-based methods and Agentless use Claude-3.5 as the localization model. Hop " + }, + { + "bbox": [ + 302, + 549, + 526, + 623 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 302, + 549, + 526, + 623 + ], + "type": "text", + "content": " refers to the distances between functions mentioned in the issue description and the ground truth patch on our code graph." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 640, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 640, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 640, + 526, + 775 + ], + "type": "text", + "content": "To further analyze the results, we examine performance across different task difficulty levels. We measure the task difficulty by calculating the shortest hops between the functions mentioned in the issue descriptions and the patched functions on our code graph (See Appendix C.1.2 for more details). As shown in Figure 3, performance decreases for all methods as the task becomes more challenging. However, Agent-based methods demonstrate better robustness as the difficulty increases, with" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 63, + 283, + 176 + ], + "blocks": [ + { + "bbox": [ + 75, + 63, + 283, + 176 + ], + "lines": [ + { + "bbox": [ + 75, + 63, + 283, + 176 + ], + "spans": [ + { + "bbox": [ + 75, + 63, + 283, + 176 + ], + "type": "image", + "image_path": "594e4e848668151fa0f0d585dcf3a37fcc51677d8a82c5fdceaf97947d73c7bb.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 190, + 291, + 250 + ], + "lines": [ + { + "bbox": [ + 67, + 190, + 291, + 250 + ], + "spans": [ + { + "bbox": [ + 67, + 190, + 291, + 250 + ], + "type": "text", + "content": "Figure 4: Comparison of performance between the original and fine-tuned Qwen models. The metrics used are file-level Acc@5 and module/function-level Acc@10. Dashed lines represent the performance of the Claude-3.5 model for reference." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 260, + 291, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 260, + 291, + 423 + ], + "spans": [ + { + "bbox": [ + 67, + 260, + 291, + 423 + ], + "type": "text", + "content": "our method maintaining competitive performance across various difficulty levels. Retrieval-based methods, such as E5-Base-v2 and CodeRankEmbed, perform poorly at the function level, even when the patched functions are explicitly mentioned in the query. This is because they treat the query as a whole, failing to capture fine-grained details. Agentless performs even worse than retrieval-based methods when exploration beyond the query is needed (" + }, + { + "bbox": [ + 67, + 260, + 291, + 423 + ], + "type": "inline_equation", + "content": "hop \\geq 0" + }, + { + "bbox": [ + 67, + 260, + 291, + 423 + ], + "type": "text", + "content": ") due to its simplistic localization process and limited view focused only on the repository structure." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 433, + 246, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 433, + 246, + 445 + ], + "spans": [ + { + "bbox": [ + 67, + 433, + 246, + 445 + ], + "type": "text", + "content": "5.4 Fine-tuned Open-source Models" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 449, + 291, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 449, + 291, + 680 + ], + "spans": [ + { + "bbox": [ + 67, + 449, + 291, + 680 + ], + "type": "text", + "content": "Figure 4 demonstrates that after fine-tuning, both the 7B and 32B models show significant improvements on this task. LOCAGENT with finetuned Qwen-2.5-Coder-Instruct-32B (abbreviated as Qwen2.5-32B(ft)) achieves performance comparable to Claude-3.5, and LOCAGENT with Qwen2.5-7B(ft) also delivers results on par with that obtained using GPT-4o. 
As shown in Table 4, our method with Qwen2.5-32B(ft) outperforms nearly all baselines, including those that use larger and more powerful LLMs. The original 7B model performs poorly due to its limited tool-use capability (Chen et al., 2024). These results validate the feasibility of deploying our fine-tuned open-source models as promising alternatives to proprietary APIs, especially in resource-constrained applications." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 689, + 185, + 703 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 689, + 185, + 703 + ], + "spans": [ + { + "bbox": [ + 67, + 689, + 185, + 703 + ], + "type": "text", + "content": "5.5 Efficiency Analysis" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "text", + "content": "Table 5 presents an efficiency analysis comparing agent-based methods in terms of cost and the number of agent interactions required. MoatlessTools demonstrates good cost-efficiency and requires relatively fewer rounds of interaction. However, the" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 302, + 68, + 520, + 180 + ], + "blocks": [ + { + "bbox": [ + 302, + 68, + 520, + 180 + ], + "lines": [ + { + "bbox": [ + 302, + 68, + 520, + 180 + ], + "spans": [ + { + "bbox": [ + 302, + 68, + 520, + 180 + ], + "type": "table", + "html": "
MethodLM#RoundCost($)Acc@10
Cost
MoatlessToolsGPT-4o50.461.3
Claude-3.550.461.4
SWE-agentGPT-4o80.560.8
Claude-3.590.671.0
OpenhandsGPT-4o150.830.6
Claude-3.5130.790.9
OursClaude-3.570.661.2
Qwen2.5-7B(ft)60.0513.2
Qwen2.5-32B(ft)90.098.6
", + "image_path": "ca58188c6fe4c2ffec34aeccdb6ee5a73bdf63c458fea192f02d6865ca76db6a.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 304, + 253, + 523, + 371 + ], + "blocks": [ + { + "bbox": [ + 302, + 184, + 525, + 244 + ], + "lines": [ + { + "bbox": [ + 302, + 184, + 525, + 244 + ], + "spans": [ + { + "bbox": [ + 302, + 184, + 525, + 244 + ], + "type": "text", + "content": "Table 5: Efficiency analysis comparing the average cost and number of agent interaction rounds required by different methods. The cost-efficiency of each method is evaluated using the ratio of function-level Acc@10 to average cost." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 304, + 253, + 523, + 371 + ], + "lines": [ + { + "bbox": [ + 304, + 253, + 523, + 371 + ], + "spans": [ + { + "bbox": [ + 304, + 253, + 523, + 371 + ], + "type": "table", + "html": "
Model SettingFile Acc@5Module Acc@10Function Acc@10
Ours88.3282.8571.53
w/o TraverseGraph86.1378.4766.06
Relation Types: contain86.5079.5666.42
Traverse Hops: 186.8680.2966.79
w/o RetrieveEntity87.5981.3969.34
w/o SearchEntity68.9861.3153.28
w/o BM25 index75.1868.9860.22
", + "image_path": "cbbf57fce918f5d0e7521a35c3e24e9a0612bad18584b2403a531e9d40562501.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 380, + 525, + 441 + ], + "lines": [ + { + "bbox": [ + 302, + 380, + 525, + 441 + ], + "spans": [ + { + "bbox": [ + 302, + 380, + 525, + 441 + ], + "type": "text", + "content": "Table 6: The ablation study of our model. The metrics used here are file-level Acc@5, module-level Acc@10, and function-level Acc@10. The impact of removing or fixing components is analyzed to observe how each component contributes to the overall accuracy." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 301, + 460, + 526, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 460, + 526, + 690 + ], + "spans": [ + { + "bbox": [ + 301, + 460, + 526, + 690 + ], + "type": "text", + "content": "dense embeddings it uses make it difficult and slow to adapt to fast-evolving codebases. SWE-agent and Openhands also show moderate costs but still do not match the efficiency of LOCAGENT. For LOCAGENT with Claude-3.5, although more rounds of interaction are required, the cost remains lower than that of Openhands, illustrating the token efficiency of our tool's outputs. LOCAGENT with fine-tuned Qwen models stands out for its superior efficiency1. Qwen2.5-7B(ft) is the most cost-efficient option, requiring only $0.05 per example, while Qwen2.5-32B(ft) offers a more cost-effective alternative to Claude-3.5. These results highlight the potential of fine-tuned open-source models as efficient alternatives, providing an optimal balance of cost-effectiveness and performance that surpasses other methods." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 713, + 525, + 775 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 713, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 713, + 525, + 775 + ], + "type": "text", + "content": "1We calculate the cost based on the prices from AI inference providers (Hyperbolic, 2025; artificialanalysis.ai, 2025). Specifically, for the Qwen2.5-32B(ft) model, the cost is $0.20/1M tokens for both input and output. For the Qwen2.5-7B(ft) model, the cost is $0.14/1M tokens for input and $0.28/1M tokens for output." + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 166, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 166, + 84 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 166, + 84 + ], + "type": "text", + "content": "5.6 Ablation Study" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 90, + 290, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 90, + 290, + 156 + ], + "spans": [ + { + "bbox": [ + 67, + 90, + 290, + 156 + ], + "type": "text", + "content": "We conduct an ablation study to evaluate the effectiveness of each component of our toolsets. Due to budget constraints, we use the fine-tuned Qwen-2.5-7B as the localization model for these experiments." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 158, + 291, + 634 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 67, + 158, + 290, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 158, + 290, + 266 + ], + "spans": [ + { + "bbox": [ + 67, + 158, + 290, + 266 + ], + "type": "text", + "content": "(1) Each tool in our toolset plays a critical role in code localization performance. 
As shown in Table 6, removing any tool, especially the SearchEntity tool, leads to varying degrees of accuracy degradation, particularly in module and function level localization. This highlights the critical role each tool plays in identifying relevant modules and functions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 269, + 291, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 269, + 291, + 442 + ], + "spans": [ + { + "bbox": [ + 69, + 269, + 291, + 442 + ], + "type": "text", + "content": "(2) The graph structure provides essential information for accurate code localization. Removing TraverseGraph tool decreases module and function level performance since the agent cannot obtain any structure information about the codebase and relies on reasoning capability to identify call relationship or directory structure. Adding contain relationship provides only marginal improvements compared to fully removing TraverseGraph, emphasizing the importance of the other three relationship types and explaining why our method surpasses others relying only on the repository structure." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 444, + 290, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 444, + 290, + 539 + ], + "spans": [ + { + "bbox": [ + 67, + 444, + 290, + 539 + ], + "type": "text", + "content": "(3) Multi-hop exploration is crucial for deep code understanding. When compared to the full setting, fixing " + }, + { + "bbox": [ + 67, + 444, + 290, + 539 + ], + "type": "inline_equation", + "content": "Hops = 1" + }, + { + "bbox": [ + 67, + 444, + 290, + 539 + ], + "type": "text", + "content": " leads to a moderate decline in file and module-level accuracy, but it causes a more significant decrease in function-level accuracy, underscoring the importance of multi-hop exploration for identifying relevant entities." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 540, + 290, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 540, + 290, + 634 + ], + "spans": [ + { + "bbox": [ + 67, + 540, + 290, + 634 + ], + "type": "text", + "content": "(4) Sparse indexing significantly enhances localization performance. Removing SearchEntity tool, or even partial removal of its index, causes a substantial drop in performance across all metrics. This demonstrates the effectiveness of building a sparse index on our code graph for improving localization performance." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 647, + 250, + 660 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 647, + 250, + 660 + ], + "spans": [ + { + "bbox": [ + 67, + 647, + 250, + 660 + ], + "type": "text", + "content": "5.7 Evaluation Results on Loc-Bench" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 666, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 666, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 666, + 290, + 775 + ], + "type": "text", + "content": "To ensure the robustness and generalization of our methods and fine-tuned Qwen models, and to eliminate potential data leakage, we evaluate our new dataset. Since Loc-Bench includes examples that edit 1 to 5 files, we assess file localization at top-5 and top-10 ranks, and function/module localization at top-10 and top-15 ranks. 
Table 7 shows that our fine-tuned Qwen2.5-7B model exhibits strong gen" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 67, + 518, + 164 + ], + "blocks": [ + { + "bbox": [ + 310, + 67, + 518, + 164 + ], + "lines": [ + { + "bbox": [ + 310, + 67, + 518, + 164 + ], + "spans": [ + { + "bbox": [ + 310, + 67, + 518, + 164 + ], + "type": "image", + "image_path": "40113cdec1e1feca7000cb77d3a5294a6135d63a0d343288699aecc05a7b7a58.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 311, + 168, + 518, + 265 + ], + "blocks": [ + { + "bbox": [ + 311, + 168, + 518, + 265 + ], + "lines": [ + { + "bbox": [ + 311, + 168, + 518, + 265 + ], + "spans": [ + { + "bbox": [ + 311, + 168, + 518, + 265 + ], + "type": "image", + "image_path": "f3ff500f60d006d9ab2100b2e34cdaaed024cc944a6d5f1861937bb334d1a3cf.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 273, + 525, + 321 + ], + "lines": [ + { + "bbox": [ + 302, + 273, + 525, + 321 + ], + "spans": [ + { + "bbox": [ + 302, + 273, + 525, + 321 + ], + "type": "text", + "content": "Figure 5: Performance analysis at different difficulty category for file- and function-level localization. All agent-based baselines and Agentless use Claude-3.5 as the localization model." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 301, + 336, + 525, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 336, + 525, + 457 + ], + "spans": [ + { + "bbox": [ + 301, + 336, + 525, + 457 + ], + "type": "text", + "content": "eralization capabilities, maintaining competitive performance compared to SWE-agent using more expensive and strong model. These results highlight the practicality of the fine-tuned Qwen2.5-7B model for real-world applications. 
Despite being an open-source alternative, it achieves a performance comparable to Claude-3.5, supporting its feasibility as a cost-effective substitute for commercial models in practical scenarios." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 459, + 525, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 459, + 525, + 675 + ], + "spans": [ + { + "bbox": [ + 302, + 459, + 525, + 675 + ], + "type": "text", + "content": "Additionally, we evaluate the performance across four different difficulty categories. Figure 5 clearly shows that our method outperforms other methods in almost all categories of code localization. However, it also highlights a noticeable decrease in performance across the other three categories compared to the Bug Report category. This performance gap likely reflects our training data distribution, which contained more bug report examples, potentially leading to scaffolds better optimized for bug localization tasks. This trend suggests that while our method is highly effective for bug report localization, there is still room for improvement in handling the other categories through more balanced training data and category-specific optimization strategies." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 687, + 524, + 714 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 687, + 524, + 714 + ], + "spans": [ + { + "bbox": [ + 302, + 687, + 524, + 714 + ], + "type": "text", + "content": "5.8 Application: Better Localization Leads to More Solved GitHub Issues" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 720, + 524, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 720, + 524, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 720, + 524, + 775 + ], + "type": "text", + "content": "To assess the impact of localization methods on downstream tasks, we evaluated their effectiveness in solving GitHub issues. 
We choose Agentless as the baseline, ranking among the top-performing" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 88, + 68, + 503, + 190 + ], + "blocks": [ + { + "bbox": [ + 88, + 68, + 503, + 190 + ], + "lines": [ + { + "bbox": [ + 88, + 68, + 503, + 190 + ], + "spans": [ + { + "bbox": [ + 88, + 68, + 503, + 190 + ], + "type": "table", + "html": "
MethodLoc ModelFile (%)Module (%)Function (%)
Acc@5Acc@10Acc@10Acc@15Acc@10Acc@15
IR-BasedCodeRankEmbed74.2980.8963.2167.5043.3946.61
AgentlessClaude-3.567.5067.5053.3953.3942.6842.68
OpenHandsClaude-3.579.8280.0068.9369.1159.1159.29
SWE-agentClaude-3.577.6877.6863.5763.7551.9651.96
LocAgent (Ours)Qwen2.5-7B(ft)78.5779.6463.0463.0451.4351.79
Claude-3.583.3986.0770.8971.0759.2960.71
", + "image_path": "6c9737e3fa48ebc0fa6a563806676ce4a098be8490a88f39f510258c9a12539f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 68, + 230, + 287, + 290 + ], + "blocks": [ + { + "bbox": [ + 156, + 198, + 436, + 210 + ], + "lines": [ + { + "bbox": [ + 156, + 198, + 436, + 210 + ], + "spans": [ + { + "bbox": [ + 156, + 198, + 436, + 210 + ], + "type": "text", + "content": "Table 7: Performance evaluation on the real-world LocBench dataset." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 68, + 230, + 287, + 290 + ], + "lines": [ + { + "bbox": [ + 68, + 230, + 287, + 290 + ], + "spans": [ + { + "bbox": [ + 68, + 230, + 287, + 290 + ], + "type": "table", + "html": "
MethodLocalization LMAcc@5Pass@1Pass@10
AgentlessClaude-3.558.3926.3133.58
OursQwen2.5-32B(ft)69.3426.7936.13
Claude-3.573.3627.9237.59
", + "image_path": "cd90b427caeb94e68300cb0963a3939e90034375ecc1b70e22bb7ba12b53914b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 297, + 289, + 323 + ], + "lines": [ + { + "bbox": [ + 67, + 297, + 289, + 323 + ], + "spans": [ + { + "bbox": [ + 67, + 297, + 289, + 323 + ], + "type": "text", + "content": "Table 8: Impact of localization accuracy on downstream bug repair tasks." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 334, + 290, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 334, + 290, + 416 + ], + "spans": [ + { + "bbox": [ + 67, + 334, + 290, + 416 + ], + "type": "text", + "content": "open-source submissions on SWE-Bench-Lite. For consistency, we utilized Claude-3.5 as the editing model in conjunction with the Agentless editing method. Table 8 shows that the success rate for solving GitHub issues improves significantly with better code localization accuracy." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 425, + 147, + 437 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 425, + 147, + 437 + ], + "spans": [ + { + "bbox": [ + 67, + 425, + 147, + 437 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 446, + 291, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 446, + 291, + 581 + ], + "spans": [ + { + "bbox": [ + 67, + 446, + 291, + 581 + ], + "type": "text", + "content": "In conclusion, LOCAGENT enhances code localization by structuring codebases as graphs, enabling efficient repository-level exploration for LLM agents. With fine-tuned open-source models, our method achieves high localization accuracy while significantly reducing costs compared to larger proprietary models. 
Experimental results demonstrate the effectiveness of LOCAGENT in identifying relevant code components and improving downstream tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 592, + 130, + 603 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 592, + 130, + 603 + ], + "spans": [ + { + "bbox": [ + 67, + 592, + 130, + 603 + ], + "type": "text", + "content": "Limitations" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 613, + 290, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 613, + 290, + 734 + ], + "spans": [ + { + "bbox": [ + 67, + 613, + 290, + 734 + ], + "type": "text", + "content": "First, our study primarily focused on fine-tuning Qwen-2.5-Coder models. Exploring a broader range of base models, including other open-source LLMs like CodeLlama, Mistral, or Yi, could provide valuable insights into model selection trade-offs. Additionally, investigating different finetuning approaches beyond LoRA, such as full finetuning or other parameter-efficient methods, could potentially yield better performance." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 735, + 290, + 776 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 735, + 290, + 776 + ], + "spans": [ + { + "bbox": [ + 67, + 735, + 290, + 776 + ], + "type": "text", + "content": "Second, though we demonstrated improved bug repair performance with better localization, we only scratched the surface of potential downstream" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 232, + 526, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 232, + 526, + 312 + ], + "spans": [ + { + "bbox": [ + 302, + 232, + 526, + 312 + ], + "type": "text", + "content": "applications. Future work should evaluate LocAgent's impact on other software engineering tasks like refactoring, feature addition, security vulnerability patching, and performance optimization. 
This would provide a more comprehensive understanding of the framework's practical utility." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 313, + 526, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 313, + 526, + 449 + ], + "spans": [ + { + "bbox": [ + 302, + 313, + 526, + 449 + ], + "type": "text", + "content": "Moreover, our fine-tuning process relied heavily on trajectories generated by Claude-3.5 and the fine-tuned Qwen2.5-32B model. A more diverse training dataset incorporating examples from different models, tasks, and repositories could improve the robustness and generalization of fine-tuned models. Additionally, analyzing the impact of different dataset compositions and filtering strategies on model performance could yield valuable insights." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 449, + 527, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 449, + 527, + 558 + ], + "spans": [ + { + "bbox": [ + 302, + 449, + 527, + 558 + ], + "type": "text", + "content": "Finally, the current evaluation focuses primarily on Python codebases. Extending LOCAGENT to support other programming languages and evaluating its performance across different language paradigms would better demonstrate its generalizability. Further, our evaluation metrics could be expanded to include more nuanced measures of localization quality beyond accuracy and NDCG." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 581, + 362, + 593 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 581, + 362, + 593 + ], + "spans": [ + { + "bbox": [ + 304, + 581, + 362, + 593 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 303, + 600, + 526, + 775 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 303, + 600, + 524, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 600, + 524, + 624 + ], + "spans": [ + { + "bbox": [ + 303, + 600, + 524, + 624 + ], + "type": "text", + "content": "Aider. 2023. Building a better repository map with tree sitter. Accessed: April 15, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 303, + 632, + 525, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 632, + 525, + 655 + ], + "spans": [ + { + "bbox": [ + 303, + 632, + 525, + 655 + ], + "type": "text", + "content": "Anthropic. 2023. Claude: Conversational ai by anthropic. Accessed: January 21, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 303, + 664, + 526, + 698 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 664, + 526, + 698 + ], + "spans": [ + { + "bbox": [ + 303, + 664, + 526, + 698 + ], + "type": "text", + "content": "artificialanalysis.ai. 2025. Artificial analysis. https://artificialanalysis.ai/models/. Accessed: 2025-04-28." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 303, + 708, + 526, + 775 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 708, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 303, + 708, + 526, + 775 + ], + "type": "text", + "content": "Marcel Böhme, Ezekiel O Soremekun, Sudipta Chattopadhyay, Emamurho Ugherughe, and Andreas Zeller. 2017. Where is the bug and how is it fixed? an experiment with practitioners. 
In Proceedings of the 2017 11th joint meeting on foundations of software engineering, pages 117-128." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 773 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 289, + 160 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 289, + 160 + ], + "type": "text", + "content": "Zehui Chen, Weihua Du, Wenwei Zhang, Kuikun Liu, Jiangning Liu, Miao Zheng, Jingming Zhuo, Songyang Zhang, Dahua Lin, Kai Chen, et al. 2024. T-eval: Evaluating the tool utilization capability of large language models step by step. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9510-9529." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 169, + 289, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 169, + 289, + 190 + ], + "spans": [ + { + "bbox": [ + 69, + 169, + 289, + 190 + ], + "type": "text", + "content": "Cognition.ai. 2024. Introducing devin, the first ai software engineer." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 200, + 289, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 200, + 289, + 265 + ], + "spans": [ + { + "bbox": [ + 69, + 200, + 289, + 265 + ], + "type": "text", + "content": "John Ellson, Emden Gansner, Lefteris Koutsofios, Stephen C North, and Gordon Woodhull. 2002. Graphviz—open source graph drawing tools. In Graph Drawing: 9th International Symposium, GD 2001 Vienna, Austria, September 23–26, 2001 Revised Papers 9, pages 483–484. Springer." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 275, + 289, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 275, + 289, + 307 + ], + "spans": [ + { + "bbox": [ + 69, + 275, + 289, + 307 + ], + "type": "text", + "content": "Bahare Fatemi, Jonathan Halcrow, and Bryan Perozzi. 2023. Talk like a graph: Encoding graphs for large language models. arXiv preprint arXiv:2310.04560." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 317, + 289, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 317, + 289, + 338 + ], + "spans": [ + { + "bbox": [ + 69, + 317, + 289, + 338 + ], + "type": "text", + "content": "Paul Gauthier. 2024. How aider scored sota " + }, + { + "bbox": [ + 69, + 317, + 289, + 338 + ], + "type": "inline_equation", + "content": "26.3\\%" + }, + { + "bbox": [ + 69, + 317, + 289, + 338 + ], + "type": "text", + "content": " on swe bench lite | aider. Accessed: January 21, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 347, + 289, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 347, + 289, + 402 + ], + "spans": [ + { + "bbox": [ + 69, + 347, + 289, + 402 + ], + "type": "text", + "content": "Jiafeng Guo, Yixing Fan, Qingyao Ai, and W Bruce Croft. 2016. A deep relevance matching model for ad-hoc retrieval. In Proceedings of the 25th ACM international on conference on information and knowledge management, pages 55-64." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 411, + 289, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 411, + 289, + 465 + ], + "spans": [ + { + "bbox": [ + 69, + 411, + 289, + 465 + ], + "type": "text", + "content": "Jiafeng Guo, Yixing Fan, Liang Pang, Liu Yang, Qingyao Ai, Hamed Zamani, Chen Wu, W Bruce Croft, and Xueqi Cheng. 2020. A deep look into neural ranking models for information retrieval. Information Processing & Management, 57(6):102067." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 475, + 289, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 475, + 289, + 518 + ], + "spans": [ + { + "bbox": [ + 69, + 475, + 289, + 518 + ], + "type": "text", + "content": "Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In International conference on machine learning, pages 3929-3938. PMLR." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 527, + 289, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 527, + 289, + 581 + ], + "spans": [ + { + "bbox": [ + 69, + 527, + 289, + 581 + ], + "type": "text", + "content": "Michael Gunther, Louis Milliken, Jonathan Geuter, Georgios Mastrupas, Bo Wang, and Han Xiao. 2023. Jina embeddings: A novel set of high-performance sentence embedding models. Preprint, arXiv:2307.11224." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 591, + 289, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 591, + 289, + 645 + ], + "spans": [ + { + "bbox": [ + 69, + 591, + 289, + 645 + ], + "type": "text", + "content": "Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 655, + 289, + 742 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 655, + 289, + 742 + ], + "spans": [ + { + "bbox": [ + 69, + 655, + 289, + 742 + ], + "type": "text", + "content": "Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, Kai Dang, Yang Fan, Yichang Zhang, An Yang, Rui Men, Fei Huang, Bo Zheng, Yibo Miao, Shanghaoran Quan, Yunlong Feng, Xingzhang Ren, Xuancheng Ren, Jingren Zhou, and Junyang Lin. 2024. 
Qwen2.5-coder technical report. Preprint, arXiv:2409.12186." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 751, + 289, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 751, + 289, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 751, + 289, + 773 + ], + "type": "text", + "content": "Hyperbolic. 2025. Hyperbolic website. https:// hyperbolic.xyz/. Accessed: 2025-04-15." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 524, + 772 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 304, + 72, + 524, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 524, + 126 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 524, + 126 + ], + "type": "text", + "content": "Carlos E Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik Narasimhan. 2023. Swe-bench: Can language models resolve real-world github issues? arXiv preprint arXiv:2310.06770." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 136, + 524, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 136, + 524, + 169 + ], + "spans": [ + { + "bbox": [ + 304, + 136, + 524, + 169 + ], + "type": "text", + "content": "Sungmin Kang, Gabin An, and Shin Yoo. 2023. A preliminary evaluation of llm-based fault localization. arXiv preprint arXiv:2308.05487." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 178, + 524, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 178, + 524, + 222 + ], + "spans": [ + { + "bbox": [ + 304, + 178, + 524, + 222 + ], + "type": "text", + "content": "Sungmin Kang, Gabin An, and Shin Yoo. 2024. A quantitative and qualitative evaluation of llm-based explainable fault localization. Proceedings of the ACM on Software Engineering, 1(FSE):1424-1446." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 230, + 524, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 230, + 524, + 296 + ], + "spans": [ + { + "bbox": [ + 304, + 230, + 524, + 296 + ], + "type": "text", + "content": "Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, et al. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems, 33:9459-9474." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 305, + 524, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 305, + 524, + 359 + ], + "spans": [ + { + "bbox": [ + 304, + 305, + 524, + 359 + ], + "type": "text", + "content": "Xiangyan Liu, Bo Lan, Zhiyuan Hu, Yang Liu, Zhicheng Zhang, Fei Wang, Michael Shieh, and Wenmeng Zhou. 2024. Codexgraph: Bridging large language models and code repositories via code graph databases. Preprint, arXiv:2408.03910." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 369, + 524, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 369, + 524, + 456 + ], + "spans": [ + { + "bbox": [ + 304, + 369, + 524, + 456 + ], + "type": "text", + "content": "Qingsong Lv, Ming Ding, Qiang Liu, Yuxiang Chen, Wenzheng Feng, Siming He, Chang Zhou, Jianguo Jiang, Yuxiao Dong, and Jie Tang. 2021. Are we really making much progress? revisiting, benchmarking and refining heterogeneous graph neural networks. In Proceedings of the 27th ACM SIGKDD conference on knowledge discovery & data mining, pages 1150-1160." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 465, + 524, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 465, + 524, + 510 + ], + "spans": [ + { + "bbox": [ + 304, + 465, + 524, + 510 + ], + "type": "text", + "content": "Yingwei Ma, Qingping Yang, Rongyu Cao, Binhua Li, Fei Huang, and Yongbin Li. 2024. How to understand whole software repository? arXiv e-prints, pages arXiv-2406." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 518, + 524, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 518, + 524, + 572 + ], + "spans": [ + { + "bbox": [ + 304, + 518, + 524, + 572 + ], + "type": "text", + "content": "Niels Mündler, Mark Müller, Jingxuan He, and Martin Vechev. 2024. Swt-bench: Testing and validating real-world bug-fixes with code agents. Advances in Neural Information Processing Systems, 37:81857-81887." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 581, + 524, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 581, + 524, + 604 + ], + "spans": [ + { + "bbox": [ + 304, + 581, + 524, + 604 + ], + "type": "text", + "content": "OpenAI. 2023. Chatgpt: Language model by openai. Accessed: January 21, 2025." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 613, + 524, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 613, + 524, + 678 + ], + "spans": [ + { + "bbox": [ + 304, + 613, + 524, + 678 + ], + "type": "text", + "content": "Siru Ouyang, Wenhao Yu, Kaixin Ma, Zilin Xiao, Zhihan Zhang, Mengzhao Jia, Jiawei Han, Hongming Zhang, and Dong Yu. 2025. Repograph: Enhancing AI software engineering with repository-level code graph. In The Thirteenth International Conference on Learning Representations." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 687, + 524, + 710 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 687, + 524, + 710 + ], + "spans": [ + { + "bbox": [ + 304, + 687, + 524, + 710 + ], + "type": "text", + "content": "PerplexityAI. 2023. Perplexity ai: An ai-powered search engine. Accessed: January 21, 2025." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 718, + 524, + 772 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 718, + 524, + 772 + ], + "spans": [ + { + "bbox": [ + 304, + 718, + 524, + 772 + ], + "type": "text", + "content": "Yihao Qin, Shangwen Wang, Yiling Lou, Jinhao Dong, Kaixin Wang, Xiaoling Li, and Xiaoguang Mao. 2024. Agentfl: Scaling llm-based fault localization to project-level context. arXiv preprint arXiv:2403.16362." + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 773 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 291, + 139 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 291, + 139 + ], + "type": "text", + "content": "Chen Qu, Liu Yang, Cen Chen, Minghui Qiu, W Bruce Croft, and Mohit Iyyer. 2020. Open-retrieval conversational question answering. In Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval, pages 539-548." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 148, + 290, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 148, + 290, + 192 + ], + "spans": [ + { + "bbox": [ + 69, + 148, + 290, + 192 + ], + "type": "text", + "content": "Stephen Robertson, Hugo Zaragoza, et al. 2009. 
The probabilistic relevance framework: Bm25 and beyond. Foundations and Trends® in Information Retrieval, 3(4):333-389." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 201, + 290, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 201, + 290, + 235 + ], + "spans": [ + { + "bbox": [ + 69, + 201, + 290, + 235 + ], + "type": "text", + "content": "Stephen E. Robertson, Steve Walker, Susan Jones, Micheline Hancock-Beaulieu, and Mike Gatford. 1994. Okapi at trec-3. In Text Retrieval Conference." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 244, + 290, + 299 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 244, + 290, + 299 + ], + "spans": [ + { + "bbox": [ + 69, + 244, + 290, + 299 + ], + "type": "text", + "content": "Tarun Suresh, Revanth Gangi Reddy, Yifei Xu, Zach Nussbaum, Andriy Mulyar, Brandon Duderstadt, and Heng Ji. 2024. Cornstack: High-quality contrastive data for better code ranking. arXiv preprint arXiv:2412.01007." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 309, + 290, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 309, + 290, + 374 + ], + "spans": [ + { + "bbox": [ + 69, + 309, + 290, + 374 + ], + "type": "text", + "content": "David A. Tomassi, Naji Dmeiri, Yichen Wang, Antara Bhowmick, Yen-Chuan Liu, Premkumar Devanbu, Bogdan Vasilescu, and Cindy Rubio-Gonzalez. 2019. Bugswarm: Mining and continuously growing a dataset of reproducible failures and fixes. Preprint, arXiv:1903.06725." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 384, + 289, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 384, + 289, + 406 + ], + "spans": [ + { + "bbox": [ + 69, + 384, + 289, + 406 + ], + "type": "text", + "content": "VoyageAI. 2024. Voyage-code-2: Elevate your code retrieval. Accessed: 2024-02-02." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 416, + 290, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 416, + 290, + 470 + ], + "spans": [ + { + "bbox": [ + 69, + 416, + 290, + 470 + ], + "type": "text", + "content": "Liang Wang, Nan Yang, Xiaolong Huang, Bixing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, and Furu Wei. 2022. Text embeddings by weakly-supervised contrastive pre-training. arXiv preprint arXiv:2212.03533." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 481, + 290, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 481, + 290, + 591 + ], + "spans": [ + { + "bbox": [ + 69, + 481, + 290, + 591 + ], + "type": "text", + "content": "Xingyao Wang, Boxuan Li, Yufan Song, Frank F. Xu, Xiangru Tang, Mingchen Zhuge, Jiayi Pan, Yueqi Song, Bowen Li, Jaskirat Singh, Hoang H. Tran, Fuqiang Li, Ren Ma, Mingzhang Zheng, Bill Qian, Yanjun Shao, Niklas Muennighoff, Yizhe Zhang, Binyuan Hui, Junyang Lin, Robert Brennan, Hao Peng, Heng Ji, and Graham Neubig. 2025. Open hands: An open platform for AI software developers as generalist agents. In The Thirteenth International Conference on Learning Representations." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 600, + 290, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 600, + 290, + 655 + ], + "spans": [ + { + "bbox": [ + 69, + 600, + 290, + 655 + ], + "type": "text", + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2023a. Self-consistency improves chain of thought reasoning in language models. Preprint, arXiv:2203.11171." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 665, + 290, + 719 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 665, + 290, + 719 + ], + "spans": [ + { + "bbox": [ + 69, + 665, + 290, + 719 + ], + "type": "text", + "content": "Yue Wang, Hung Le, Akhilesh Deepak Gotmare, Nghi D. Q. Bui, Junnan Li, and Steven C. H. Hoi. 2023b. Codet5+: Open code large language models for code understanding and generation. Preprint, arXiv:2305.07922." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 729, + 290, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 729, + 290, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 729, + 290, + 773 + ], + "type": "text", + "content": "Zora Zhiruo Wang, Akari Asai, Xinyan Velocity Yu, Frank F. Xu, Yiqing Xie, Graham Neubig, and Daniel Fried. 2024. Coderag-bench: Can retrieval augment code generation? Preprint, arXiv:2406.14497." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 525, + 369 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 305, + 72, + 525, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 525, + 116 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 525, + 116 + ], + "type": "text", + "content": "Yonghao Wu, Zheng Li, Jie M Zhang, Mike Papadakis, Mark Harman, and Yong Liu. 2023. Large language models in fault localisation. arXiv preprint arXiv:2308.15276." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 125, + 525, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 125, + 525, + 169 + ], + "spans": [ + { + "bbox": [ + 304, + 125, + 525, + 169 + ], + "type": "text", + "content": "Chunqiu Steven Xia, Yinlin Deng, Soren Dunn, and Lingming Zhang. 2024. Agentless: Demystifying llm-based software engineering agents. arXiv preprint arXiv:2407.01489." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 178, + 525, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 178, + 525, + 233 + ], + "spans": [ + { + "bbox": [ + 304, + 178, + 525, + 233 + ], + "type": "text", + "content": "John Yang, Carlos E Jimenez, Alexander Wettig, Kili-ian Lieret, Shunyu Yao, Karthik Narasimhan, and Ofir Press. 2024. Swe-agent: Agent-computer interfaces enable automated software engineering. arXiv preprint arXiv:2405.15793." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 241, + 525, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 241, + 525, + 285 + ], + "spans": [ + { + "bbox": [ + 304, + 241, + 525, + 285 + ], + "type": "text", + "content": "Zhongming Yu, Hejia Zhang, Yujie Zhao, Hanxian Huang, Matrix Yao, Ke Ding, and Jishen Zhao. 2025. Ocaloca: An llm agent framework for software issue localization. arXiv preprint arXiv:2502.00350." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 294, + 525, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 294, + 525, + 349 + ], + "spans": [ + { + "bbox": [ + 304, + 294, + 525, + 349 + ], + "type": "text", + "content": "Dejiao Zhang, Wasi Uddin Ahmad, Ming Tan, Hantian Ding, Ramesh Nallapati, Dan Roth, Xiaofei Ma, and Bing Xiang. 2024. CODE REPRESENTATION LEARNING AT SCALE. In The Twelfth International Conference on Learning Representations." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 358, + 452, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 358, + 452, + 369 + ], + "spans": [ + { + "bbox": [ + 304, + 358, + 452, + 369 + ], + "type": "text", + "content": "Albert Örwall. 2024. Moatless tools." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 70, + 230, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 70, + 230, + 84 + ], + "spans": [ + { + "bbox": [ + 67, + 70, + 230, + 84 + ], + "type": "text", + "content": "A LOCAGENT Design Details" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 92, + 190, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 92, + 190, + 105 + ], + "spans": [ + { + "bbox": [ + 67, + 92, + 190, + 105 + ], + "type": "text", + "content": "A.1 Tool Output Design" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 110, + 278, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 110, + 278, + 137 + ], + "spans": [ + { + "bbox": [ + 67, + 110, + 278, + 137 + ], + "type": "text", + "content": "A.1.1 Three-level format for SearchEntity output" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 140, + 290, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 140, + 290, + 260 + ], + "spans": [ + { + "bbox": [ + 67, + 140, + 290, + 260 + ], + "type": "text", + "content": "Once invoked by the LLM agent, the retrieval APIs search for files, classes, methods, and code snippets in the codebase, and return the results back to the agent. To avoid forming very lengthy code context that may containing noisy information to LLM, we return only necessary information as API outputs. To achieve this, we desgined four granular standard output formats (Figure 6): fold, preview, full code." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 269, + 279, + 296 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 269, + 279, + 296 + ], + "spans": [ + { + "bbox": [ + 67, + 269, + 279, + 296 + ], + "type": "text", + "content": "A.1.2 Tree-based Subgraph Formatting for TraverseGraph Output" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 300, + 290, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 300, + 290, + 434 + ], + "spans": [ + { + "bbox": [ + 67, + 300, + 290, + 434 + ], + "type": "text", + "content": "The TraverseGraph tool traverses the code graph and returns a local subgraph for each input entity. The agent reasons about these subgraphs to understand each entity's complex dependencies. However, reasoning about graphs remains challenging for LLMs. Research by (Fatemi et al., 2023) demonstrates that LLM performance varies significantly based on graph formatting (how graphs are encoded as text). This makes the format design for output subgraphs crucial." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 435, + 292, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 435, + 292, + 569 + ], + "spans": [ + { + "bbox": [ + 67, + 435, + 292, + 569 + ], + "type": "text", + "content": "We have developed a new tree-based format, shown in Figure 7, with several features that enhance LLM reasoning: (1) We represent subgraphs as trees, allowing LLMs to use indentation to determine a node's distance from the root, (2) We display complete entity IDs for each node (e.g., django/core-validators.py:RegexValidator) to help LLMs locate nodes easily, and (3) We explicitly specify relation types for each edge, including reversed relations" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 571, + 291, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 571, + 291, + 704 + ], + "spans": [ + { + "bbox": [ + 67, + 571, + 291, + 704 + ], + "type": "text", + "content": "To evaluate how different graph formats impact code localization performance, we conducted an experiment using 37 challenging samples from SWEBench-Lite. These samples were considered \"challenging\" because they could not be solved by any baseline agent methods. Using Claude-3.5 as the Localization Model across all settings, we compared various output formats. Table 9 presents our findings. The baseline output formats we tested are described below:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 80, + 716, + 290, + 743 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 716, + 290, + 743 + ], + "spans": [ + { + "bbox": [ + 80, + 716, + 290, + 743 + ], + "type": "text", + "content": "- row: For each line, list one row of the adjacency matrix. 
For example," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 88, + 748, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 748, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 88, + 748, + 290, + 775 + ], + "type": "text", + "content": "function \"fileA.py:funcA\" invokes function \"fileA.py:funcB\", \"fileA.py:funcC\"" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 315, + 71, + 525, + 160 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 315, + 71, + 525, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 71, + 525, + 97 + ], + "spans": [ + { + "bbox": [ + 315, + 71, + 525, + 97 + ], + "type": "text", + "content": "- row (w/ entity attributes): Additionally include entity attributes for format row." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 316, + 106, + 525, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 106, + 525, + 160 + ], + "spans": [ + { + "bbox": [ + 316, + 106, + 525, + 160 + ], + "type": "text", + "content": "- incident: The incident format mentioned in (Fatemi et al., 2023). An integer instead of entity ID is used to represent each node. For example," + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 322, + 165, + 524, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 165, + 524, + 205 + ], + "spans": [ + { + "bbox": [ + 322, + 165, + 524, + 205 + ], + "type": "text", + "content": "Map function \"fileA.py:funcA\" to index 0. Map function \"fileA.py:funcB\" to index 1. Map function \"fileA.py:funcC\" to index 2." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 323, + 210, + 464, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 210, + 464, + 223 + ], + "spans": [ + { + "bbox": [ + 323, + 210, + 464, + 223 + ], + "type": "text", + "content": "function " + }, + { + "bbox": [ + 323, + 210, + 464, + 223 + ], + "type": "inline_equation", + "content": "O" + }, + { + "bbox": [ + 323, + 210, + 464, + 223 + ], + "type": "text", + "content": " invokes function 1,2." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 231, + 524, + 293 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 316, + 231, + 524, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 231, + 524, + 258 + ], + "spans": [ + { + "bbox": [ + 316, + 231, + 524, + 258 + ], + "type": "text", + "content": "Graphviz DOT: Represent graph in Graphviz DOT language (Ellson et al., 2002)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 267, + 524, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 267, + 524, + 293 + ], + "spans": [ + { + "bbox": [ + 316, + 267, + 524, + 293 + ], + "type": "text", + "content": "- JSON: Expand the subgraph as a tree, and convert it to JSON format." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 302, + 302, + 526, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 302, + 526, + 464 + ], + "spans": [ + { + "bbox": [ + 302, + 302, + 526, + 464 + ], + "type": "text", + "content": "As shown in Table 9, expanding subgraphs as trees (i.e., JSON, tree-based) can significantly improve the performance. Our tree-based format achieves the best overall performance across different levels of localization tasks. We also test returning entity attributes along with subgraphs. 
We notice that row (w/ entity attributes) consistently underperforms row, indicating the attributes for all the nodes may be very noisy. Besides, although using incident format can simplify the output and show improvements in file-level localization, it degradation the module- and file-level localization." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 474, + 408, + 486 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 474, + 408, + 486 + ], + "spans": [ + { + "bbox": [ + 302, + 474, + 408, + 486 + ], + "type": "text", + "content": "A.2 Implementation" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 301, + 491, + 525, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 491, + 525, + 693 + ], + "spans": [ + { + "bbox": [ + 301, + 491, + 525, + 693 + ], + "type": "text", + "content": "To enable the LLM agent to invoke the Code Localization APIs, we handle the interaction differently based on the LLM's capabilities. For LLMs that support tool-calling features, we define the tools as a list of JSON objects, which are then used as parameters for the API calls. For LLMs that do not support tool-calling (such as Qwen), we provide the description of the API and the expected output as part of the LLM's prompt. When the agent decides to invoke a set of retrieval APIs, it responds with a list of API call names and their corresponding arguments. These retrieval API requests are processed locally by searching over the built code graph. The results from executing these APIs locally are returned to the agent." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 302, + 694, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 694, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 694, + 525, + 775 + ], + "type": "text", + "content": "By default, we query the LLM with a temperature setting of 1.0. 
We conduct two interactions, after which we rerank the results based on mean reciprocal rank (MRR) scores. We also leverage multiprocessing execution to speed up the process. Since all our tools are read-only, LOCAGENT does" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 72, + 218, + 289 + ], + "blocks": [ + { + "bbox": [ + 76, + 72, + 218, + 289 + ], + "lines": [ + { + "bbox": [ + 76, + 72, + 218, + 289 + ], + "spans": [ + { + "bbox": [ + 76, + 72, + 218, + 289 + ], + "type": "image", + "image_path": "e3da4d4339e99b1a14d6f8d73ea975c643cc44ad95a8dc1803fff42294d4f99a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 297, + 525, + 333 + ], + "lines": [ + { + "bbox": [ + 67, + 297, + 525, + 333 + ], + "spans": [ + { + "bbox": [ + 67, + 297, + 525, + 333 + ], + "type": "text", + "content": "Figure 6: Different output formats designed for efficient agent-code interaction. Left: Full code output when matched entities " + }, + { + "bbox": [ + 67, + 297, + 525, + 333 + ], + "type": "inline_equation", + "content": "\\leq 3" + }, + { + "bbox": [ + 67, + 297, + 525, + 333 + ], + "type": "text", + "content": ". Middle: Preview output showing module skeleton for large files. Right: Fold output showing only entity IDs when matches " + }, + { + "bbox": [ + 67, + 297, + 525, + 333 + ], + "type": "inline_equation", + "content": ">3" + }, + { + "bbox": [ + 67, + 297, + 525, + 333 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 223, + 73, + 370, + 289 + ], + "blocks": [ + { + "bbox": [ + 223, + 73, + 370, + 289 + ], + "lines": [ + { + "bbox": [ + 223, + 73, + 370, + 289 + ], + "spans": [ + { + "bbox": [ + 223, + 73, + 370, + 289 + ], + "type": "image", + "image_path": "003dcde246a439f5b9b36cc33df7c37daaa5e4eaf0478ed30be80b30cbe72965.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 372, + 73, + 519, + 289 + ], + "blocks": [ + { + "bbox": [ + 372, + 73, + 519, + 289 + ], + "lines": [ + { + "bbox": [ + 372, + 73, + 519, + 289 + ], + "spans": [ + { + "bbox": [ + 372, + 73, + 519, + 289 + ], + "type": "image", + "image_path": "86b464c56ef20eb2e4a58b1077f46f88aa113910e37ea427df9e03b94670489d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 160, + 345, + 433, + 535 + ], + "blocks": [ + { + "bbox": [ + 160, + 345, + 433, + 535 + ], + "lines": [ + { + "bbox": [ + 160, + 345, + 433, + 535 + ], + "spans": [ + { + "bbox": [ + 160, + 345, + 433, + 535 + ], + "type": "image", + "image_path": "503072355158697ea6cb6ac7e2712ec0ffc412f1cd70207465e4cb36e21f3c9b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 543, + 524, + 556 + ], + "lines": [ + { + "bbox": [ + 67, + 543, + 524, + 556 + ], + "spans": [ + { + "bbox": [ + 67, + 543, + 524, + 556 + ], + "type": "text", + "content": "Figure 7: A truncated example of the expanded tree-based format for the output subgraph of tool TraverseGraph." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 577, + 289, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 577, + 289, + 603 + ], + "spans": [ + { + "bbox": [ + 67, + 577, + 289, + 603 + ], + "type": "text", + "content": "not require a specialized Docker environment to operate." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 614, + 265, + 626 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 614, + 265, + 626 + ], + "spans": [ + { + "bbox": [ + 68, + 614, + 265, + 626 + ], + "type": "text", + "content": "B Dataset construction and statistics" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 635, + 227, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 635, + 227, + 647 + ], + "spans": [ + { + "bbox": [ + 68, + 635, + 227, + 647 + ], + "type": "text", + "content": "B.1 Dataset construction details" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 653, + 290, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 653, + 290, + 747 + ], + "spans": [ + { + "bbox": [ + 67, + 653, + 290, + 747 + ], + "type": "text", + "content": "Example collection. We collected examples on popular Python repositories on Github follow (Jimenez et al., 2023). To gather issues related to performance and security, we searched for the keywords listed in Table 10 using the GitHub Search APIs. We then used GPT-4o-2024-0513 as the classifier based on the issue descriptions." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 748, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 748, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 748, + 290, + 775 + ], + "type": "text", + "content": "Ground Truth Locations. 
The affected files or functions in the original codebase, as identified in" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 577, + 526, + 751 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 577, + 526, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 577, + 526, + 751 + ], + "type": "text", + "content": "the patches, are considered the target locations for the given issue. While it is possible to fix a bug in a location different from the ground truth, the extracted ground-truth locations still serve as approximate targets for localization. Additionally, edited code such as documents, import statements, and comments are excluded from the localization target. These elements are not considered relevant for bug localization, as they do not directly impact the functionality of the code or its execution. By filtering out these elements, the focus is maintained on the core code changes that are relevant for localization." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 94, + 68, + 500, + 190 + ], + "blocks": [ + { + "bbox": [ + 94, + 68, + 500, + 190 + ], + "lines": [ + { + "bbox": [ + 94, + 68, + 500, + 190 + ], + "spans": [ + { + "bbox": [ + 94, + 68, + 500, + 190 + ], + "type": "table", + "html": "
Output FormatFile(%)Module(%)Function(%)
Acc@1Acc@3Acc@5Acc@5Acc@10Acc@5Acc@10
row41.1867.6570.5961.7661.7635.2938.24
row (w/ entity attributes)41.1864.7164.7150.0050.0032.3532.35
incident41.1870.5973.5355.8855.8829.4132.35
Graphviz DOT41.1873.5382.3564.7164.7135.2935.29
JSON41.1867.6576.4767.6570.5938.2441.18
tree-based (Ours)47.0679.4179.4164.7164.7138.2441.18
", + "image_path": "65aa2b242676fd4ce7ba9e60ceadb537140f50dff384a8c6651710edad591512.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 94, + 219, + 500, + 370 + ], + "blocks": [ + { + "bbox": [ + 129, + 195, + 462, + 208 + ], + "lines": [ + { + "bbox": [ + 129, + 195, + 462, + 208 + ], + "spans": [ + { + "bbox": [ + 129, + 195, + 462, + 208 + ], + "type": "text", + "content": "Table 9: Localization performance under different TraverseGraph output formats." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 94, + 219, + 500, + 370 + ], + "lines": [ + { + "bbox": [ + 94, + 219, + 500, + 370 + ], + "spans": [ + { + "bbox": [ + 94, + 219, + 500, + 370 + ], + "type": "table", + "html": "
CategoryKeywords
Performancebottleneck, performance improvement, memory usage optimization, time complexity reduction, latency improvement, scalability improvement, CPU usage reduction, caching improvement, concurrency optimization
SecurityOut-of-bounds Write, Out-of-bounds Read, NULL Pointer Dereference, Missing Authorization, memory leak fix, security vulnerability, security issue, authentication bypass, authentication issue, better maintained, buffer overflow, denial of service, security hardening, security patch, unsafe deserialization, Use After Free, Integer Overflow or Wraparound, Uncontrolled Resource Consumption, Missing Authentication for Critical Function
", + "image_path": "2976dea7e0ff07dfee13b430fc8f8efa94af68a9c7230eb416ed229809e5c751.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 377, + 524, + 391 + ], + "lines": [ + { + "bbox": [ + 67, + 377, + 524, + 391 + ], + "spans": [ + { + "bbox": [ + 67, + 377, + 524, + 391 + ], + "type": "text", + "content": "Table 10: We use these Keywords to search for Performance and Security related issues with Github Search APIs." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 411, + 214, + 425 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 411, + 214, + 425 + ], + "spans": [ + { + "bbox": [ + 67, + 411, + 214, + 425 + ], + "type": "text", + "content": "C Additional Experiments" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 433, + 208, + 446 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 433, + 208, + 446 + ], + "spans": [ + { + "bbox": [ + 67, + 433, + 208, + 446 + ], + "type": "text", + "content": "C.1 Implementation Details" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 450, + 227, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 450, + 227, + 464 + ], + "spans": [ + { + "bbox": [ + 67, + 450, + 227, + 464 + ], + "type": "text", + "content": "C.1.1 Baselines Implementation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 466, + 291, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 466, + 291, + 615 + ], + "spans": [ + { + "bbox": [ + 67, + 466, + 291, + 615 + ], + "type": "text", + "content": "Regarding the embedding-based methods in our evaluation, these approaches operate primarily at the function level, where each function is embedded as a separate unit. 
The function's context (its containing file and class) is appended to the function representation before embedding, rather than being embedded separately. While theoretically these methods could employ hierarchical indexing, the standard implementations we evaluated use flat indexing structures where each function is embedded as a single unit." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 616, + 291, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 616, + 291, + 670 + ], + "spans": [ + { + "bbox": [ + 67, + 616, + 291, + 670 + ], + "type": "text", + "content": "We use OpenHands's remote runtime feature to parallelize evaluation on OpenHands and SWEagent. We use Openhands version 0.12.0 released on Oct 31, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 677, + 280, + 704 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 677, + 280, + 704 + ], + "spans": [ + { + "bbox": [ + 67, + 677, + 280, + 704 + ], + "type": "text", + "content": "C.1.2 Quantifying Task Difficulty Based on Code Graph Distance" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "text", + "content": "We measure task difficulty by computing the average shortest hop distance between the functions mentioned in the issue descriptions and the patched functions within our code graph. 
Specifically, we first extract potential function names from each" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 412, + 526, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 412, + 526, + 547 + ], + "spans": [ + { + "bbox": [ + 302, + 412, + 526, + 547 + ], + "type": "text", + "content": "issue description using GPT-4o-2024-0513, and identify their corresponding nodes in the code graph using the global dictionary. These identified nodes form the set of predicted nodes, denoted as " + }, + { + "bbox": [ + 302, + 412, + 526, + 547 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 302, + 412, + 526, + 547 + ], + "type": "text", + "content": ". Similarly, we link the ground truth functions from the patch to their corresponding nodes in the code graph, forming the set of target nodes, denoted as " + }, + { + "bbox": [ + 302, + 412, + 526, + 547 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 302, + 412, + 526, + 547 + ], + "type": "text", + "content": ". 
To quantify the difficulty " + }, + { + "bbox": [ + 302, + 412, + 526, + 547 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 302, + 412, + 526, + 547 + ], + "type": "text", + "content": ", we calculate the average shortest hop distance between the predicted nodes " + }, + { + "bbox": [ + 302, + 412, + 526, + 547 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 302, + 412, + 526, + 547 + ], + "type": "text", + "content": " and the target nodes " + }, + { + "bbox": [ + 302, + 412, + 526, + 547 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 302, + 412, + 526, + 547 + ], + "type": "text", + "content": ", defined as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 343, + 552, + 485, + 583 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 552, + 485, + 583 + ], + "spans": [ + { + "bbox": [ + 343, + 552, + 485, + 583 + ], + "type": "interline_equation", + "content": "\\delta = \\frac {1}{| \\mathcal {C} |} \\sum_ {c \\in \\mathcal {C}} \\frac {1}{m i n _ {t \\in \\mathcal {T}} d (c , t) + 1}", + "image_path": "3501fac23dcf5fe773840a66ad8f7737fdc4bb7268e280936b833deb06905a3d.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 590, + 526, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 590, + 526, + 671 + ], + "spans": [ + { + "bbox": [ + 302, + 590, + 526, + 671 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 590, + 526, + 671 + ], + "type": "inline_equation", + "content": "d(c, t)" + }, + { + "bbox": [ + 302, + 590, + 526, + 671 + ], + "type": "text", + "content": " represents the shortest hop distance between nodes " + }, + { + "bbox": [ + 302, + 590, + 526, + 671 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 302, + 590, + 526, + 671 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 590, + 526, + 
671 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 302, + 590, + 526, + 671 + ], + "type": "text", + "content": " in the graph. For performance analysis stratified by difficulty, we round " + }, + { + "bbox": [ + 302, + 590, + 526, + 671 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 302, + 590, + 526, + 671 + ], + "type": "text", + "content": " down to " + }, + { + "bbox": [ + 302, + 590, + 526, + 671 + ], + "type": "inline_equation", + "content": "\\lfloor \\delta \\rfloor" + }, + { + "bbox": [ + 302, + 590, + 526, + 671 + ], + "type": "text", + "content": " to group samples by difficulty levels, and we exclude samples where the LLM fails to extract any valid function names." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 677, + 418, + 691 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 677, + 418, + 691 + ], + "spans": [ + { + "bbox": [ + 302, + 677, + 418, + 691 + ], + "type": "text", + "content": "C.1.3 Training details." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 694, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 694, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 694, + 525, + 775 + ], + "type": "text", + "content": "Fine-tuning Settings. We use Qwen-2.5-Coder-Instruct (Hui et al., 2024) 7B and 32B variants as our base models. We fine-tuned Qwen-2.5-Coder-Instruct 7B and 32B models on 768 training samples from the SWE-Bench training dataset, leveraging LoRA" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 71, + 69, + 523, + 239 + ], + "blocks": [ + { + "bbox": [ + 71, + 69, + 523, + 239 + ], + "lines": [ + { + "bbox": [ + 71, + 69, + 523, + 239 + ], + "spans": [ + { + "bbox": [ + 71, + 69, + 523, + 239 + ], + "type": "table", + "html": "
TypeMethodLoc-ModelFile (%)Module (%)Function (%)
NDCG@1NDCG@3NDCG@5NDCG@5NDCG@10NDCG@5NDCG@10
Embedding-BasedBM25 (Robertson et al., 2009)38.6946.550.6137.3139.8626.1527.92
E5-base-v2 (Wang et al., 2022)49.6464.1966.653.1554.4531.3935.3
Jina-Code-v2 (Günther et al., 2023)43.4359.9363.751.0254.1333.2836.44
Codesage-large-v2 (Zhang et al., 2024)47.8160.8264.3949.3852.2227.0330.74
CodeRankEmbed (Suresh et al., 2024)52.5567.5470.3957.5159.7640.2842.55
Procedure-BasedAgentless (Xia et al., 2024)GPT-4o67.1571.7671.7664.3164.3153.8153.81
Claude-3.572.6376.7276.8767.3667.3657.5557.55
Agent-BasedMoatlessTools (Örwall, 2024)GPT-4o73.3680.0380.3368.5769.0949.7750.62
Claude-3.572.6380.7380.8869.1169.1153.0353.16
SWE-agent (Yang et al., 2024)GPT-4o57.363.9664.1253.9553.9542.3242.44
Claude-3.577.3784.3284.9372.7772.959.6759.79
Openshands (Wang et al., 2025)GPT-4o60.9567.6268.3958.1858.644.3444.66
Claude-3.576.2884.2784.4375.7975.9263.1363.8
LocAgent (Ours)Qwen2.5-7B(ft)70.8079.3680.970.9971.6855.6258.09
Qwen2.5-32B(ft)75.9184.7485.6476.2876.7764.2765.93
Claude-3.577.7486.1987.1477.7378.164.3465.57
", + "image_path": "d58f8658622664aa6fdd9f0ba4233e824e52755bf040929bc8346fe186a5d5e3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 130, + 248, + 462, + 260 + ], + "lines": [ + { + "bbox": [ + 130, + 248, + 462, + 260 + ], + "spans": [ + { + "bbox": [ + 130, + 248, + 462, + 260 + ], + "type": "text", + "content": "Table 11: NDCG scores comparison showing ranking quality of different methods." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 282, + 290, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 282, + 290, + 376 + ], + "spans": [ + { + "bbox": [ + 67, + 282, + 290, + 376 + ], + "type": "text", + "content": "for efficient adaptation. The training set included 447 samples generated by Claude-3.5, while the remaining samples were iteratively generated using the fine-tuned Qwen2.5-32B model. The fine-tuning process was conducted over 5 epochs with max_token set to " + }, + { + "bbox": [ + 67, + 282, + 290, + 376 + ], + "type": "inline_equation", + "content": "128k" + }, + { + "bbox": [ + 67, + 282, + 290, + 376 + ], + "type": "text", + "content": " and a learning rate of " + }, + { + "bbox": [ + 67, + 282, + 290, + 376 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-4}" + }, + { + "bbox": [ + 67, + 282, + 290, + 376 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 386, + 131, + 400 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 386, + 131, + 400 + ], + "spans": [ + { + "bbox": [ + 68, + 386, + 131, + 400 + ], + "type": "text", + "content": "D Prompt" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 408, + 289, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 408, + 289, + 435 + ], + "spans": [ + { + "bbox": [ + 67, + 408, + 289, + 435 + ], + "type": "text", + "content": "In this section, we go through the prompt template that make up the agent's history." + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 214, + 102, + 223 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 214, + 102, + 223 + ], + "spans": [ + { + "bbox": [ + 76, + 214, + 102, + 223 + ], + "type": "text", + "content": "Prompt" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 226, + 518, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 226, + 518, + 242 + ], + "spans": [ + { + "bbox": [ + 76, + 226, + 518, + 242 + ], + "type": "text", + "content": "Given the following GitHub problem description, your objective is to localize the specific files, classes or functions, and lines of code that need modification or contain key information to resolve the issue." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 76, + 246, + 218, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 246, + 218, + 254 + ], + "spans": [ + { + "bbox": [ + 76, + 246, + 218, + 254 + ], + "type": "text", + "content": "Follow these steps to localize the issue:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 254, + 273, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 254, + 273, + 261 + ], + "spans": [ + { + "bbox": [ + 76, + 254, + 273, + 261 + ], + "type": "text", + "content": "Step 1: Categorize and Extract Key Problem Information" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 76, + 262, + 431, + 288 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 76, + 262, + 292, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 262, + 292, + 269 + ], + "spans": [ + { + "bbox": [ + 76, + 262, + 292, + 269 + ], + "type": "text", + "content": "- Classify the problem statement into the following categories:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 76, + 269, + 371, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 269, + 371, + 275 + ], + "spans": [ + { + "bbox": [ + 76, + 269, + 371, + 275 + ], + "type": "text", + "content": "Problem description, error trace, code to reproduce the bug, and additional context." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 76, + 275, + 350, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 275, + 350, + 281 + ], + "spans": [ + { + "bbox": [ + 76, + 275, + 350, + 281 + ], + "type": "text", + "content": "- Identify modules in the '{package_name}' package mentioned in each category." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 281, + 431, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 281, + 431, + 288 + ], + "spans": [ + { + "bbox": [ + 76, + 281, + 431, + 288 + ], + "type": "text", + "content": "- Use extracted keywords and line numbers to search for relevant code references for additional context." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 76, + 294, + 201, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 294, + 201, + 301 + ], + "spans": [ + { + "bbox": [ + 76, + 294, + 201, + 301 + ], + "type": "text", + "content": "Step 2: Locate Referenced Modules" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 83, + 302, + 211, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 302, + 211, + 307 + ], + "spans": [ + { + "bbox": [ + 83, + 302, + 211, + 307 + ], + "type": "text", + "content": "Accurately determine specific modules" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 83, + 308, + 295, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 308, + 295, + 314 + ], + "spans": [ + { + "bbox": [ + 83, + 308, + 295, + 314 + ], + "type": "text", + "content": "- Explore the repo to familiarize yourself with its structure." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 83, + 315, + 421, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 315, + 421, + 321 + ], + "spans": [ + { + "bbox": [ + 83, + 315, + 421, + 321 + ], + "type": "text", + "content": "- Analyze the described execution flow to identify specific modules or components being referenced." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 76, + 322, + 486, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 322, + 486, + 327 + ], + "spans": [ + { + "bbox": [ + 76, + 322, + 486, + 327 + ], + "type": "text", + "content": "- Pay special attention to distinguishing between modules with similar names using context and described execution flow." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 76, + 328, + 238, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 328, + 238, + 334 + ], + "spans": [ + { + "bbox": [ + 76, + 328, + 238, + 334 + ], + "type": "text", + "content": "- Output Format for collected relevant modules:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 83, + 335, + 228, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 335, + 228, + 341 + ], + "spans": [ + { + "bbox": [ + 83, + 335, + 228, + 341 + ], + "type": "text", + "content": "- Use the format: 'file path:QualifiedName'" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 83, + 342, + 500, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 342, + 500, + 348 + ], + "spans": [ + { + "bbox": [ + 83, + 342, + 500, + 348 + ], + "type": "text", + "content": "- E.q., for a function `calculate_sum` in the `MathUtilities` class located in `src/helpers/mathHelpers.py`, represent it as:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 93, + 349, + 275, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 349, + 275, + 355 + ], + "spans": [ + { + "bbox": [ + 93, + 349, + 275, + 355 + ], + "type": "text", + "content": "'src/helpers/mathHelpers.py:MathUtil calculator_sum'." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 76, + 360, + 235, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 360, + 235, + 368 + ], + "spans": [ + { + "bbox": [ + 76, + 360, + 235, + 368 + ], + "type": "text", + "content": "## Step 3: Analyze and Reproducing the Problem" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 83, + 369, + 193, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 369, + 193, + 375 + ], + "spans": [ + { + "bbox": [ + 83, + 369, + 193, + 375 + ], + "type": "text", + "content": "Clarify the Purpose of the Issue" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 83, + 375, + 425, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 375, + 425, + 382 + ], + "spans": [ + { + "bbox": [ + 83, + 375, + 425, + 382 + ], + "type": "text", + "content": "- If expanding capabilities: Identify where and how to incorporate new behavior, fields, or modules." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 83, + 383, + 395, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 383, + 395, + 389 + ], + "spans": [ + { + "bbox": [ + 83, + 383, + 395, + 389 + ], + "type": "text", + "content": "- If addressing unexpected behavior: Focus on localizing modules containing potential bugs." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 76, + 390, + 188, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 390, + 188, + 396 + ], + "spans": [ + { + "bbox": [ + 76, + 390, + 188, + 396 + ], + "type": "text", + "content": "- Reconstruct the execution flow" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 83, + 396, + 254, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 396, + 254, + 403 + ], + "spans": [ + { + "bbox": [ + 83, + 396, + 254, + 403 + ], + "type": "text", + "content": "- Identify main entry points triggering the issue." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 83, + 403, + 316, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 403, + 316, + 409 + ], + "spans": [ + { + "bbox": [ + 83, + 403, + 316, + 409 + ], + "type": "text", + "content": "- Trace function calls, class interactions, and sequences of events." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 83, + 410, + 258, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 410, + 258, + 416 + ], + "spans": [ + { + "bbox": [ + 83, + 410, + 258, + 416 + ], + "type": "text", + "content": "- Identify potential breakpoints causing the issue." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 83, + 417, + 394, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 417, + 394, + 423 + ], + "spans": [ + { + "bbox": [ + 83, + 417, + 394, + 423 + ], + "type": "text", + "content": "Important: Keep the reconstructed flow focused on the problem, avoiding irrelevant details." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 76, + 428, + 215, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 428, + 215, + 435 + ], + "spans": [ + { + "bbox": [ + 76, + 428, + 215, + 435 + ], + "type": "text", + "content": "## Step 4: Locate Areas for Modification" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 76, + 436, + 520, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 436, + 520, + 443 + ], + "spans": [ + { + "bbox": [ + 76, + 436, + 520, + 443 + ], + "type": "text", + "content": "- Locate specific files, functions, or lines of code requiring changes or containing critical information for resolving the issue." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 76, + 444, + 391, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 444, + 391, + 449 + ], + "spans": [ + { + "bbox": [ + 76, + 444, + 391, + 449 + ], + "type": "text", + "content": "- Consider upstream and downstream dependencies that may affect or be affected by the issue." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 76, + 450, + 354, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 450, + 354, + 456 + ], + "spans": [ + { + "bbox": [ + 76, + 450, + 354, + 456 + ], + "type": "text", + "content": "- If applicable, identify where to introduce new fields, functions, or variables." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 76, + 457, + 456, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 457, + 456, + 463 + ], + "spans": [ + { + "bbox": [ + 76, + 457, + 456, + 463 + ], + "type": "text", + "content": "- Think Thoroughly: List multiple potential solutions and consider edge cases that could impact the resolution." 
+ } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 76, + 469, + 196, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 469, + 196, + 476 + ], + "spans": [ + { + "bbox": [ + 76, + 469, + 196, + 476 + ], + "type": "text", + "content": "Output Format for Final Results:" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 76, + 477, + 408, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 477, + 408, + 483 + ], + "spans": [ + { + "bbox": [ + 76, + 477, + 408, + 483 + ], + "type": "text", + "content": "Your final output should list the locations requiring modification, wrapped with triple back ticks" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 76, + 484, + 506, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 484, + 506, + 490 + ], + "spans": [ + { + "bbox": [ + 76, + 484, + 506, + 490 + ], + "type": "text", + "content": "Each location should include the file path, class name (if applicable), function name, or line numbers, ordered by importance." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 76, + 491, + 238, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 491, + 238, + 497 + ], + "spans": [ + { + "bbox": [ + 76, + 491, + 238, + 497 + ], + "type": "text", + "content": "Your answer would better include about 5 files." 
+ } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 76, + 502, + 122, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 502, + 122, + 511 + ], + "spans": [ + { + "bbox": [ + 76, + 502, + 122, + 511 + ], + "type": "text", + "content": "Examples:" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 76, + 516, + 143, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 516, + 143, + 523 + ], + "spans": [ + { + "bbox": [ + 76, + 516, + 143, + 523 + ], + "type": "text", + "content": "full_path1/file1.py" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 77, + 524, + 105, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 524, + 105, + 529 + ], + "spans": [ + { + "bbox": [ + 77, + 524, + 105, + 529 + ], + "type": "text", + "content": "line: 10" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 77, + 530, + 129, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 530, + 129, + 537 + ], + "spans": [ + { + "bbox": [ + 77, + 530, + 129, + 537 + ], + "type": "text", + "content": "class: MyClass1" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 77, + 537, + 153, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 537, + 153, + 544 + ], + "spans": [ + { + "bbox": [ + 77, + 537, + 153, + 544 + ], + "type": "text", + "content": "function: my_function1" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 77, + 550, + 143, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 550, + 143, + 557 + ], + "spans": [ + { + "bbox": [ + 77, + 550, + 143, + 557 + ], + "type": "text", + "content": "full path2/file2.py" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 77, + 558, + 105, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 558, + 105, + 564 + ], + "spans": [ + { + "bbox": [ + 77, + 558, + 105, + 564 + ], + "type": "text", + "content": "line:76" + } + ] + } + ], 
+ "index": 42 + }, + { + "bbox": [ + 77, + 564, + 183, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 564, + 183, + 571 + ], + "spans": [ + { + "bbox": [ + 77, + 564, + 183, + 571 + ], + "type": "text", + "content": "function: MyClass2.my_function2" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 77, + 576, + 143, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 576, + 143, + 584 + ], + "spans": [ + { + "bbox": [ + 77, + 576, + 143, + 584 + ], + "type": "text", + "content": "full_path3/file3.py" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 77, + 585, + 105, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 585, + 105, + 591 + ], + "spans": [ + { + "bbox": [ + 77, + 585, + 105, + 591 + ], + "type": "text", + "content": "line: 24" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 77, + 591, + 109, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 591, + 109, + 597 + ], + "spans": [ + { + "bbox": [ + 77, + 591, + 109, + 597 + ], + "type": "text", + "content": "line: 156" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 77, + 597, + 153, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 597, + 153, + 604 + ], + "spans": [ + { + "bbox": [ + 77, + 597, + 153, + 604 + ], + "type": "text", + "content": "function: my_function3" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 77, + 604, + 86, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 604, + 86, + 609 + ], + "spans": [ + { + "bbox": [ + 77, + 604, + 86, + 609 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 76, + 617, + 169, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 617, + 169, + 624 + ], + "spans": [ + { + "bbox": [ + 76, + 617, + 169, + 624 + ], + "type": "text", + "content": "Return just the location(s)" + } + ] + } + ], + 
"index": 49 + }, + { + "bbox": [ + 76, + 625, + 330, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 625, + 330, + 632 + ], + "spans": [ + { + "bbox": [ + 76, + 625, + 330, + 632 + ], + "type": "text", + "content": "Note: Your thinking should be thorough and so it's fine if it's very long." + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 185, + 645, + 406, + 657 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 645, + 406, + 657 + ], + "spans": [ + { + "bbox": [ + 185, + 645, + 406, + 657 + ], + "type": "text", + "content": "Figure 8: The task instruction prompt for LOCAGENT." + } + ] + } + ], + "index": 51, + "type": "text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_content_list.json b/data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..7eb3b1a494a80ebd96c987af309453b45024734f --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_content_list.json @@ -0,0 +1,1213 @@ +[ + { + "type": "text", + "text": "A 3D particle visualization system for temperature management", + "text_level": 1, + "bbox": [ + 194, + 112, + 803, + 175 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Lange B. $^{a}$ , Rodriguez N. $^{a}$ , Puech W. $^{a}$ , Rey H. $^{b}$ and Vasques X. 
$^{b}$", + "bbox": [ + 333, + 200, + 661, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{a}$ LIRMM, 141 rue ADA, Montpellier, France;", + "bbox": [ + 330, + 234, + 666, + 251 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "b IBM, Rue de la vieille poste, Montpellier,", + "bbox": [ + 338, + 252, + 658, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "France", + "bbox": [ + 470, + 270, + 527, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 465, + 314, + 531, + 328 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper deals with a 3D visualization technique proposed to analyze and manage energy efficiency from a data center. Data are extracted from sensors located in the IBM Green Data Center in Montpellier France. These sensors measure different information such as hygrometry, pressure and temperature. We want to visualize in real-time the large among of data produced by these sensors. A visualization engine has been designed, based on particles system and a client server paradigm. In order to solve performance problems, a Level Of Detail solution has been developed. These methods are based on the earlier work introduced by J. Clark in $1976^{1}$ . In this paper we introduce a particle method used for this work and subsequently we explain different simplification methods applied to improve our solution.", + "bbox": [ + 214, + 337, + 785, + 513 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: 3D Visualization, Sensors, Particles, Client/Server, Level Of Details", + "bbox": [ + 143, + 526, + 733, + 541 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. INTRODUCTION", + "text_level": 1, + "bbox": [ + 145, + 575, + 370, + 595 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we present a method to produce a 3D visualization for analyzing and managing temperature. 
Data are extracted from sensors located in the IBM Green Data Center in Montpellier, which provides many different types of information like temperature, pressure or hygrometry. In our system, sensors are placed in a virtual room and the internal space is modeled using particles. The main constraint here is to produce a real-time rendering. However, latency appears du to the number of vertices. In this paper, we use a solution called LOD (Level Of Detail) to produce multi resolution 3D objects. This solution has been introduced in 1976 by J. Clark1. In this paper, J. Clark introduces the use of several mesh resolutions to simplify the 3D scene complexity. In our work, we use various simplification methods to provide interactive rendering and allows rendering the most important part of data extracted from sensors. In this paper, we describe how we create a room, and the methods used to produce different resolution visualization. In Section 2, we introduce related work on particles systems and LOD. In Section 3, we expose our solution to simplify particles system. In Section 4 we give some results and finally, in Section 5 we present our conclusions and future work.", + "bbox": [ + 142, + 609, + 841, + 864 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1/10", + "bbox": [ + 483, + 970, + 514, + 979 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2. RELATED WORK", + "text_level": 1, + "bbox": [ + 145, + 69, + 375, + 88 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section we present several previous works concerning data visualization, particle systems and level of detail methods.", + "bbox": [ + 143, + 104, + 828, + 136 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Some previous work present solutions to visualize large data flow extracted from mantle convection. M. Damon et al. $^{2}$ and K. E. Jordan et al. $^{3}$ present interactive viewers for this kind of data. 
These data are computed by using Hight Performance Computing (HPC) and visualized on a large display. The rendering is calculated by using another HPC. The data flow is very important and a real-time 3D simulation is hard to obtain. W. Kapfer and", + "bbox": [ + 143, + 138, + 836, + 222 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "T. Riser6 introduce how to use particle system to visualize astronomic simulation, particles representing space objects. The number of particles is extremely important for computing motion in real-time. GPU computing is preferred to render instead of a common HPC solution. To display their data, they have developed their own 3D graphical engine. The space objects are represented by point sprite instead of sphere. Lights are used to give a spherical aspect to the point sprite. This solution allows to render more stars than spherical object method. The 3D engine provides different rendering methods to group space objects: cell simplification or extraction of isosurface. The use of GPU seems quite well for a particle solution, parallel processing allows to render large data; the astrological data seems to be well suited.", + "bbox": [ + 143, + 234, + 851, + 404 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In 1976, J. Clark introduces Level Of Detail (LOD) concept1. LOD consists of several resolution meshes for using them at different distances from the camera. Firstly, designer produces these meshes. First algorithms, in 1992 Schroeder et al. developed a method by decimation for simplify the mesh7. It analyses mesh geometry and evaluates the complexity of triangles. Vertices are removed if only constraints set by the user are respected. Vertices are removed and gaps are filled using triangulation. These algorithms of simplification are not enough to simplify mesh efficiently because shape is not always totally respected. D. Luebke, in 1997, has proposed a taxonomy of mesh simplification8. 
He presented the most used algorithms. He extracted different ways to use each algorithm. But in this paper, only one solution works with volumetric mesh9. T. He et al. propose a method based on voxel simplification by using a grid for clustering voxels. A marching cube10 algorithm was applied to produce a surface mesh. But this simplification algorithm did not preserve the shape of the mesh. In our work, we look for point cloud simplification. Indeed, previous methods which deal with simplification for surface point cloud like11-13 are not adapted to our case. All of these methods produce LOD for surface mesh and point cloud is extracted from scanner.", + "bbox": [ + 143, + 406, + 851, + 676 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. PROPOSED APPROACH", + "text_level": 1, + "bbox": [ + 145, + 710, + 452, + 729 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This section presents the different methods that are used to visualize a kind of data from Green Data Center (GDC). The main goal is to be able to visualize in real-time the evolution of temperature in the data center. For this, we use a special particle method. Particles are located using a segmentation algorithm based on Voronoi cell extraction and Delaunay triangulation. The latency due to the large flow of particles is avoided by using a client server paradigm. We improve our solution by using LOD methods to simplify rendering.", + "bbox": [ + 143, + 744, + 844, + 847 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2/10", + "bbox": [ + 483, + 970, + 514, + 979 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3.1 Particle systems", + "text_level": 1, + "bbox": [ + 142, + 68, + 333, + 85 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Rooms are the bases of our study. 
For modeling a room, we extract the shape of the space representation which is composed by a box with three measures: length $(l \\in \\mathbb{R})$ , width $(w \\in \\mathbb{R})$ , height $(h \\in \\mathbb{R})$ . Sensors are represented by $S = \\{\\mathrm{S}_1, \\dots, \\mathrm{S}_M\\}$ , where $M$ is the number of sensors. Sensors $\\mathrm{S}_i (\\mathrm{i} \\in \\{1, \\dots, M\\})$ are placed on the space on a layer $\\mathbf{L} \\in \\mathbb{N}$ and have a location represented by: $\\{\\mathbf{X}_i, \\mathbf{Y}_i, \\mathbf{L}_j\\}$ with $\\mathbf{X}_i \\in \\mathbb{R}$ , $\\mathbf{Y}_i \\in \\mathbb{R}$ and $j$ is the layer used. For modeling the space inside a room, we use a particle system instead of 2D map representations which have some lacks. $^{14}$ Actually 2D map does not allow having a real visualization of space. A particle visualization gives a better efficiency for modeling space. We use a large number of particles to represent the entire space. $\\mathbf{N} \\in \\mathbb{N}$ represents the number of particles in the room. It can be calculated using:", + "bbox": [ + 138, + 97, + 844, + 270 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nN = \\frac {\\left(\\left(l + 1\\right) \\times (h + 1) \\times (w + 1)\\right)}{\\delta^ {3}} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 277, + 524, + 316 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\delta \\in \\mathbb{R}$ is the space between particles. The particle grid is regular. In this model, three layers of temperature sensors compose rooms. They are defined according to their real locations in the data center. Figure ?? 
presents the different layers of sensors in the data center.", + "bbox": [ + 138, + 329, + 841, + 395 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Particles carry information, and flow motion can be simulated if needed by changing the value of particles and the computational cost is inferior.", + "bbox": [ + 138, + 398, + 836, + 433 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 Segmentation algorithms", + "text_level": 1, + "bbox": [ + 140, + 448, + 418, + 467 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In our solution, each sensors has an influence on surrounding particules. To calculate the set of particles in the sensor range, we use two methods: Voronoi cells extraction and Delaunay triangulation.", + "bbox": [ + 138, + 477, + 834, + 527 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Voronoi cells is a method to extract a partition of space $^{15}$ . This method is available for $\\phi$ dimensions where $\\phi \\in [1, +\\infty]$ , but most of implementations are done in 2D. Tools for extracting 3D Voronoi diagrams exist: Voro++ and QHull but particles are discrete and these solutions are not suitable because they extract Voronoi diagram in a continuous way. Then we designed our own method based on sphere expansion. We search nearest sensors for each particle. This part allows to weight particles outside the sensors mesh. A second method to weight the interior of the sensors mesh is used. We extract the mesh tetrahedron of sensors using the Delaunay triangulation implemented in QHull. This method was used to analyze the location of particle. We compute the exact location using ray tracing on the soup of tetrahedron. First, we search the nearest particles inside the hull of each tetrahedron. We extract the normal of each face of tetrahedron and we apply these normals on each particle. If the ray cuts three faces or more, the particle is inside the tetrahedron. 
This method is cost expensive and done in preprocessing. Moreover, particles are static and position didn't need to be update.", + "bbox": [ + 138, + 527, + 854, + 768 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3 Client server paradigm", + "text_level": 1, + "bbox": [ + 140, + 782, + 398, + 801 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To improve computation, a client server paradigm is used. We define a low cost communication protocol to transfer data from a server to a client. Server computes the modification of particles and the client displays the results. This protocol works in five steps. These steps are: sending header, sending sensor data, sending particle data, sending footer and receiving acknowledgment/language command from client. At each step, the server waits the acknowledgment from the client. We develop two ways to send data. The first sends the entire point cloud (sensors and particles). The biggest problem of this method is the", + "bbox": [ + 138, + 811, + 849, + 931 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3/10", + "bbox": [ + 480, + 969, + 517, + 979 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "transmission of data. Sensors are sent with their coordinates and their value. We encode these data in bit words. For the particles data, the same method was used. The footer was sent for closing the communication. The second method is used to reduce efficiently the communication cost. We only send modified sensors and particles. The id and the new value is sent instead of coordinates. The last step is the command sent by the client. It allows the user to interact with the server. 
We use it to modify the camera viewpoint.", + "bbox": [ + 138, + 65, + 830, + 167 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4 Level of detail for particles", + "text_level": 1, + "bbox": [ + 142, + 183, + 434, + 200 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Level of detail (LOD) is one of the most important methods in computer graphics. It allows to solve rendering problems or performance problems. This method consists by producing several resolution of a 3D object. In our works, we use some features to define the object resolution: hardware and viewpoint. Hardware and viewpoint do not need the same data structure and we need to recompute it for each modification of the viewpoint or when hardware changes. LOD was defined by two problems statement. The first one uses a sample of original points, the second one uses a new point data set. In this part, we define six methods to produce LOD. The four first methods are for the client, the other are for the server.", + "bbox": [ + 138, + 210, + 848, + 363 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Problems statement:", + "text_level": 1, + "bbox": [ + 140, + 378, + 307, + 392 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For this two approaches, we have a set $\\omega$ of Vertices $V$ , $V = \\{V_1, \\ldots, V_\\omega\\}$ . Each vertex is defined in $\\mathbb{R}^3$ . Simplify a mesh using a sample vertex means $\\omega > \\omega 2$ , where $\\omega 2$ is the size of the second data set. For approach 1, we obtain a new object $\\mathrm{V}2 = \\{\\mathrm{V}2_1, \\ldots, \\mathrm{V}2_\\omega\\}$ with fewer points than V but V 2 is a subset of V. 
For approach 2, we obtain a new object $\\mathrm{V}3 = \\{\\mathrm{V}3_1, \\ldots, \\mathrm{V}3_\\omega\\}$ with fewer points than V but each point in V 3 is a new vertex.", + "bbox": [ + 138, + 393, + 854, + 481 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In Section 2 we have presented methods to produce simplification. A few were designed for volumetric simplification. In this section, we propose several methods to produce different volumetric simplifications on our client. We develop four approaches to simplify 3D objects: clustering, neighbor simplification and two approaches based on server. Clustering method was based on He et al. $^{9}$ works, it consists of clustering particles using a 3D grid. Cells sizes of grid are set depending to the viewpoint of the camera. Clusters were being weight with the average of the different values of particles. The position is the barycenter of these particles. Figures 1(a)-1(e) give some examples of simplification using clustering solution. Figure 1(a) present the original point of cloud mesh. Figure", + "bbox": [ + 138, + 493, + 848, + 647 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "1(b) and 1(d) give two different methods for clustering. And finally, Figure 1(c) and 1(e) give the results of clustering methods.", + "bbox": [ + 138, + 657, + 849, + 691 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/ae6cbd294176dc493b50c73240c504ea8c9fc09c9e3151fba6f0be709b525025.jpg", + "image_caption": [ + "Figure 1. Clustering method for simplification point cloud." + ], + "image_footnote": [ + "The second solution used is based on neighborhood extraction. Before runtime, we extract all neighbors of a particle. We measure the distance between each particle. Some optimization can help to decrease complexity: we can estimate easily in our structure which particle is closer to another one (using the fact that particle grid is regular). 
After this," + ], + "bbox": [ + 148, + 696, + 849, + 821 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4/10", + "bbox": [ + 480, + 969, + 517, + 979 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "we extract the main value of particles. We explore each neighbor of particles and we keep the most important. In some cases, the most important can be the high values, in other the low values and in other both of them. This solution is able to produce a low resolution model with the most important information structure. Several low resolution models are created by exploring deeper in neighborhood. Figures 2(a)-2(c) illustrate a neighbor, and two simplifications of this mesh.", + "bbox": [ + 140, + 65, + 844, + 167 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/23944f9963b290146f2ac445e8970545bcf00b2949e999067a13468653d749cc.jpg", + "image_caption": [ + "(a) Neighborhood cloud." + ], + "image_footnote": [], + "bbox": [ + 148, + 172, + 379, + 339 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/0e41ba0aeddc8af5f2b6664c64abea63f9b32bfd4692c38f916d07f501d537e5.jpg", + "image_caption": [ + "(b) Simplification neighborhood of 1." + ], + "image_footnote": [], + "bbox": [ + 383, + 172, + 612, + 337 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/c01c12cfcf72e187db35ff49ad4d5edb57331b8d93efa4c1f20941ebf580a3d7.jpg", + "image_caption": [ + "(c) Simplification neighborhood of 2.", + "Figure 2. Neighbor method for simplification." + ], + "image_footnote": [], + "bbox": [ + 617, + 172, + 846, + 338 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Other methods were based on server instead of client. Client sent via TCP connection his viewpoint. The server recomputes the particles structure and recreates the entire structure. With this solution, it is possible to produce a point cloud resolution depending on hardware. 
Figure 3(a) presents particles rendering with a distance of 2 from the camera. Figure 3(b) is the decimation produced with a distance of 3 and Figure 3(c) is a distance of 1.", + "bbox": [ + 138, + 409, + 852, + 492 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Another method was based on Voronoi diffusion of temperature. The bandwidth for transmitting data is limited. We developed Voronoi temperature diffusion to solve this communication. In this approach, we update data using sphere expansion. Each time, we update particles depending on their distance from sensors. The more particles are distant from sensors the later they will be refreshed. This method sends only modified particles. The bandwidth is saved and the visualization gives a flow effect. Figure 4(a) represents values at time 0. At time 1, values of sensors change, 4(b). After time 2, we update a first range of particles 4(c) and finally the second range 4(d).", + "bbox": [ + 138, + 506, + 856, + 642 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/7eac2440a2fedd66d943c82b390c3a9df5d95d019d523a29c7a3e2f9c7218928.jpg", + "image_caption": [ + "(a) Particles server $(\\mathrm{D} = 2)$", + "Figure 3. Particle simplification using server and distance." 
+ ], + "image_footnote": [], + "bbox": [ + 156, + 650, + 380, + 815 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/c4777a0507eee1c1beaa13c04e351827098bc588e6ced9d8de6cff01430e3df5.jpg", + "image_caption": [ + "(b) Particles produce server $(\\mathrm{D} = 3)$" + ], + "image_footnote": [], + "bbox": [ + 388, + 650, + 612, + 815 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/5dabdfa6b0129921b2abb27785be9608573a25f26e4646d20c6b92250fad1414.jpg", + "image_caption": [ + "(c) Particles produce by server $(\\mathrm{D} = 1)$" + ], + "image_footnote": [], + "bbox": [ + 620, + 650, + 844, + 815 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5/10", + "bbox": [ + 482, + 969, + 514, + 979 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/e340d9807d4ed601eee3bd74351618a9adfda026a8dbd720f9d2aa40857dad26.jpg", + "image_caption": [ + "(a) Particles and sensors (T = 0)." + ], + "image_footnote": [], + "bbox": [ + 150, + 66, + 321, + 193 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/92c09c0b767ea453098777786fb815e8c2f833a8abc790807397f42e9a6f6887.jpg", + "image_caption": [ + "(b) Sensors update $(\\mathrm{T} = 1)$ ." + ], + "image_footnote": [], + "bbox": [ + 328, + 66, + 496, + 194 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/416dbc943d118d6834e80c8c5ad759b34f940700b96fd0b87bb23c082f10cf10.jpg", + "image_caption": [ + "(c) First range $(\\mathrm{T} = 2)$ .", + "Figure 4. Simplification using bandwidth size." + ], + "image_footnote": [], + "bbox": [ + 503, + 68, + 669, + 194 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a79e8f2104ab8c32df4137808fc6b4ce9b7465bfa15c2080bd3aabb2c58986d0.jpg", + "image_caption": [ + "(d) Second range $(\\mathrm{T} = 3)$ ." + ], + "image_footnote": [], + "bbox": [ + 674, + 68, + 848, + 194 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. 
EXPERIMENTAL RESULTS", + "text_level": 1, + "bbox": [ + 140, + 287, + 490, + 306 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The data are extracted from two rooms of the IBM data center. Firstly, we present our method for rendering the room, and later we present our results using Level Of Detail methods.", + "bbox": [ + 138, + 321, + 805, + 371 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Data visualization", + "text_level": 1, + "bbox": [ + 140, + 388, + 346, + 406 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We want to visualize and manage the consumption of a data center. For the visualization, we want to use an IFC viewer. But the IFC model for GDC is not available yet. Data center extraction of the room space is for the moment done by hand. The room is empty and was represent by a simple shape a box with 4 meters length, 3 meters width and 2.5 meters height. We use point cloud visualization based on particle paradigm. We use the two rooms of the data center and we put the same number of particles (30000) and 35 sensors distributed on three layers at 1 meter; 2 meter and on the ground. We define high and low temperature regarding the real sensors value. Figure 5(a) presents temperature color scale, Figure 5(b) and Figure 5(c) present data center sensors.", + "bbox": [ + 138, + 417, + 848, + 571 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The next step is to interpolate data from sensors. For this, we extract the sensor mesh. We use QHULL to produce a soup of tetrahedrons. Particles need to be located. We can determine which tetrahedron is the nearest, we extract the box hull of tetrahedron and we apply for each particle the norms of each tetrahedron face. If these rays cut three or more faces, then particle is inside the tetrahedron. With this method, we can determine exactly the location of each particles regarding to the tetrahedrons, a weight is given to them easily. 
It was used to apply a coefficient to the value of each vertex of tetrahedron. For the outside particles, another solution was used: Voronoi cells. This method is based on a discrete extraction of Voronoi cells. We use our own method because other method like Voro ++ or QHull extract Voronoi diagram in a continuous way.", + "bbox": [ + 138, + 584, + 852, + 755 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7c729f1706460b0bf0adde7a8dbd071f9ae77f611ef3f85b3d951fb62795016c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 151, + 878, + 367, + 922 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8c16ad7a59f34e5974a4322466cec0149f8905a1a1b1cfadff910a3fec50d004.jpg", + "image_caption": [ + "(b) Room one." + ], + "image_footnote": [], + "bbox": [ + 398, + 763, + 606, + 898 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2eb0e7b79700cbbd72a8a244992163f567dbbd68f9d3db6bad3e5808cda4dabb.jpg", + "image_caption": [ + "(c) Room two." + ], + "image_footnote": [], + "bbox": [ + 643, + 765, + 849, + 898 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6/10", + "bbox": [ + 482, + 969, + 514, + 979 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 Level of details", + "text_level": 1, + "bbox": [ + 142, + 98, + 325, + 116 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In the earlier days of this project, first solution proposed gives a low frame rates, about 15 FPS (Frame Per Second): visualization was not in real-time (real-time is about 24 FPS). For solving this problem, we define a client server paradigm. This solution allows to produce a real-time rendering on the client. Figure ?? gives an example of LOD for particles. We use Openscenegraph $^{20}$ as a 3D engine. It owns several features useful in LOD. A special object is defined to manage multi-resolution model. It calculates the distance of the object from the camera. 
For our experimentation we use five resolutions of mesh. The first mesh was the original mesh, it is set at 0 to 500. The next mesh was set at 500 to 1000, the next at 1000 to 1500 and the other at 1500 to 2000. These three meshes were constructed by specific LOD methods: clustering and significant vertices. Clustering defines a 3D grid inside the room. The size of each cell depends on the viewpoint location. The size of the cluster depends on the visibility of the clustered particles. First results are given Figure 6(a) and 6(b). Value of cluster is an average of clustered value. The number of points of the final mesh depends on the grid size. Table 1 shows the results at several distances.", + "bbox": [ + 138, + 126, + 857, + 367 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/389ef0f63a90171c14db5cd9a925792e376222c8c56a5801cacb3dcc99360c96.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
D = 0 to 500D = 500 to 1000D = 1000 to 1500D = 1500 to 2000
C = X30000390024036
", + "bbox": [ + 143, + 375, + 854, + 450 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9fbe1142edda7eab13f0abb9592b84e63d1611257e7426d7d569a68b9df82b38.jpg", + "image_caption": [ + "Figure 5. Data use to model the system.", + "(a) $\\mathrm{D} = 500$ to 1000.", + "Figure 6. Clustering visualization algorithms." + ], + "image_footnote": [], + "bbox": [ + 158, + 508, + 500, + 705 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/69ae709df8988787ff641b5b6eb5a608ca0636bba7110a3a4735064168d117da.jpg", + "image_caption": [ + "Table 1. Results of clustering simplification.", + "(b) $D = 1000$ to 1500." + ], + "image_footnote": [], + "bbox": [ + 504, + 508, + 846, + 705 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Significant points method extracts the neighbors for each particle. We extract the highest and lowest temperatures, by exploring the neighborhood of a particle, in order to have significant vertices of the model. For the first step of simplified model we explore neighbor. For the second model, we explore neighbor and neighbor of neighbor, etc. This solution simplifies drastically the model. First results are given Figure ??-??. Table 2 shows the number of vertices at several distance.", + "bbox": [ + 138, + 756, + 843, + 859 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7/10", + "bbox": [ + 482, + 969, + 516, + 979 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/3cd8a951d8771e165427c2faca0d2508e4c8cef94fb244dc3a04f2ca031996a3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
D = 0 to 500D = 500 to 1000D = 1000 to 1500D = 1500 to 2000
C = X300002295045543524
", + "bbox": [ + 143, + 61, + 854, + 137 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4050e8f862ac40be7bc6d5c239997192325936e244911b8d69fe60a4ab8810b8.jpg", + "image_caption": [ + "(a) Neighborhood 1.", + "Figure 7. Clustering visualization algorithms using neighbor." + ], + "image_footnote": [], + "bbox": [ + 153, + 195, + 496, + 402 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/dafb2130c8265a91a7faed53db3de1181cd524770896acda68d5a953df004f87.jpg", + "image_caption": [ + "(b) Neighborhood 2." + ], + "image_footnote": [], + "bbox": [ + 500, + 195, + 843, + 402 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The first server solution receives orders from client as presented Section 3.4. We calculate the viewpoint distance and we send data according to it. A new structure is recalculated if the camera is too far from the object. After the recomputing, we send the new data. This solution allows the user to receive more or less data according to its distance to the object. Table 3 shows some different resolutions produced with this method.", + "bbox": [ + 138, + 470, + 844, + 555 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/9434d9911bf9b1660b16b84b9672b9a68ac66eec53e61ed383a2c752f12025a2.jpg", + "table_caption": [ + "Table 2. Results of neighbor simplification." + ], + "table_footnote": [], + "table_body": "
D = 0 to 500D = 500 to 1000D = 1000 to 1500D = 1500 to 2000
C = X1200003000075001875
", + "bbox": [ + 143, + 566, + 854, + 640 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3. Several resolution of model.", + "bbox": [ + 138, + 674, + 424, + 688 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Another solution is to use bandwidth latency. We send data at several times, we do not send the entire set of data but only modified particles. We send at first time the sensors data, and subsequently we send a range of data (the nearest). After few minutes, all data are sent. This solution gives good results, and simulates a thermal diffusion in the whole structure of particles. Figure 8(a)-8(c) illustrate this method.", + "bbox": [ + 138, + 691, + 852, + 775 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2fcee6efa3b1d3c44144e2639f3857a15c4079f838c4f9132953fd427a8b1be9.jpg", + "image_caption": [ + "(a) $\\mathrm{T} = 0$" + ], + "image_footnote": [], + "bbox": [ + 148, + 779, + 380, + 912 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b2be70bd4cc7d261ac38e8bc653a0c09c537d49d2a5612ed5c067058a98ef463.jpg", + "image_caption": [ + "(b) $\\mathrm{T} = 1$" + ], + "image_footnote": [], + "bbox": [ + 383, + 779, + 616, + 912 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0d4e22f04d3ea25761125c93fc6d70fe856bf840ef353c043f32d21c9633724e.jpg", + "image_caption": [ + "(c) $\\mathrm{T} = 4$" + ], + "image_footnote": [], + "bbox": [ + 617, + 779, + 849, + 912 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8/10", + "bbox": [ + 482, + 969, + 514, + 979 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. CONCLUSION", + "text_level": 1, + "bbox": [ + 142, + 114, + 339, + 135 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this paper, we have presented a method to visualize sensors data extracted from a Green Data Center. This approach produces interpolation visualization for managing and visualizing data. 
This interpolation used a Delaunay triangulation and a cell extraction based on Voronoi. An unusual way of use particles helps to process data. First results present the solution proposed to visualize the inside of a GDC space. The second results proposed in this paper aim to improve the rendering.", + "bbox": [ + 140, + 148, + 849, + 251 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "For this, first step introduces a client/server protocol a second step illustrates methods to simplify the model. With these different approaches we improve the rendering time, preserving most important data are kept. In future works, we will work on data \"dressing\". We want to find a way to improve rendering of the scene using meatballs or marching cube algorithms. A main constraint of this work is real-time computation. Future work also concern to add rooms to the visualization. At present, we only visualize a single room. We want to visualize building, and complex form, by using an IFC loader.", + "bbox": [ + 140, + 252, + 852, + 371 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 142, + 402, + 419, + 422 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We want to thanks the PSSC (Products and Solutions Support Center) team of IBM Montpellier for having provided the necessary equipment and data need for this experimentation. And we thank the FUI (Fonds Unique Interministriel) for their financial support.", + "bbox": [ + 138, + 438, + 836, + 506 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 142, + 539, + 310, + 558 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Clark, J. 
H., \"Hierarchical geometric models for visible surface algorithms,\" Communications of the ACM 19(10), 547-554 (1976).", + "[2] Damon, M., Kameyama, M., Knox, M., Porter, D., Yuen, D., and Sevre, E., \"Interactive visualization of 3d mantle convection,\" Visual Geosciences (2008).", + "[3] Jordan, K. E., Yuen, D. A., Reuteler, D. M., Zhang, S., and Haimes, R., \"Parallel interactive visualization of 3d mantle convection,\" IEEE Comput. Sci. Eng. 3(4), 29-37 (1996).", + "[4] Reeves, W. T., \"Particle systems - a technique for modeling a class of fuzzy objects,\" ACM Transactions on Graphics 2, 359-376 (1983).", + "[5] Latta, L., \"Building a million particle system,\" (2004).", + "[6] Kapferer, W. and Riser, T., \"Visualization needs and techniques for astrophysical simulations,\" New Journal of Physics 10(12), 125008 (15pp) (2008).", + "[7] Schroeder, W. J., Zarge, J. A., and Lorensen, W. E., \"Decimation of triangle meshes,\" 65-70 (1992).", + "[8] Luebke, D., \"A survey of polygonal simplification algorithms,\" (1997).", + "[9] He, T., Hong, L., Kaufman, A., Varshney, A., and Wang, S., \"Voxel based object simplification,\" in [Proc. SIGGRAPH Symposium on Interactive 3D Graphics], 296-303 (1995).", + "[10] Lorensen, W. E. and Cline, H. E., \"Marching cubes: A high resolution 3d surface construction algorithm,\" SIGGRAPH Comput. Graph. 21(4), 163-169 (1987).", + "[11] Pauly, M., Gross, M., and Kobbelt, L. P., \"Efficient simplification of point-sampled surfaces,\" (2002).", + "[12] Moenning, C., , Moenning, C., and Dodgson, N. A., \"Intrinsic point cloud simplification,\"" + ], + "bbox": [ + 140, + 573, + 852, + 931 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Figure 8. 
Bandwidth simplification.", + "bbox": [ + 140, + 65, + 413, + 80 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9/10", + "bbox": [ + 480, + 969, + 514, + 979 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "(2004).", + "[13] Song, H. and Feng, H.-Y., \"A progressive point cloud simplification algorithm with preserved sharp edge data,\" The International Journal of Advanced Manufacturing Technology 45, 583-592 (November 2009).", + "[14] Buschmann, C., Pfisterer, D., Fischer, S., Fekete, S. P., and Kröller, A., \"Spyglass: a wireless sensor network visualizer,\" SIGBED Rev. 2(1), 1-6 (2005).", + "[15] Avis, D. and Bhattacharya, B., \"Algorithms for computing d-dimensional voronoi diagrams and their duals,\" 1, 159-180 (1983).", + "[16] Rycroft, C. H., \"Voro++: a three-dimensional voronoi cell library in $c++$ ,\" Chaos 19 (2009). Lawrence Berkeley National Laboratory.", + "[17] Barber, C. B., Dobkin, D. P., and Huhdanpaa, H., \"The quickhull algorithm for convex hulls,\" ACM Trans. Math. Softw. 22(4), 469-483 (1996).", + "[18] Snyder, J. M. and Barr, A. H., \"Ray tracing complex models containing surface tessellations,\" SIGGRAPH Comput. Graph. 21(4), 119-128 (1987).", + "[19] Hoppe, H., \"Progressive meshes. computer graphics,\" SIGGRAPH96 Proceedings, 99108 (1996).", + "[20] Burns, D. and Osfield, R., \"Open scene graph a: Introduction, b: Examples and applications,\" 265 (2004)." 
+ ], + "bbox": [ + 142, + 65, + 849, + 371 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Further author information:", + "bbox": [ + 166, + 393, + 388, + 407 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Lange B.: E-mail: benoit.lange@lirmm.fr", + "bbox": [ + 166, + 411, + 480, + 426 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Rodriguez N.: E-mail: nancy.rodriguez@lirmm.fr", + "bbox": [ + 166, + 428, + 542, + 443 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Puech W.: E-mail: william.puech@lirmm.fr", + "bbox": [ + 166, + 445, + 496, + 459 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Rey H.: E-mail:REYHERVE@fr.ibm.com", + "bbox": [ + 166, + 462, + 467, + 477 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Vasques X.: E-mail: xaviervasques@fr.ibm.com", + "bbox": [ + 166, + 479, + 527, + 494 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10/10", + "bbox": [ + 480, + 969, + 517, + 979 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_model.json b/data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_model.json new file mode 100644 index 0000000000000000000000000000000000000000..93a691e63f03b9354a3bce03d5bf39f7ee03e8db --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_model.json @@ -0,0 +1,1639 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.195, + 0.113, + 0.804, + 0.176 + ], + "angle": 0, + "content": "A 3D particle visualization system for temperature management" + }, + { + "type": "text", + "bbox": [ + 0.334, + 0.201, + 0.663, + 0.234 + ], + "angle": 0, + "content": "Lange B. \\(^{a}\\), Rodriguez N. \\(^{a}\\), Puech W. \\(^{a}\\), Rey H. \\(^{b}\\) and Vasques X. 
\\(^{b}\\)" + }, + { + "type": "text", + "bbox": [ + 0.331, + 0.236, + 0.667, + 0.252 + ], + "angle": 0, + "content": "\\(^{a}\\) LIRMM, 141 rue ADA, Montpellier, France;" + }, + { + "type": "text", + "bbox": [ + 0.339, + 0.253, + 0.659, + 0.269 + ], + "angle": 0, + "content": "b IBM, Rue de la vieille poste, Montpellier," + }, + { + "type": "text", + "bbox": [ + 0.471, + 0.271, + 0.528, + 0.285 + ], + "angle": 0, + "content": "France" + }, + { + "type": "title", + "bbox": [ + 0.467, + 0.315, + 0.532, + 0.329 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.338, + 0.786, + 0.514 + ], + "angle": 0, + "content": "This paper deals with a 3D visualization technique proposed to analyze and manage energy efficiency from a data center. Data are extracted from sensors located in the IBM Green Data Center in Montpellier France. These sensors measure different information such as hygrometry, pressure and temperature. We want to visualize in real-time the large among of data produced by these sensors. A visualization engine has been designed, based on particles system and a client server paradigm. In order to solve performance problems, a Level Of Detail solution has been developed. These methods are based on the earlier work introduced by J. Clark in \\(1976^{1}\\). In this paper we introduce a particle method used for this work and subsequently we explain different simplification methods applied to improve our solution." + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.527, + 0.735, + 0.542 + ], + "angle": 0, + "content": "Keywords: 3D Visualization, Sensors, Particles, Client/Server, Level Of Details" + }, + { + "type": "title", + "bbox": [ + 0.147, + 0.576, + 0.371, + 0.596 + ], + "angle": 0, + "content": "1. 
INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.143, + 0.611, + 0.842, + 0.866 + ], + "angle": 0, + "content": "In this paper, we present a method to produce a 3D visualization for analyzing and managing temperature. Data are extracted from sensors located in the IBM Green Data Center in Montpellier, which provides many different types of information like temperature, pressure or hygrometry. In our system, sensors are placed in a virtual room and the internal space is modeled using particles. The main constraint here is to produce a real-time rendering. However, latency appears du to the number of vertices. In this paper, we use a solution called LOD (Level Of Detail) to produce multi resolution 3D objects. This solution has been introduced in 1976 by J. Clark1. In this paper, J. Clark introduces the use of several mesh resolutions to simplify the 3D scene complexity. In our work, we use various simplification methods to provide interactive rendering and allows rendering the most important part of data extracted from sensors. In this paper, we describe how we create a room, and the methods used to produce different resolution visualization. In Section 2, we introduce related work on particles systems and LOD. In Section 3, we expose our solution to simplify particles system. In Section 4 we give some results and finally, in Section 5 we present our conclusions and future work." + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.971, + 0.516, + 0.98 + ], + "angle": 0, + "content": "1/10" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.147, + 0.07, + 0.376, + 0.089 + ], + "angle": 0, + "content": "2. RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.105, + 0.83, + 0.137 + ], + "angle": 0, + "content": "In this section we present several previous works concerning data visualization, particle systems and level of detail methods." 
+ }, + { + "type": "text", + "bbox": [ + 0.145, + 0.139, + 0.837, + 0.223 + ], + "angle": 0, + "content": "Some previous work present solutions to visualize large data flow extracted from mantle convection. M. Damon et al. \\(^{2}\\) and K. E. Jordan et al. \\(^{3}\\) present interactive viewers for this kind of data. These data are computed by using Hight Performance Computing (HPC) and visualized on a large display. The rendering is calculated by using another HPC. The data flow is very important and a real-time 3D simulation is hard to obtain. W. Kapfer and" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.235, + 0.852, + 0.405 + ], + "angle": 0, + "content": "T. Riser6 introduce how to use particle system to visualize astronomic simulation, particles representing space objects. The number of particles is extremely important for computing motion in real-time. GPU computing is preferred to render instead of a common HPC solution. To display their data, they have developed their own 3D graphical engine. The space objects are represented by point sprite instead of sphere. Lights are used to give a spherical aspect to the point sprite. This solution allows to render more stars than spherical object method. The 3D engine provides different rendering methods to group space objects: cell simplification or extraction of isosurface. The use of GPU seems quite well for a particle solution, parallel processing allows to render large data; the astrological data seems to be well suited." + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.407, + 0.852, + 0.678 + ], + "angle": 0, + "content": "In 1976, J. Clark introduces Level Of Detail (LOD) concept1. LOD consists of several resolution meshes for using them at different distances from the camera. Firstly, designer produces these meshes. First algorithms, in 1992 Schroeder et al. developed a method by decimation for simplify the mesh7. It analyses mesh geometry and evaluates the complexity of triangles. 
Vertices are removed if only constraints set by the user are respected. Vertices are removed and gaps are filled using triangulation. These algorithms of simplification are not enough to simplify mesh efficiently because shape is not always totally respected. D. Luebke, in 1997, has proposed a taxonomy of mesh simplification8. He presented the most used algorithms. He extracted different ways to use each algorithm. But in this paper, only one solution works with volumetric mesh9. T. He et al. propose a method based on voxel simplification by using a grid for clustering voxels. A marching cube10 algorithm was applied to produce a surface mesh. But this simplification algorithm did not preserve the shape of the mesh. In our work, we look for point cloud simplification. Indeed, previous methods which deal with simplification for surface point cloud like11-13 are not adapted to our case. All of these methods produce LOD for surface mesh and point cloud is extracted from scanner." + }, + { + "type": "title", + "bbox": [ + 0.147, + 0.711, + 0.453, + 0.731 + ], + "angle": 0, + "content": "3. PROPOSED APPROACH" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.746, + 0.845, + 0.848 + ], + "angle": 0, + "content": "This section presents the different methods that are used to visualize a kind of data from Green Data Center (GDC). The main goal is to be able to visualize in real-time the evolution of temperature in the data center. For this, we use a special particle method. Particles are located using a segmentation algorithm based on Voronoi cell extraction and Delaunay triangulation. The latency due to the large flow of particles is avoided by using a client server paradigm. We improve our solution by using LOD methods to simplify rendering." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.485, + 0.971, + 0.515, + 0.98 + ], + "angle": 0, + "content": "2/10" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.143, + 0.069, + 0.334, + 0.087 + ], + "angle": 0, + "content": "3.1 Particle systems" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.098, + 0.845, + 0.271 + ], + "angle": 0, + "content": "Rooms are the bases of our study. For modeling a room, we extract the shape of the space representation which is composed by a box with three measures: length \\((l \\in \\mathbb{R})\\), width \\((w \\in \\mathbb{R})\\), height \\((h \\in \\mathbb{R})\\). Sensors are represented by \\(S = \\{\\mathrm{S}_1, \\dots, \\mathrm{S}_M\\}\\), where \\(M\\) is the number of sensors. Sensors \\(\\mathrm{S}_i (\\mathrm{i} \\in \\{1, \\dots, M\\})\\) are placed on the space on a layer \\(\\mathbf{L} \\in \\mathbb{N}\\) and have a location represented by: \\(\\{\\mathbf{X}_i, \\mathbf{Y}_i, \\mathbf{L}_j\\}\\) with \\(\\mathbf{X}_i \\in \\mathbb{R}\\), \\(\\mathbf{Y}_i \\in \\mathbb{R}\\) and \\(j\\) is the layer used. For modeling the space inside a room, we use a particle system instead of 2D map representations which have some lacks.\\(^{14}\\) Actually 2D map does not allow having a real visualization of space. A particle visualization gives a better efficiency for modeling space. We use a large number of particles to represent the entire space. \\(\\mathbf{N} \\in \\mathbb{N}\\) represents the number of particles in the room. It can be calculated using:" + }, + { + "type": "equation", + "bbox": [ + 0.181, + 0.279, + 0.525, + 0.318 + ], + "angle": 0, + "content": "\\[\nN = \\frac {\\left(\\left(l + 1\\right) \\times (h + 1) \\times (w + 1)\\right)}{\\delta^ {3}} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.33, + 0.842, + 0.396 + ], + "angle": 0, + "content": "where \\(\\delta \\in \\mathbb{R}\\) is the space between particles. The particle grid is regular. 
In this model, three layers of temperature sensors compose rooms. They are defined according to their real locations in the data center. Figure ?? presents the different layers of sensors in the data center." + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.399, + 0.837, + 0.434 + ], + "angle": 0, + "content": "Particles carry information, and flow motion can be simulated if needed by changing the value of particles and the computational cost is inferior." + }, + { + "type": "title", + "bbox": [ + 0.142, + 0.449, + 0.419, + 0.468 + ], + "angle": 0, + "content": "3.2 Segmentation algorithms" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.478, + 0.836, + 0.528 + ], + "angle": 0, + "content": "In our solution, each sensors has an influence on surrounding particules. To calculate the set of particles in the sensor range, we use two methods: Voronoi cells extraction and Delaunay triangulation." + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.529, + 0.856, + 0.769 + ], + "angle": 0, + "content": "Voronoi cells is a method to extract a partition of space \\(^{15}\\). This method is available for \\(\\phi\\) dimensions where \\(\\phi \\in [1, +\\infty]\\), but most of implementations are done in 2D. Tools for extracting 3D Voronoi diagrams exist: Voro++ and QHull but particles are discrete and these solutions are not suitable because they extract Voronoi diagram in a continuous way. Then we designed our own method based on sphere expansion. We search nearest sensors for each particle. This part allows to weight particles outside the sensors mesh. A second method to weight the interior of the sensors mesh is used. We extract the mesh tetrahedron of sensors using the Delaunay triangulation implemented in QHull. This method was used to analyze the location of particle. We compute the exact location using ray tracing on the soup of tetrahedron. First, we search the nearest particles inside the hull of each tetrahedron. 
We extract the normal of each face of tetrahedron and we apply these normals on each particle. If the ray cuts three faces or more, the particle is inside the tetrahedron. This method is cost expensive and done in preprocessing. Moreover, particles are static and position didn't need to be update." + }, + { + "type": "title", + "bbox": [ + 0.142, + 0.783, + 0.4, + 0.803 + ], + "angle": 0, + "content": "3.3 Client server paradigm" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.812, + 0.851, + 0.933 + ], + "angle": 0, + "content": "To improve computation, a client server paradigm is used. We define a low cost communication protocol to transfer data from a server to a client. Server computes the modification of particles and the client displays the results. This protocol works in five steps. These steps are: sending header, sending sensor data, sending particle data, sending footer and receiving acknowledgment/language command from client. At each step, the server waits the acknowledgment from the client. We develop two ways to send data. The first sends the entire point cloud (sensors and particles). The biggest problem of this method is the" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.97, + 0.518, + 0.981 + ], + "angle": 0, + "content": "3/10" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.14, + 0.066, + 0.831, + 0.168 + ], + "angle": 0, + "content": "transmission of data. Sensors are sent with their coordinates and their value. We encode these data in bit words. For the particles data, the same method was used. The footer was sent for closing the communication. The second method is used to reduce efficiently the communication cost. We only send modified sensors and particles. The id and the new value is sent instead of coordinates. The last step is the command sent by the client. It allows the user to interact with the server. We use it to modify the camera viewpoint." 
+ }, + { + "type": "title", + "bbox": [ + 0.143, + 0.184, + 0.436, + 0.202 + ], + "angle": 0, + "content": "3.4 Level of detail for particles" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.212, + 0.849, + 0.364 + ], + "angle": 0, + "content": "Level of detail (LOD) is one of the most important methods in computer graphics. It allows to solve rendering problems or performance problems. This method consists by producing several resolution of a 3D object. In our works, we use some features to define the object resolution: hardware and viewpoint. Hardware and viewpoint do not need the same data structure and we need to recompute it for each modification of the viewpoint or when hardware changes. LOD was defined by two problems statement. The first one uses a sample of original points, the second one uses a new point data set. In this part, we define six methods to produce LOD. The four first methods are for the client, the other are for the server." + }, + { + "type": "title", + "bbox": [ + 0.141, + 0.379, + 0.308, + 0.393 + ], + "angle": 0, + "content": "Problems statement:" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.394, + 0.856, + 0.482 + ], + "angle": 0, + "content": "For this two approaches, we have a set \\(\\omega\\) of Vertices \\(V\\), \\(V = \\{V_1, \\ldots, V_\\omega\\}\\). Each vertex is defined in \\(\\mathbb{R}^3\\). Simplify a mesh using a sample vertex means \\(\\omega > \\omega 2\\), where \\(\\omega 2\\) is the size of the second data set. For approach 1, we obtain a new object \\(\\mathrm{V}2 = \\{\\mathrm{V}2_1, \\ldots, \\mathrm{V}2_\\omega\\}\\) with fewer points than V but V 2 is a subset of V. For approach 2, we obtain a new object \\(\\mathrm{V}3 = \\{\\mathrm{V}3_1, \\ldots, \\mathrm{V}3_\\omega\\}\\) with fewer points than V but each point in V 3 is a new vertex." 
+ }, + { + "type": "text", + "bbox": [ + 0.139, + 0.494, + 0.849, + 0.648 + ], + "angle": 0, + "content": "In Section 2 we have presented methods to produce simplification. A few were designed for volumetric simplification. In this section, we propose several methods to produce different volumetric simplifications on our client. We develop four approaches to simplify 3D objects: clustering, neighbor simplification and two approaches based on server. Clustering method was based on He et al. \\(^{9}\\) works, it consists of clustering particles using a 3D grid. Cells sizes of grid are set depending to the viewpoint of the camera. Clusters were being weight with the average of the different values of particles. The position is the barycenter of these particles. Figures 1(a)-1(e) give some examples of simplification using clustering solution. Figure 1(a) present the original point of cloud mesh. Figure" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.659, + 0.851, + 0.693 + ], + "angle": 0, + "content": "1(b) and 1(d) give two different methods for clustering. And finally, Figure 1(c) and 1(e) give the results of clustering methods." + }, + { + "type": "image", + "bbox": [ + 0.149, + 0.697, + 0.851, + 0.822 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.14, + 0.843, + 0.593, + 0.859 + ], + "angle": 0, + "content": "Figure 1. Clustering method for simplification point cloud." + }, + { + "type": "image_footnote", + "bbox": [ + 0.139, + 0.86, + 0.854, + 0.929 + ], + "angle": 0, + "content": "The second solution used is based on neighborhood extraction. Before runtime, we extract all neighbors of a particle. We measure the distance between each particle. Some optimization can help to decrease complexity: we can estimate easily in our structure which particle is closer to another one (using the fact that particle grid is regular). 
After this," + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.97, + 0.518, + 0.98 + ], + "angle": 0, + "content": "4/10" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.141, + 0.066, + 0.845, + 0.168 + ], + "angle": 0, + "content": "we extract the main value of particles. We explore each neighbor of particles and we keep the most important. In some cases, the most important can be the high values, in other the low values and in other both of them. This solution is able to produce a low resolution model with the most important information structure. Several low resolution models are created by exploring deeper in neighborhood. Figures 2(a)-2(c) illustrate a neighbor, and two simplifications of this mesh." + }, + { + "type": "image", + "bbox": [ + 0.149, + 0.174, + 0.38, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.149, + 0.341, + 0.296, + 0.372 + ], + "angle": 0, + "content": "(a) Neighborhood cloud." + }, + { + "type": "image", + "bbox": [ + 0.384, + 0.174, + 0.613, + 0.338 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.384, + 0.341, + 0.532, + 0.373 + ], + "angle": 0, + "content": "(b) Simplification neighborhood of 1." + }, + { + "type": "image", + "bbox": [ + 0.619, + 0.174, + 0.848, + 0.339 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.619, + 0.341, + 0.766, + 0.373 + ], + "angle": 0, + "content": "(c) Simplification neighborhood of 2." + }, + { + "type": "image_caption", + "bbox": [ + 0.141, + 0.393, + 0.492, + 0.409 + ], + "angle": 0, + "content": "Figure 2. Neighbor method for simplification." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.41, + 0.854, + 0.493 + ], + "angle": 0, + "content": "Other methods were based on server instead of client. Client sent via TCP connection his viewpoint. The server recomputes the particles structure and recreates the entire structure. 
With this solution, it is possible to produce a point cloud resolution depending on hardware. Figure 3(a) presents particles rendering with a distance of 2 from the camera. Figure 3(b) is the decimation produced with a distance of 3 and Figure 3(c) is a distance of 1." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.507, + 0.857, + 0.643 + ], + "angle": 0, + "content": "Another method was based on Voronoi diffusion of temperature. The bandwidth for transmitting data is limited. We developed Voronoi temperature diffusion to solve this communication. In this approach, we update data using sphere expansion. Each time, we update particles depending on their distance from sensors. The more particles are distant from sensors the later they will be refreshed. This method sends only modified particles. The bandwidth is saved and the visualization gives a flow effect. Figure 4(a) represents values at time 0. At time 1, values of sensors change, 4(b). After time 2, we update a first range of particles 4(c) and finally the second range 4(d)." 
+ }, + { + "type": "image", + "bbox": [ + 0.157, + 0.651, + 0.382, + 0.816 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.156, + 0.817, + 0.277, + 0.849 + ], + "angle": 0, + "content": "(a) Particles server \\((\\mathrm{D} = 2)\\)" + }, + { + "type": "image", + "bbox": [ + 0.389, + 0.651, + 0.614, + 0.816 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.389, + 0.818, + 0.577, + 0.849 + ], + "angle": 0, + "content": "(b) Particles produce server \\((\\mathrm{D} = 3)\\)" + }, + { + "type": "image", + "bbox": [ + 0.621, + 0.651, + 0.846, + 0.816 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.619, + 0.818, + 0.848, + 0.849 + ], + "angle": 0, + "content": "(c) Particles produce by server \\((\\mathrm{D} = 1)\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.141, + 0.868, + 0.595, + 0.884 + ], + "angle": 0, + "content": "Figure 3. Particle simplification using server and distance." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.97, + 0.516, + 0.98 + ], + "angle": 0, + "content": "5/10" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.151, + 0.068, + 0.323, + 0.194 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.15, + 0.196, + 0.325, + 0.22 + ], + "angle": 0, + "content": "(a) Particles and sensors (T = 0)." + }, + { + "type": "image", + "bbox": [ + 0.329, + 0.068, + 0.497, + 0.195 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.332, + 0.196, + 0.493, + 0.207 + ], + "angle": 0, + "content": "(b) Sensors update \\((\\mathrm{T} = 1)\\)." + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.069, + 0.671, + 0.195 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.517, + 0.196, + 0.658, + 0.208 + ], + "angle": 0, + "content": "(c) First range \\((\\mathrm{T} = 2)\\)." 
+ }, + { + "type": "image", + "bbox": [ + 0.675, + 0.069, + 0.849, + 0.195 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.685, + 0.196, + 0.84, + 0.208 + ], + "angle": 0, + "content": "(d) Second range \\((\\mathrm{T} = 3)\\)." + }, + { + "type": "image_caption", + "bbox": [ + 0.14, + 0.238, + 0.496, + 0.255 + ], + "angle": 0, + "content": "Figure 4. Simplification using bandwidth size." + }, + { + "type": "title", + "bbox": [ + 0.142, + 0.288, + 0.491, + 0.308 + ], + "angle": 0, + "content": "4. EXPERIMENTAL RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.322, + 0.807, + 0.372 + ], + "angle": 0, + "content": "The data are extracted from two rooms of the IBM data center. Firstly, we present our method for rendering the room, and later we present our results using Level Of Detail methods." + }, + { + "type": "title", + "bbox": [ + 0.142, + 0.39, + 0.348, + 0.407 + ], + "angle": 0, + "content": "4.1 Data visualization" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.419, + 0.849, + 0.572 + ], + "angle": 0, + "content": "We want to visualize and manage the consumption of a data center. For the visualization, we want to use an IFC viewer. But the IFC model for GDC is not available yet. Data center extraction of the room space is for the moment done by hand. The room is empty and was represent by a simple shape a box with 4 meters length, 3 meters width and 2.5 meters height. We use point cloud visualization based on particle paradigm. We use the two rooms of the data center and we put the same number of particles (30000) and 35 sensors distributed on three layers at 1 meter; 2 meter and on the ground. We define high and low temperature regarding the real sensors value. Figure 5(a) presents temperature color scale, Figure 5(b) and Figure 5(c) present data center sensors." 
+ }, + { + "type": "text", + "bbox": [ + 0.139, + 0.585, + 0.854, + 0.756 + ], + "angle": 0, + "content": "The next step is to interpolate data from sensors. For this, we extract the sensor mesh. We use QHULL to produce a soup of tetrahedrons. Particles need to be located. We can determine which tetrahedron is the nearest, we extract the box hull of tetrahedron and we apply for each particle the norms of each tetrahedron face. If these rays cut three or more faces, then particle is inside the tetrahedron. With this method, we can determine exactly the location of each particles regarding to the tetrahedrons, a weight is given to them easily. It was used to apply a coefficient to the value of each vertex of tetrahedron. For the outside particles, another solution was used: Voronoi cells. This method is based on a discrete extraction of Voronoi cells. We use our own method because other method like Voro ++ or QHull extract Voronoi diagram in a continuous way." + }, + { + "type": "image", + "bbox": [ + 0.153, + 0.879, + 0.368, + 0.923 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.399, + 0.765, + 0.607, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.438, + 0.907, + 0.547, + 0.922 + ], + "angle": 0, + "content": "(b) Room one." + }, + { + "type": "image", + "bbox": [ + 0.645, + 0.766, + 0.851, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.692, + 0.907, + 0.8, + 0.922 + ], + "angle": 0, + "content": "(c) Room two." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.97, + 0.516, + 0.98 + ], + "angle": 0, + "content": "6/10" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.141, + 0.066, + 0.445, + 0.083 + ], + "angle": 0, + "content": "Figure 5. Data use to model the system." 
+ }, + { + "type": "title", + "bbox": [ + 0.143, + 0.099, + 0.326, + 0.117 + ], + "angle": 0, + "content": "4.2 Level of details" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.127, + 0.859, + 0.368 + ], + "angle": 0, + "content": "In the earlier days of this project, first solution proposed gives a low frame rates, about 15 FPS (Frame Per Second): visualization was not in real-time (real-time is about 24 FPS). For solving this problem, we define a client server paradigm. This solution allows to produce a real-time rendering on the client. Figure ?? gives an example of LOD for particles. We use Openscenegraph \\(^{20}\\) as a 3D engine. It owns several features useful in LOD. A special object is defined to manage multi-resolution model. It calculates the distance of the object from the camera. For our experimentation we use five resolutions of mesh. The first mesh was the original mesh, it is set at 0 to 500. The next mesh was set at 500 to 1000, the next at 1000 to 1500 and the other at 1500 to 2000. These three meshes were constructed by specific LOD methods: clustering and significant vertices. Clustering defines a 3D grid inside the room. The size of each cell depends on the viewpoint location. The size of the cluster depends on the visibility of the clustered particles. First results are given Figure 6(a) and 6(b). Value of cluster is an average of clustered value. The number of points of the final mesh depends on the grid size. Table 1 shows the results at several distances." + }, + { + "type": "table", + "bbox": [ + 0.145, + 0.376, + 0.855, + 0.451 + ], + "angle": 0, + "content": "
D = 0 to 500D = 500 to 1000D = 1000 to 1500D = 1500 to 2000
C = X30000390024036
" + }, + { + "type": "image_caption", + "bbox": [ + 0.141, + 0.484, + 0.482, + 0.501 + ], + "angle": 0, + "content": "Table 1. Results of clustering simplification." + }, + { + "type": "image", + "bbox": [ + 0.159, + 0.51, + 0.501, + 0.706 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.236, + 0.706, + 0.425, + 0.722 + ], + "angle": 0, + "content": "(a) \\(\\mathrm{D} = 500\\) to 1000." + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.51, + 0.848, + 0.706 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.576, + 0.706, + 0.776, + 0.721 + ], + "angle": 0, + "content": "(b) \\(D = 1000\\) to 1500." + }, + { + "type": "image_caption", + "bbox": [ + 0.14, + 0.74, + 0.492, + 0.757 + ], + "angle": 0, + "content": "Figure 6. Clustering visualization algorithms." + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.757, + 0.844, + 0.86 + ], + "angle": 0, + "content": "Significant points method extracts the neighbors for each particle. We extract the highest and lowest temperatures, by exploring the neighborhood of a particle, in order to have significant vertices of the model. For the first step of simplified model we explore neighbor. For the second model, we explore neighbor and neighbor of neighbor, etc. This solution simplifies drastically the model. First results are given Figure ??-??. Table 2 shows the number of vertices at several distance." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.97, + 0.517, + 0.98 + ], + "angle": 0, + "content": "7/10" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.145, + 0.063, + 0.855, + 0.138 + ], + "angle": 0, + "content": "
D = 0 to 500D = 500 to 1000D = 1000 to 1500D = 1500 to 2000
C = X300002295045543524
" + }, + { + "type": "table_caption", + "bbox": [ + 0.14, + 0.171, + 0.474, + 0.187 + ], + "angle": 0, + "content": "Table 2. Results of neighbor simplification." + }, + { + "type": "image", + "bbox": [ + 0.154, + 0.196, + 0.497, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.228, + 0.403, + 0.422, + 0.42 + ], + "angle": 0, + "content": "(a) Neighborhood 1." + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.196, + 0.844, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.575, + 0.403, + 0.769, + 0.42 + ], + "angle": 0, + "content": "(b) Neighborhood 2." + }, + { + "type": "image_caption", + "bbox": [ + 0.14, + 0.441, + 0.607, + 0.459 + ], + "angle": 0, + "content": "Figure 7. Clustering visualization algorithms using neighbor." + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.471, + 0.845, + 0.556 + ], + "angle": 0, + "content": "The first server solution receives orders from client as presented Section 3.4. We calculate the viewpoint distance and we send data according to it. A new structure is recalculated if the camera is too far from the object. After the recomputing, we send the new data. This solution allows the user to receive more or less data according to its distance to the object. Table 3 shows some different resolutions produced with this method." + }, + { + "type": "table", + "bbox": [ + 0.145, + 0.567, + 0.855, + 0.641 + ], + "angle": 0, + "content": "
D = 0 to 500D = 500 to 1000D = 1000 to 1500D = 1500 to 2000
C = X1200003000075001875
" + }, + { + "type": "table_caption", + "bbox": [ + 0.14, + 0.675, + 0.425, + 0.689 + ], + "angle": 0, + "content": "Table 3. Several resolution of model." + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.692, + 0.854, + 0.776 + ], + "angle": 0, + "content": "Another solution is to use bandwidth latency. We send data at several times, we do not send the entire set of data but only modified particles. We send at first time the sensors data, and subsequently we send a range of data (the nearest). After few minutes, all data are sent. This solution gives good results, and simulates a thermal diffusion in the whole structure of particles. Figure 8(a)-8(c) illustrate this method." + }, + { + "type": "image", + "bbox": [ + 0.15, + 0.78, + 0.382, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.235, + 0.912, + 0.299, + 0.924 + ], + "angle": 0, + "content": "(a) \\(\\mathrm{T} = 0\\)" + }, + { + "type": "image", + "bbox": [ + 0.384, + 0.78, + 0.617, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.468, + 0.912, + 0.532, + 0.924 + ], + "angle": 0, + "content": "(b) \\(\\mathrm{T} = 1\\)" + }, + { + "type": "image", + "bbox": [ + 0.619, + 0.78, + 0.851, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.704, + 0.912, + 0.766, + 0.924 + ], + "angle": 0, + "content": "(c) \\(\\mathrm{T} = 4\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.97, + 0.516, + 0.98 + ], + "angle": 0, + "content": "8/10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.141, + 0.066, + 0.415, + 0.082 + ], + "angle": 0, + "content": "Figure 8. Bandwidth simplification." + }, + { + "type": "title", + "bbox": [ + 0.143, + 0.115, + 0.341, + 0.136 + ], + "angle": 0, + "content": "5. 
CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.15, + 0.851, + 0.252 + ], + "angle": 0, + "content": "In this paper, we have presented a method to visualize sensors data extracted from a Green Data Center. This approach produces interpolation visualization for managing and visualizing data. This interpolation used a Delaunay triangulation and a cell extraction based on Voronoi. An unusual way of use particles helps to process data. First results present the solution proposed to visualize the inside of a GDC space. The second results proposed in this paper aim to improve the rendering." + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.253, + 0.854, + 0.372 + ], + "angle": 0, + "content": "For this, first step introduces a client/server protocol a second step illustrates methods to simplify the model. With these different approaches we improve the rendering time, preserving most important data are kept. In future works, we will work on data \"dressing\". We want to find a way to improve rendering of the scene using meatballs or marching cube algorithms. A main constraint of this work is real-time computation. Future work also concern to add rooms to the visualization. At present, we only visualize a single room. We want to visualize building, and complex form, by using an IFC loader." + }, + { + "type": "title", + "bbox": [ + 0.143, + 0.404, + 0.421, + 0.424 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.439, + 0.838, + 0.507 + ], + "angle": 0, + "content": "We want to thanks the PSSC (Products and Solutions Support Center) team of IBM Montpellier for having provided the necessary equipment and data need for this experimentation. And we thank the FUI (Fonds Unique Interministriel) for their financial support." 
+ }, + { + "type": "title", + "bbox": [ + 0.143, + 0.54, + 0.311, + 0.559 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.574, + 0.744, + 0.606 + ], + "angle": 0, + "content": "[1] Clark, J. H., \"Hierarchical geometric models for visible surface algorithms,\" Communications of the ACM 19(10), 547-554 (1976)." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.609, + 0.81, + 0.641 + ], + "angle": 0, + "content": "[2] Damon, M., Kameyama, M., Knox, M., Porter, D., Yuen, D., and Sevre, E., \"Interactive visualization of 3d mantle convection,\" Visual Geosciences (2008)." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.643, + 0.831, + 0.675 + ], + "angle": 0, + "content": "[3] Jordan, K. E., Yuen, D. A., Reuteler, D. M., Zhang, S., and Haimes, R., \"Parallel interactive visualization of 3d mantle convection,\" IEEE Comput. Sci. Eng. 3(4), 29-37 (1996)." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.677, + 0.844, + 0.71 + ], + "angle": 0, + "content": "[4] Reeves, W. T., \"Particle systems - a technique for modeling a class of fuzzy objects,\" ACM Transactions on Graphics 2, 359-376 (1983)." + }, + { + "type": "ref_text", + "bbox": [ + 0.144, + 0.712, + 0.571, + 0.727 + ], + "angle": 0, + "content": "[5] Latta, L., \"Building a million particle system,\" (2004)." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.728, + 0.783, + 0.761 + ], + "angle": 0, + "content": "[6] Kapferer, W. and Riser, T., \"Visualization needs and techniques for astrophysical simulations,\" New Journal of Physics 10(12), 125008 (15pp) (2008)." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.763, + 0.837, + 0.794 + ], + "angle": 0, + "content": "[7] Schroeder, W. J., Zarge, J. A., and Lorensen, W. E., \"Decimation of triangle meshes,\" 65-70 (1992)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.797, + 0.696, + 0.812 + ], + "angle": 0, + "content": "[8] Luebke, D., \"A survey of polygonal simplification algorithms,\" (1997)." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.814, + 0.854, + 0.847 + ], + "angle": 0, + "content": "[9] He, T., Hong, L., Kaufman, A., Varshney, A., and Wang, S., \"Voxel based object simplification,\" in [Proc. SIGGRAPH Symposium on Interactive 3D Graphics], 296-303 (1995)." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.848, + 0.776, + 0.88 + ], + "angle": 0, + "content": "[10] Lorensen, W. E. and Cline, H. E., \"Marching cubes: A high resolution 3d surface construction algorithm,\" SIGGRAPH Comput. Graph. 21(4), 163-169 (1987)." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.882, + 0.791, + 0.914 + ], + "angle": 0, + "content": "[11] Pauly, M., Gross, M., and Kobbelt, L. P., \"Efficient simplification of point-sampled surfaces,\" (2002)." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.916, + 0.844, + 0.932 + ], + "angle": 0, + "content": "[12] Moenning, C., , Moenning, C., and Dodgson, N. A., \"Intrinsic point cloud simplification,\"" + }, + { + "type": "list", + "bbox": [ + 0.142, + 0.574, + 0.854, + 0.932 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.97, + 0.516, + 0.98 + ], + "angle": 0, + "content": "9/10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.066, + 0.195, + 0.08 + ], + "angle": 0, + "content": "(2004)." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.083, + 0.798, + 0.132 + ], + "angle": 0, + "content": "[13] Song, H. and Feng, H.-Y., \"A progressive point cloud simplification algorithm with preserved sharp edge data,\" The International Journal of Advanced Manufacturing Technology 45, 583-592 (November 2009)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.134, + 0.841, + 0.166 + ], + "angle": 0, + "content": "[14] Buschmann, C., Pfisterer, D., Fischer, S., Fekete, S. P., and Kröller, A., \"Spyglass: a wireless sensor network visualizer,\" SIGBED Rev. 2(1), 1-6 (2005)." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.168, + 0.798, + 0.2 + ], + "angle": 0, + "content": "[15] Avis, D. and Bhattacharya, B., \"Algorithms for computing d-dimensional voronoi diagrams and their duals,\" 1, 159-180 (1983)." + }, + { + "type": "ref_text", + "bbox": [ + 0.144, + 0.202, + 0.851, + 0.235 + ], + "angle": 0, + "content": "[16] Rycroft, C. H., \"Voro++: a three-dimensional voronoi cell library in \\( c++ \\),\" Chaos 19 (2009). Lawrence Berkeley National Laboratory." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.237, + 0.819, + 0.268 + ], + "angle": 0, + "content": "[17] Barber, C. B., Dobkin, D. P., and Huhdanpaa, H., \"The quickhull algorithm for convex hulls,\" ACM Trans. Math. Softw. 22(4), 469-483 (1996)." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.271, + 0.773, + 0.303 + ], + "angle": 0, + "content": "[18] Snyder, J. M. and Barr, A. H., \"Ray tracing complex models containing surface tessellations,\" SIGGRAPH Comput. Graph. 21(4), 119-128 (1987)." + }, + { + "type": "ref_text", + "bbox": [ + 0.144, + 0.305, + 0.836, + 0.336 + ], + "angle": 0, + "content": "[19] Hoppe, H., \"Progressive meshes. computer graphics,\" SIGGRAPH96 Proceedings, 99108 (1996)." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.339, + 0.774, + 0.372 + ], + "angle": 0, + "content": "[20] Burns, D. and Osfield, R., \"Open scene graph a: Introduction, b: Examples and applications,\" 265 (2004)." 
+ }, + { + "type": "list", + "bbox": [ + 0.143, + 0.066, + 0.851, + 0.372 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.167, + 0.394, + 0.39, + 0.409 + ], + "angle": 0, + "content": "Further author information:" + }, + { + "type": "text", + "bbox": [ + 0.167, + 0.412, + 0.482, + 0.427 + ], + "angle": 0, + "content": "Lange B.: E-mail: benoit.lange@lirmm.fr" + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.429, + 0.544, + 0.444 + ], + "angle": 0, + "content": "Rodriguez N.: E-mail: nancy.rodriguez@lirmm.fr" + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.446, + 0.498, + 0.46 + ], + "angle": 0, + "content": "Puech W.: E-mail: william.puech@lirmm.fr" + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.463, + 0.468, + 0.478 + ], + "angle": 0, + "content": "Rey H.: E-mail:REYHERVE@fr.ibm.com" + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.48, + 0.529, + 0.495 + ], + "angle": 0, + "content": "Vasques X.: E-mail: xaviervasques@fr.ibm.com" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.97, + 0.519, + 0.98 + ], + "angle": 0, + "content": "10/10" + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_origin.pdf b/data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b18d64ddc3337469734d990e27fd93b7953a6ad3 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ccb0b38b36e023702ff1726eaa2bd337a2762c59724e11e0f0dfcd4402cebf8 +size 964405 diff --git a/data/2025/2503_09xxx/2503.09198/full.md b/data/2025/2503_09xxx/2503.09198/full.md new file mode 100644 index 0000000000000000000000000000000000000000..5f43bb263267fc8297fe4185d5003b9afd722dc6 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/full.md @@ -0,0 +1,222 @@ +# A 3D particle visualization 
system for temperature management + +Lange B. $^{a}$ , Rodriguez N. $^{a}$ , Puech W. $^{a}$ , Rey H. $^{b}$ and Vasques X. $^{b}$ + +$^{a}$ LIRMM, 141 rue ADA, Montpellier, France; + +b IBM, Rue de la vieille poste, Montpellier, + +France + +# Abstract + +This paper deals with a 3D visualization technique proposed to analyze and manage energy efficiency from a data center. Data are extracted from sensors located in the IBM Green Data Center in Montpellier France. These sensors measure different information such as hygrometry, pressure and temperature. We want to visualize in real-time the large among of data produced by these sensors. A visualization engine has been designed, based on particles system and a client server paradigm. In order to solve performance problems, a Level Of Detail solution has been developed. These methods are based on the earlier work introduced by J. Clark in $1976^{1}$ . In this paper we introduce a particle method used for this work and subsequently we explain different simplification methods applied to improve our solution. + +Keywords: 3D Visualization, Sensors, Particles, Client/Server, Level Of Details + +# 1. INTRODUCTION + +In this paper, we present a method to produce a 3D visualization for analyzing and managing temperature. Data are extracted from sensors located in the IBM Green Data Center in Montpellier, which provides many different types of information like temperature, pressure or hygrometry. In our system, sensors are placed in a virtual room and the internal space is modeled using particles. The main constraint here is to produce a real-time rendering. However, latency appears du to the number of vertices. In this paper, we use a solution called LOD (Level Of Detail) to produce multi resolution 3D objects. This solution has been introduced in 1976 by J. Clark1. In this paper, J. Clark introduces the use of several mesh resolutions to simplify the 3D scene complexity. 
In our work, we use various simplification methods to provide interactive rendering and allows rendering the most important part of data extracted from sensors. In this paper, we describe how we create a room, and the methods used to produce different resolution visualization. In Section 2, we introduce related work on particles systems and LOD. In Section 3, we expose our solution to simplify particles system. In Section 4 we give some results and finally, in Section 5 we present our conclusions and future work. + +# 2. RELATED WORK + +In this section we present several previous works concerning data visualization, particle systems and level of detail methods. + +Some previous work present solutions to visualize large data flow extracted from mantle convection. M. Damon et al. $^{2}$ and K. E. Jordan et al. $^{3}$ present interactive viewers for this kind of data. These data are computed by using Hight Performance Computing (HPC) and visualized on a large display. The rendering is calculated by using another HPC. The data flow is very important and a real-time 3D simulation is hard to obtain. W. Kapfer and + +T. Riser6 introduce how to use particle system to visualize astronomic simulation, particles representing space objects. The number of particles is extremely important for computing motion in real-time. GPU computing is preferred to render instead of a common HPC solution. To display their data, they have developed their own 3D graphical engine. The space objects are represented by point sprite instead of sphere. Lights are used to give a spherical aspect to the point sprite. This solution allows to render more stars than spherical object method. The 3D engine provides different rendering methods to group space objects: cell simplification or extraction of isosurface. The use of GPU seems quite well for a particle solution, parallel processing allows to render large data; the astrological data seems to be well suited. + +In 1976, J. 
Clark introduces Level Of Detail (LOD) concept1. LOD consists of several resolution meshes for using them at different distances from the camera. Firstly, designer produces these meshes. First algorithms, in 1992 Schroeder et al. developed a method by decimation for simplify the mesh7. It analyses mesh geometry and evaluates the complexity of triangles. Vertices are removed if only constraints set by the user are respected. Vertices are removed and gaps are filled using triangulation. These algorithms of simplification are not enough to simplify mesh efficiently because shape is not always totally respected. D. Luebke, in 1997, has proposed a taxonomy of mesh simplification8. He presented the most used algorithms. He extracted different ways to use each algorithm. But in this paper, only one solution works with volumetric mesh9. T. He et al. propose a method based on voxel simplification by using a grid for clustering voxels. A marching cube10 algorithm was applied to produce a surface mesh. But this simplification algorithm did not preserve the shape of the mesh. In our work, we look for point cloud simplification. Indeed, previous methods which deal with simplification for surface point cloud like11-13 are not adapted to our case. All of these methods produce LOD for surface mesh and point cloud is extracted from scanner. + +# 3. PROPOSED APPROACH + +This section presents the different methods that are used to visualize a kind of data from Green Data Center (GDC). The main goal is to be able to visualize in real-time the evolution of temperature in the data center. For this, we use a special particle method. Particles are located using a segmentation algorithm based on Voronoi cell extraction and Delaunay triangulation. The latency due to the large flow of particles is avoided by using a client server paradigm. We improve our solution by using LOD methods to simplify rendering. + +# 3.1 Particle systems + +Rooms are the bases of our study. 
For modeling a room, we extract the shape of the space representation which is composed by a box with three measures: length $(l \in \mathbb{R})$ , width $(w \in \mathbb{R})$ , height $(h \in \mathbb{R})$ . Sensors are represented by $S = \{\mathrm{S}_1, \dots, \mathrm{S}_M\}$ , where $M$ is the number of sensors. Sensors $\mathrm{S}_i (\mathrm{i} \in \{1, \dots, M\})$ are placed on the space on a layer $\mathbf{L} \in \mathbb{N}$ and have a location represented by: $\{\mathbf{X}_i, \mathbf{Y}_i, \mathbf{L}_j\}$ with $\mathbf{X}_i \in \mathbb{R}$ , $\mathbf{Y}_i \in \mathbb{R}$ and $j$ is the layer used. For modeling the space inside a room, we use a particle system instead of 2D map representations which have some lacks. $^{14}$ Actually 2D map does not allow having a real visualization of space. A particle visualization gives a better efficiency for modeling space. We use a large number of particles to represent the entire space. $\mathbf{N} \in \mathbb{N}$ represents the number of particles in the room. It can be calculated using: + +$$ +N = \frac {\left(\left(l + 1\right) \times (h + 1) \times (w + 1)\right)}{\delta^ {3}} \tag {1} +$$ + +where $\delta \in \mathbb{R}$ is the space between particles. The particle grid is regular. In this model, three layers of temperature sensors compose rooms. They are defined according to their real locations in the data center. Figure ?? presents the different layers of sensors in the data center. + +Particles carry information, and flow motion can be simulated if needed by changing the value of particles and the computational cost is inferior. + +# 3.2 Segmentation algorithms + +In our solution, each sensors has an influence on surrounding particules. To calculate the set of particles in the sensor range, we use two methods: Voronoi cells extraction and Delaunay triangulation. + +Voronoi cells is a method to extract a partition of space $^{15}$ . 
This method is available for $\phi$ dimensions where $\phi \in [1, +\infty]$ , but most of implementations are done in 2D. Tools for extracting 3D Voronoi diagrams exist: Voro++ and QHull but particles are discrete and these solutions are not suitable because they extract Voronoi diagram in a continuous way. Then we designed our own method based on sphere expansion. We search nearest sensors for each particle. This part allows to weight particles outside the sensors mesh. A second method to weight the interior of the sensors mesh is used. We extract the mesh tetrahedron of sensors using the Delaunay triangulation implemented in QHull. This method was used to analyze the location of particle. We compute the exact location using ray tracing on the soup of tetrahedron. First, we search the nearest particles inside the hull of each tetrahedron. We extract the normal of each face of tetrahedron and we apply these normals on each particle. If the ray cuts three faces or more, the particle is inside the tetrahedron. This method is cost expensive and done in preprocessing. Moreover, particles are static and position didn't need to be update. + +# 3.3 Client server paradigm + +To improve computation, a client server paradigm is used. We define a low cost communication protocol to transfer data from a server to a client. Server computes the modification of particles and the client displays the results. This protocol works in five steps. These steps are: sending header, sending sensor data, sending particle data, sending footer and receiving acknowledgment/language command from client. At each step, the server waits the acknowledgment from the client. We develop two ways to send data. The first sends the entire point cloud (sensors and particles). The biggest problem of this method is the + +transmission of data. Sensors are sent with their coordinates and their value. We encode these data in bit words. For the particles data, the same method was used. 
The footer was sent for closing the communication. The second method is used to reduce efficiently the communication cost. We only send modified sensors and particles. The id and the new value is sent instead of coordinates. The last step is the command sent by the client. It allows the user to interact with the server. We use it to modify the camera viewpoint. + +# 3.4 Level of detail for particles + +Level of detail (LOD) is one of the most important methods in computer graphics. It allows to solve rendering problems or performance problems. This method consists by producing several resolution of a 3D object. In our works, we use some features to define the object resolution: hardware and viewpoint. Hardware and viewpoint do not need the same data structure and we need to recompute it for each modification of the viewpoint or when hardware changes. LOD was defined by two problems statement. The first one uses a sample of original points, the second one uses a new point data set. In this part, we define six methods to produce LOD. The four first methods are for the client, the other are for the server. + +# Problems statement: + +For this two approaches, we have a set $\omega$ of Vertices $V$ , $V = \{V_1, \ldots, V_\omega\}$ . Each vertex is defined in $\mathbb{R}^3$ . Simplify a mesh using a sample vertex means $\omega > \omega 2$ , where $\omega 2$ is the size of the second data set. For approach 1, we obtain a new object $\mathrm{V}2 = \{\mathrm{V}2_1, \ldots, \mathrm{V}2_\omega\}$ with fewer points than V but V 2 is a subset of V. For approach 2, we obtain a new object $\mathrm{V}3 = \{\mathrm{V}3_1, \ldots, \mathrm{V}3_\omega\}$ with fewer points than V but each point in V 3 is a new vertex. + +In Section 2 we have presented methods to produce simplification. A few were designed for volumetric simplification. In this section, we propose several methods to produce different volumetric simplifications on our client. 
We develop four approaches to simplify 3D objects: clustering, neighbor simplification and two approaches based on the server. The clustering method is based on the work of He et al., $^{9}$ and it consists of clustering particles using a 3D grid. Cell sizes of the grid are set depending on the viewpoint of the camera. Clusters are weighted with the average of the different values of particles. The position is the barycenter of these particles. Figures 1(a)-1(e) give some examples of simplification using the clustering solution. Figure 1(a) presents the original point cloud mesh. Figures + +1(b) and 1(d) give two different methods for clustering. And finally, Figures 1(c) and 1(e) give the results of clustering methods. + +Figure 1. Clustering method for point cloud simplification. +![](images/ae6cbd294176dc493b50c73240c504ea8c9fc09c9e3151fba6f0be709b525025.jpg) +The second solution used is based on neighborhood extraction. Before runtime, we extract all neighbors of a particle. We measure the distance between each pair of particles. Some optimization can help to decrease complexity: we can estimate easily in our structure which particle is closer to another one (using the fact that the particle grid is regular). After this, + +we extract the main value of particles. We explore each neighbor of particles and we keep the most important. In some cases, the most important can be the high values, in others the low values, and in others both of them. This solution is able to produce a low resolution model with the most important information structure. Several low resolution models are created by exploring deeper in the neighborhood. Figures 2(a)-2(c) illustrate a neighbor, and two simplifications of this mesh. + +![](images/23944f9963b290146f2ac445e8970545bcf00b2949e999067a13468653d749cc.jpg) +(a) Neighborhood cloud. + +![](images/0e41ba0aeddc8af5f2b6664c64abea63f9b32bfd4692c38f916d07f501d537e5.jpg) +(b) Simplification neighborhood of 1. 
+ +![](images/c01c12cfcf72e187db35ff49ad4d5edb57331b8d93efa4c1f20941ebf580a3d7.jpg) +(c) Simplification neighborhood of 2. +Figure 2. Neighbor method for simplification. + +Other methods were based on server instead of client. Client sent via TCP connection his viewpoint. The server recomputes the particles structure and recreates the entire structure. With this solution, it is possible to produce a point cloud resolution depending on hardware. Figure 3(a) presents particles rendering with a distance of 2 from the camera. Figure 3(b) is the decimation produced with a distance of 3 and Figure 3(c) is a distance of 1. + +Another method was based on Voronoi diffusion of temperature. The bandwidth for transmitting data is limited. We developed Voronoi temperature diffusion to solve this communication. In this approach, we update data using sphere expansion. Each time, we update particles depending on their distance from sensors. The more particles are distant from sensors the later they will be refreshed. This method sends only modified particles. The bandwidth is saved and the visualization gives a flow effect. Figure 4(a) represents values at time 0. At time 1, values of sensors change, 4(b). After time 2, we update a first range of particles 4(c) and finally the second range 4(d). + +![](images/7eac2440a2fedd66d943c82b390c3a9df5d95d019d523a29c7a3e2f9c7218928.jpg) +(a) Particles server $(\mathrm{D} = 2)$ +Figure 3. Particle simplification using server and distance. + +![](images/c4777a0507eee1c1beaa13c04e351827098bc588e6ced9d8de6cff01430e3df5.jpg) +(b) Particles produce server $(\mathrm{D} = 3)$ + +![](images/5dabdfa6b0129921b2abb27785be9608573a25f26e4646d20c6b92250fad1414.jpg) +(c) Particles produce by server $(\mathrm{D} = 1)$ + +![](images/e340d9807d4ed601eee3bd74351618a9adfda026a8dbd720f9d2aa40857dad26.jpg) +(a) Particles and sensors (T = 0). 
+ +![](images/92c09c0b767ea453098777786fb815e8c2f833a8abc790807397f42e9a6f6887.jpg) +(b) Sensors update $(\mathrm{T} = 1)$ . + +![](images/416dbc943d118d6834e80c8c5ad759b34f940700b96fd0b87bb23c082f10cf10.jpg) +(c) First range $(\mathrm{T} = 2)$ . +Figure 4. Simplification using bandwidth size. + +![](images/a79e8f2104ab8c32df4137808fc6b4ce9b7465bfa15c2080bd3aabb2c58986d0.jpg) +(d) Second range $(\mathrm{T} = 3)$ . + +# 4. EXPERIMENTAL RESULTS + +The data are extracted from two rooms of the IBM data center. Firstly, we present our method for rendering the room, and later we present our results using Level Of Detail methods. + +# 4.1 Data visualization + +We want to visualize and manage the consumption of a data center. For the visualization, we want to use an IFC viewer. But the IFC model for GDC is not available yet. Data center extraction of the room space is for the moment done by hand. The room is empty and was represent by a simple shape a box with 4 meters length, 3 meters width and 2.5 meters height. We use point cloud visualization based on particle paradigm. We use the two rooms of the data center and we put the same number of particles (30000) and 35 sensors distributed on three layers at 1 meter; 2 meter and on the ground. We define high and low temperature regarding the real sensors value. Figure 5(a) presents temperature color scale, Figure 5(b) and Figure 5(c) present data center sensors. + +The next step is to interpolate data from sensors. For this, we extract the sensor mesh. We use QHULL to produce a soup of tetrahedrons. Particles need to be located. We can determine which tetrahedron is the nearest, we extract the box hull of tetrahedron and we apply for each particle the norms of each tetrahedron face. If these rays cut three or more faces, then particle is inside the tetrahedron. With this method, we can determine exactly the location of each particles regarding to the tetrahedrons, a weight is given to them easily. 
It was used to apply a coefficient to the value of each vertex of the tetrahedron. For the outside particles, another solution was used: Voronoi cells. This method is based on a discrete extraction of Voronoi cells. We use our own method because other methods like Voro++ or QHull extract the Voronoi diagram in a continuous way. + +![](images/7c729f1706460b0bf0adde7a8dbd071f9ae77f611ef3f85b3d951fb62795016c.jpg) + +![](images/8c16ad7a59f34e5974a4322466cec0149f8905a1a1b1cfadff910a3fec50d004.jpg) +(b) Room one. + +![](images/2eb0e7b79700cbbd72a8a244992163f567dbbd68f9d3db6bad3e5808cda4dabb.jpg) +(c) Room two. + +# 4.2 Level of details + +In the earlier days of this project, the first solution proposed gave a low frame rate, about 15 FPS (Frames Per Second): visualization was not in real-time (real-time is about 24 FPS). For solving this problem, we define a client server paradigm. This solution allows us to produce a real-time rendering on the client. Figure ?? gives an example of LOD for particles. We use Openscenegraph $^{20}$ as a 3D engine. It provides several features useful for LOD. A special object is defined to manage multi-resolution models. It calculates the distance of the object from the camera. For our experimentation we use five resolutions of the mesh. The first mesh is the original mesh; it is set at 0 to 500. The next mesh was set at 500 to 1000, the next at 1000 to 1500 and the other at 1500 to 2000. These three meshes were constructed by specific LOD methods: clustering and significant vertices. Clustering defines a 3D grid inside the room. The size of each cell depends on the viewpoint location. The size of the cluster depends on the visibility of the clustered particles. First results are given in Figures 6(a) and 6(b). The value of a cluster is the average of the clustered values. The number of points of the final mesh depends on the grid size. Table 1 shows the results at several distances. + +
D = 0 to 500D = 500 to 1000D = 1000 to 1500D = 1500 to 2000
C = X30000390024036
+ +![](images/9fbe1142edda7eab13f0abb9592b84e63d1611257e7426d7d569a68b9df82b38.jpg) +Figure 5. Data use to model the system. +(a) $\mathrm{D} = 500$ to 1000. +Figure 6. Clustering visualization algorithms. + +![](images/69ae709df8988787ff641b5b6eb5a608ca0636bba7110a3a4735064168d117da.jpg) +Table 1. Results of clustering simplification. +(b) $D = 1000$ to 1500. + +Significant points method extracts the neighbors for each particle. We extract the highest and lowest temperatures, by exploring the neighborhood of a particle, in order to have significant vertices of the model. For the first step of simplified model we explore neighbor. For the second model, we explore neighbor and neighbor of neighbor, etc. This solution simplifies drastically the model. First results are given Figure ??-??. Table 2 shows the number of vertices at several distance. + +
D = 0 to 500D = 500 to 1000D = 1000 to 1500D = 1500 to 2000
C = X300002295045543524
+ +![](images/4050e8f862ac40be7bc6d5c239997192325936e244911b8d69fe60a4ab8810b8.jpg) +(a) Neighborhood 1. +Figure 7. Clustering visualization algorithms using neighbor. + +![](images/dafb2130c8265a91a7faed53db3de1181cd524770896acda68d5a953df004f87.jpg) +(b) Neighborhood 2. + +The first server solution receives orders from client as presented Section 3.4. We calculate the viewpoint distance and we send data according to it. A new structure is recalculated if the camera is too far from the object. After the recomputing, we send the new data. This solution allows the user to receive more or less data according to its distance to the object. Table 3 shows some different resolutions produced with this method. + +Table 2. Results of neighbor simplification. + +
D = 0 to 500D = 500 to 1000D = 1000 to 1500D = 1500 to 2000
C = X1200003000075001875
+ +Table 3. Several resolutions of the model. + +Another solution is to use bandwidth latency. We send data at several times; we do not send the entire set of data but only modified particles. We first send the sensors data, and subsequently we send a range of data (the nearest). After a few minutes, all data are sent. This solution gives good results, and simulates a thermal diffusion in the whole structure of particles. Figures 8(a)-8(c) illustrate this method. + +![](images/2fcee6efa3b1d3c44144e2639f3857a15c4079f838c4f9132953fd427a8b1be9.jpg) +(a) $\mathrm{T} = 0$ + +![](images/b2be70bd4cc7d261ac38e8bc653a0c09c537d49d2a5612ed5c067058a98ef463.jpg) +(b) $\mathrm{T} = 1$ + +![](images/0d4e22f04d3ea25761125c93fc6d70fe856bf840ef353c043f32d21c9633724e.jpg) +(c) $\mathrm{T} = 4$ + +# 5. CONCLUSION + +In this paper, we have presented a method to visualize sensor data extracted from a Green Data Center. This approach produces interpolation visualization for managing and visualizing data. This interpolation used a Delaunay triangulation and a cell extraction based on Voronoi. An unusual way of using particles helps to process data. First results present the solution proposed to visualize the inside of a GDC space. The second results proposed in this paper aim to improve the rendering. + +For this, a first step introduces a client/server protocol and a second step illustrates methods to simplify the model. With these different approaches we improve the rendering time while preserving the most important data. In future works, we will work on data "dressing". We want to find a way to improve rendering of the scene using metaballs or marching cube algorithms. A main constraint of this work is real-time computation. Future work also concerns adding rooms to the visualization. At present, we only visualize a single room. We want to visualize buildings, and complex forms, by using an IFC loader. 
+ +# ACKNOWLEDGMENTS + +We want to thanks the PSSC (Products and Solutions Support Center) team of IBM Montpellier for having provided the necessary equipment and data need for this experimentation. And we thank the FUI (Fonds Unique Interministriel) for their financial support. + +# REFERENCES + +[1] Clark, J. H., "Hierarchical geometric models for visible surface algorithms," Communications of the ACM 19(10), 547-554 (1976). +[2] Damon, M., Kameyama, M., Knox, M., Porter, D., Yuen, D., and Sevre, E., "Interactive visualization of 3d mantle convection," Visual Geosciences (2008). +[3] Jordan, K. E., Yuen, D. A., Reuteler, D. M., Zhang, S., and Haimes, R., "Parallel interactive visualization of 3d mantle convection," IEEE Comput. Sci. Eng. 3(4), 29-37 (1996). +[4] Reeves, W. T., "Particle systems - a technique for modeling a class of fuzzy objects," ACM Transactions on Graphics 2, 359-376 (1983). +[5] Latta, L., "Building a million particle system," (2004). +[6] Kapferer, W. and Riser, T., "Visualization needs and techniques for astrophysical simulations," New Journal of Physics 10(12), 125008 (15pp) (2008). +[7] Schroeder, W. J., Zarge, J. A., and Lorensen, W. E., "Decimation of triangle meshes," 65-70 (1992). +[8] Luebke, D., "A survey of polygonal simplification algorithms," (1997). +[9] He, T., Hong, L., Kaufman, A., Varshney, A., and Wang, S., "Voxel based object simplification," in [Proc. SIGGRAPH Symposium on Interactive 3D Graphics], 296-303 (1995). +[10] Lorensen, W. E. and Cline, H. E., "Marching cubes: A high resolution 3d surface construction algorithm," SIGGRAPH Comput. Graph. 21(4), 163-169 (1987). +[11] Pauly, M., Gross, M., and Kobbelt, L. P., "Efficient simplification of point-sampled surfaces," (2002). +[12] Moenning, C., , Moenning, C., and Dodgson, N. A., "Intrinsic point cloud simplification," + +(2004). +[13] Song, H. 
and Feng, H.-Y., "A progressive point cloud simplification algorithm with preserved sharp edge data," The International Journal of Advanced Manufacturing Technology 45, 583-592 (November 2009). +[14] Buschmann, C., Pfisterer, D., Fischer, S., Fekete, S. P., and Kröller, A., "Spyglass: a wireless sensor network visualizer," SIGBED Rev. 2(1), 1-6 (2005). +[15] Avis, D. and Bhattacharya, B., "Algorithms for computing d-dimensional voronoi diagrams and their duals," 1, 159-180 (1983). +[16] Rycroft, C. H., "Voro++: a three-dimensional voronoi cell library in $c++$ ," Chaos 19 (2009). Lawrence Berkeley National Laboratory. +[17] Barber, C. B., Dobkin, D. P., and Huhdanpaa, H., "The quickhull algorithm for convex hulls," ACM Trans. Math. Softw. 22(4), 469-483 (1996). +[18] Snyder, J. M. and Barr, A. H., "Ray tracing complex models containing surface tessellations," SIGGRAPH Comput. Graph. 21(4), 119-128 (1987). +[19] Hoppe, H., "Progressive meshes. computer graphics," SIGGRAPH96 Proceedings, 99108 (1996). +[20] Burns, D. and Osfield, R., "Open scene graph a: Introduction, b: Examples and applications," 265 (2004). 
+ +Further author information: + +Lange B.: E-mail: benoit.lange@lirmm.fr + +Rodriguez N.: E-mail: nancy.rodriguez@lirmm.fr + +Puech W.: E-mail: william.puech@lirmm.fr + +Rey H.: E-mail:REYHERVE@fr.ibm.com + +Vasques X.: E-mail: xaviervasques@fr.ibm.com \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09198/images/0d4e22f04d3ea25761125c93fc6d70fe856bf840ef353c043f32d21c9633724e.jpg b/data/2025/2503_09xxx/2503.09198/images/0d4e22f04d3ea25761125c93fc6d70fe856bf840ef353c043f32d21c9633724e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5def2780305cf01e1e03ab140e1d88418bc6b996 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/0d4e22f04d3ea25761125c93fc6d70fe856bf840ef353c043f32d21c9633724e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf7e340500a490b65d2688bf223d909c6737c0004e03a8c25c3268252594ecc7 +size 34741 diff --git a/data/2025/2503_09xxx/2503.09198/images/0e41ba0aeddc8af5f2b6664c64abea63f9b32bfd4692c38f916d07f501d537e5.jpg b/data/2025/2503_09xxx/2503.09198/images/0e41ba0aeddc8af5f2b6664c64abea63f9b32bfd4692c38f916d07f501d537e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a5b03811a6490ded134d68456e16d767d9ffc6df --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/0e41ba0aeddc8af5f2b6664c64abea63f9b32bfd4692c38f916d07f501d537e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3efd0355d2dac805df8cba18a192cb15f554a0a3792f6fcc95d009a756e1df36 +size 17157 diff --git a/data/2025/2503_09xxx/2503.09198/images/23944f9963b290146f2ac445e8970545bcf00b2949e999067a13468653d749cc.jpg b/data/2025/2503_09xxx/2503.09198/images/23944f9963b290146f2ac445e8970545bcf00b2949e999067a13468653d749cc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bdebce89971d9fc9c9cd7e6d88699704c53e1a10 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/23944f9963b290146f2ac445e8970545bcf00b2949e999067a13468653d749cc.jpg @@ -0,0 +1,3 
@@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad59ce52b1339fa2422de7b607779ed92905c68ed504166a359c043e65068066 +size 24279 diff --git a/data/2025/2503_09xxx/2503.09198/images/2eb0e7b79700cbbd72a8a244992163f567dbbd68f9d3db6bad3e5808cda4dabb.jpg b/data/2025/2503_09xxx/2503.09198/images/2eb0e7b79700cbbd72a8a244992163f567dbbd68f9d3db6bad3e5808cda4dabb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a87748e4921ac1b401ce8f5fb83c6450663f2e23 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/2eb0e7b79700cbbd72a8a244992163f567dbbd68f9d3db6bad3e5808cda4dabb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0cc5b8716be148a81386a4a3491760489d52969532406c2b3ce187c298874b6 +size 12151 diff --git a/data/2025/2503_09xxx/2503.09198/images/2fcee6efa3b1d3c44144e2639f3857a15c4079f838c4f9132953fd427a8b1be9.jpg b/data/2025/2503_09xxx/2503.09198/images/2fcee6efa3b1d3c44144e2639f3857a15c4079f838c4f9132953fd427a8b1be9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e277368f0ed42af13eac13d0fde138ab5e9054e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/2fcee6efa3b1d3c44144e2639f3857a15c4079f838c4f9132953fd427a8b1be9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:400d3a13ccb2dd73e84139b0b82ce35113a3fa7de376702bbc830c07781089f1 +size 34831 diff --git a/data/2025/2503_09xxx/2503.09198/images/389ef0f63a90171c14db5cd9a925792e376222c8c56a5801cacb3dcc99360c96.jpg b/data/2025/2503_09xxx/2503.09198/images/389ef0f63a90171c14db5cd9a925792e376222c8c56a5801cacb3dcc99360c96.jpg new file mode 100644 index 0000000000000000000000000000000000000000..96551ad02601696eae358b7deac5442eade10d36 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/389ef0f63a90171c14db5cd9a925792e376222c8c56a5801cacb3dcc99360c96.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8b738346a1efcc6898c06533ca3daada4bff2aa0246d1d3cd68916688104538 +size 31522 diff 
--git a/data/2025/2503_09xxx/2503.09198/images/3cd8a951d8771e165427c2faca0d2508e4c8cef94fb244dc3a04f2ca031996a3.jpg b/data/2025/2503_09xxx/2503.09198/images/3cd8a951d8771e165427c2faca0d2508e4c8cef94fb244dc3a04f2ca031996a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ea13a14d362ba93774ab550288a24c83c612f84 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/3cd8a951d8771e165427c2faca0d2508e4c8cef94fb244dc3a04f2ca031996a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04f04e80907c04f390e6173e4d289e9da12908156d708d45ab3302dbdb9f472d +size 32103 diff --git a/data/2025/2503_09xxx/2503.09198/images/4050e8f862ac40be7bc6d5c239997192325936e244911b8d69fe60a4ab8810b8.jpg b/data/2025/2503_09xxx/2503.09198/images/4050e8f862ac40be7bc6d5c239997192325936e244911b8d69fe60a4ab8810b8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31e3952d1730fa27f6a0c0b0651924330e058925 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/4050e8f862ac40be7bc6d5c239997192325936e244911b8d69fe60a4ab8810b8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a9493880228b324d76d4487b363d729497150389462f44ce103af95579902ce +size 16633 diff --git a/data/2025/2503_09xxx/2503.09198/images/416dbc943d118d6834e80c8c5ad759b34f940700b96fd0b87bb23c082f10cf10.jpg b/data/2025/2503_09xxx/2503.09198/images/416dbc943d118d6834e80c8c5ad759b34f940700b96fd0b87bb23c082f10cf10.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7199f4a8365a4cdee15c5df80e3a545af0ca70d1 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/416dbc943d118d6834e80c8c5ad759b34f940700b96fd0b87bb23c082f10cf10.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c00fd40686c2a8fff4d3aadc242a79b56905fd554a2190cdd43eb2aacae337a +size 16277 diff --git a/data/2025/2503_09xxx/2503.09198/images/5dabdfa6b0129921b2abb27785be9608573a25f26e4646d20c6b92250fad1414.jpg 
b/data/2025/2503_09xxx/2503.09198/images/5dabdfa6b0129921b2abb27785be9608573a25f26e4646d20c6b92250fad1414.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d0298e9a1300a369f98906ab623f72903b5c4e1 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/5dabdfa6b0129921b2abb27785be9608573a25f26e4646d20c6b92250fad1414.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b032e369f23e84e1b6cdefe7a578e9e4c9af816e82319f6dd16ae6e7d89b12f +size 27916 diff --git a/data/2025/2503_09xxx/2503.09198/images/69ae709df8988787ff641b5b6eb5a608ca0636bba7110a3a4735064168d117da.jpg b/data/2025/2503_09xxx/2503.09198/images/69ae709df8988787ff641b5b6eb5a608ca0636bba7110a3a4735064168d117da.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e750556cbc361bf1698e3e3f5101ad1dae5b895f --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/69ae709df8988787ff641b5b6eb5a608ca0636bba7110a3a4735064168d117da.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:019c407be9bb0ac0482f857c4d085e3abc7c84b5ae8979f1c6e57e6e125851a8 +size 9111 diff --git a/data/2025/2503_09xxx/2503.09198/images/7c729f1706460b0bf0adde7a8dbd071f9ae77f611ef3f85b3d951fb62795016c.jpg b/data/2025/2503_09xxx/2503.09198/images/7c729f1706460b0bf0adde7a8dbd071f9ae77f611ef3f85b3d951fb62795016c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2b513d7789b253998694f0886ca2db62a1bed326 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/7c729f1706460b0bf0adde7a8dbd071f9ae77f611ef3f85b3d951fb62795016c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98e1ad9e5115b540fc0696197655dfaa11633c7ea86f3e03b7bc5b0d272179c9 +size 6912 diff --git a/data/2025/2503_09xxx/2503.09198/images/7eac2440a2fedd66d943c82b390c3a9df5d95d019d523a29c7a3e2f9c7218928.jpg b/data/2025/2503_09xxx/2503.09198/images/7eac2440a2fedd66d943c82b390c3a9df5d95d019d523a29c7a3e2f9c7218928.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..ec9b587b171b42715690b731cfb04970fd00bca1 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/7eac2440a2fedd66d943c82b390c3a9df5d95d019d523a29c7a3e2f9c7218928.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e08c2cf2b6203cf6bdf72ce3dffec14230ad891158e9d6084a128b81ee05e509 +size 21178 diff --git a/data/2025/2503_09xxx/2503.09198/images/8c16ad7a59f34e5974a4322466cec0149f8905a1a1b1cfadff910a3fec50d004.jpg b/data/2025/2503_09xxx/2503.09198/images/8c16ad7a59f34e5974a4322466cec0149f8905a1a1b1cfadff910a3fec50d004.jpg new file mode 100644 index 0000000000000000000000000000000000000000..af30ea23f77387fe267a09bd51caab7cf5a6c84f --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/8c16ad7a59f34e5974a4322466cec0149f8905a1a1b1cfadff910a3fec50d004.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:731dd1a62eaa8d2eaa857aae65e3ee2e9192a6926a695aee23576aa5e8a20b5e +size 11462 diff --git a/data/2025/2503_09xxx/2503.09198/images/92c09c0b767ea453098777786fb815e8c2f833a8abc790807397f42e9a6f6887.jpg b/data/2025/2503_09xxx/2503.09198/images/92c09c0b767ea453098777786fb815e8c2f833a8abc790807397f42e9a6f6887.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e3a964dfcf82ca980c97c07a1f4cfaff5d632090 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/92c09c0b767ea453098777786fb815e8c2f833a8abc790807397f42e9a6f6887.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41d48cada424b2775d6bf44d047caf32c7b73dcbf8f7e66d8747dc16c68a86f3 +size 15219 diff --git a/data/2025/2503_09xxx/2503.09198/images/9434d9911bf9b1660b16b84b9672b9a68ac66eec53e61ed383a2c752f12025a2.jpg b/data/2025/2503_09xxx/2503.09198/images/9434d9911bf9b1660b16b84b9672b9a68ac66eec53e61ed383a2c752f12025a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a738b1f0efe7cce440a1d7c3509757bbf14d4be --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09198/images/9434d9911bf9b1660b16b84b9672b9a68ac66eec53e61ed383a2c752f12025a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cb00e7d0c7623129f10b29bde5541b581b822797cf9931d71ccc2a982033acc +size 29508 diff --git a/data/2025/2503_09xxx/2503.09198/images/96bce3148486d00ea374e73aeb6caf0bb16a8ee9cc4e78a0b161de5fdfe93db0.jpg b/data/2025/2503_09xxx/2503.09198/images/96bce3148486d00ea374e73aeb6caf0bb16a8ee9cc4e78a0b161de5fdfe93db0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04cb2571961a20ae69393e7334c82ff370a34a80 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/96bce3148486d00ea374e73aeb6caf0bb16a8ee9cc4e78a0b161de5fdfe93db0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7af20572861a11bc105528052554644ee1498a83b4edc5dffbc22ef56ea8d5e2 +size 7099 diff --git a/data/2025/2503_09xxx/2503.09198/images/9fbe1142edda7eab13f0abb9592b84e63d1611257e7426d7d569a68b9df82b38.jpg b/data/2025/2503_09xxx/2503.09198/images/9fbe1142edda7eab13f0abb9592b84e63d1611257e7426d7d569a68b9df82b38.jpg new file mode 100644 index 0000000000000000000000000000000000000000..38d0e608c19f9689211e1bfbbfc44ff62f9a7996 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/9fbe1142edda7eab13f0abb9592b84e63d1611257e7426d7d569a68b9df82b38.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de6cebf164edb2891a07edbc84c78aa50280c3e635d5bfd4052726b8dccb39f7 +size 22411 diff --git a/data/2025/2503_09xxx/2503.09198/images/a79e8f2104ab8c32df4137808fc6b4ce9b7465bfa15c2080bd3aabb2c58986d0.jpg b/data/2025/2503_09xxx/2503.09198/images/a79e8f2104ab8c32df4137808fc6b4ce9b7465bfa15c2080bd3aabb2c58986d0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..60dda839a4bf33321a0390f94a60d991ed1d8fce --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/a79e8f2104ab8c32df4137808fc6b4ce9b7465bfa15c2080bd3aabb2c58986d0.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:d6b06c8d2d6cd471cda093eaf39e2d5d0c63e0aff07451fe0c40fe6f1cf7dc64 +size 16622 diff --git a/data/2025/2503_09xxx/2503.09198/images/ae6cbd294176dc493b50c73240c504ea8c9fc09c9e3151fba6f0be709b525025.jpg b/data/2025/2503_09xxx/2503.09198/images/ae6cbd294176dc493b50c73240c504ea8c9fc09c9e3151fba6f0be709b525025.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4d1086e588e6825ab5d3b3e9363f98c7a984fc6 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/ae6cbd294176dc493b50c73240c504ea8c9fc09c9e3151fba6f0be709b525025.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71a23be94e3bd66349bf0fe54e7f9dbe1fa895084379ba3abc884c4b594642fd +size 62964 diff --git a/data/2025/2503_09xxx/2503.09198/images/b2be70bd4cc7d261ac38e8bc653a0c09c537d49d2a5612ed5c067058a98ef463.jpg b/data/2025/2503_09xxx/2503.09198/images/b2be70bd4cc7d261ac38e8bc653a0c09c537d49d2a5612ed5c067058a98ef463.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a6801e8e6ae9855d69cdb750a6ebca879ac69866 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/b2be70bd4cc7d261ac38e8bc653a0c09c537d49d2a5612ed5c067058a98ef463.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3ea0bd49ae7cadf8442e74183ad2868cbb5895d8e01bf7ac447b8f8b2644c4a +size 34926 diff --git a/data/2025/2503_09xxx/2503.09198/images/c01c12cfcf72e187db35ff49ad4d5edb57331b8d93efa4c1f20941ebf580a3d7.jpg b/data/2025/2503_09xxx/2503.09198/images/c01c12cfcf72e187db35ff49ad4d5edb57331b8d93efa4c1f20941ebf580a3d7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f658aee384d2ee564b09d980d057b27eaea4921 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/c01c12cfcf72e187db35ff49ad4d5edb57331b8d93efa4c1f20941ebf580a3d7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f8a16edfd96a7f182f3af008b5df688bc01c33d54446fca81a5938b1240f4e0 +size 11351 diff --git 
a/data/2025/2503_09xxx/2503.09198/images/c4777a0507eee1c1beaa13c04e351827098bc588e6ced9d8de6cff01430e3df5.jpg b/data/2025/2503_09xxx/2503.09198/images/c4777a0507eee1c1beaa13c04e351827098bc588e6ced9d8de6cff01430e3df5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..51e543d528182041c4be72a75859ca2f2dadedff --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/c4777a0507eee1c1beaa13c04e351827098bc588e6ced9d8de6cff01430e3df5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c4c7666de1b6ad8baee76238d1564d8706bf328b472b5d2fb87d036faccd007 +size 14853 diff --git a/data/2025/2503_09xxx/2503.09198/images/dafb2130c8265a91a7faed53db3de1181cd524770896acda68d5a953df004f87.jpg b/data/2025/2503_09xxx/2503.09198/images/dafb2130c8265a91a7faed53db3de1181cd524770896acda68d5a953df004f87.jpg new file mode 100644 index 0000000000000000000000000000000000000000..daf1774b5c27327e96d49161bdd80995efb77592 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/dafb2130c8265a91a7faed53db3de1181cd524770896acda68d5a953df004f87.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a84b4a6a3529fed77797976201af5558ff5dbeacdf32412d344e05f0f2967c7 +size 9761 diff --git a/data/2025/2503_09xxx/2503.09198/images/e340d9807d4ed601eee3bd74351618a9adfda026a8dbd720f9d2aa40857dad26.jpg b/data/2025/2503_09xxx/2503.09198/images/e340d9807d4ed601eee3bd74351618a9adfda026a8dbd720f9d2aa40857dad26.jpg new file mode 100644 index 0000000000000000000000000000000000000000..854749bec15a1f5947cec99bfc6cacffad286f24 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/images/e340d9807d4ed601eee3bd74351618a9adfda026a8dbd720f9d2aa40857dad26.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ed573289de5a8027fe1cd2eed24ba46dbaded0a23e69de96f100559bfef7307 +size 15184 diff --git a/data/2025/2503_09xxx/2503.09198/layout.json b/data/2025/2503_09xxx/2503.09198/layout.json new file mode 100644 index 
0000000000000000000000000000000000000000..a700093981ef2fc6ea874d7c9a3df1edd7eac2fe --- /dev/null +++ b/data/2025/2503_09xxx/2503.09198/layout.json @@ -0,0 +1,6101 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 119, + 89, + 492, + 139 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 89, + 492, + 139 + ], + "spans": [ + { + "bbox": [ + 119, + 89, + 492, + 139 + ], + "type": "text", + "content": "A 3D particle visualization system for temperature management" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 204, + 159, + 405, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 204, + 159, + 405, + 185 + ], + "spans": [ + { + "bbox": [ + 204, + 159, + 405, + 185 + ], + "type": "text", + "content": "Lange B. " + }, + { + "bbox": [ + 204, + 159, + 405, + 185 + ], + "type": "inline_equation", + "content": "^{a}" + }, + { + "bbox": [ + 204, + 159, + 405, + 185 + ], + "type": "text", + "content": ", Rodriguez N. " + }, + { + "bbox": [ + 204, + 159, + 405, + 185 + ], + "type": "inline_equation", + "content": "^{a}" + }, + { + "bbox": [ + 204, + 159, + 405, + 185 + ], + "type": "text", + "content": ", Puech W. " + }, + { + "bbox": [ + 204, + 159, + 405, + 185 + ], + "type": "inline_equation", + "content": "^{a}" + }, + { + "bbox": [ + 204, + 159, + 405, + 185 + ], + "type": "text", + "content": ", Rey H. " + }, + { + "bbox": [ + 204, + 159, + 405, + 185 + ], + "type": "inline_equation", + "content": "^{b}" + }, + { + "bbox": [ + 204, + 159, + 405, + 185 + ], + "type": "text", + "content": " and Vasques X. 
" + }, + { + "bbox": [ + 204, + 159, + 405, + 185 + ], + "type": "inline_equation", + "content": "^{b}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 202, + 186, + 408, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 186, + 408, + 199 + ], + "spans": [ + { + "bbox": [ + 202, + 186, + 408, + 199 + ], + "type": "inline_equation", + "content": "^{a}" + }, + { + "bbox": [ + 202, + 186, + 408, + 199 + ], + "type": "text", + "content": " LIRMM, 141 rue ADA, Montpellier, France;" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 207, + 200, + 403, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 200, + 403, + 213 + ], + "spans": [ + { + "bbox": [ + 207, + 200, + 403, + 213 + ], + "type": "text", + "content": "b IBM, Rue de la vieille poste, Montpellier," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 288, + 214, + 323, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 288, + 214, + 323, + 225 + ], + "spans": [ + { + "bbox": [ + 288, + 214, + 323, + 225 + ], + "type": "text", + "content": "France" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 285, + 249, + 325, + 260 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 285, + 249, + 325, + 260 + ], + "spans": [ + { + "bbox": [ + 285, + 249, + 325, + 260 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 131, + 267, + 481, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 267, + 481, + 407 + ], + "spans": [ + { + "bbox": [ + 131, + 267, + 481, + 407 + ], + "type": "text", + "content": "This paper deals with a 3D visualization technique proposed to analyze and manage energy efficiency from a data center. Data are extracted from sensors located in the IBM Green Data Center in Montpellier France. These sensors measure different information such as hygrometry, pressure and temperature. 
We want to visualize in real-time the large among of data produced by these sensors. A visualization engine has been designed, based on particles system and a client server paradigm. In order to solve performance problems, a Level Of Detail solution has been developed. These methods are based on the earlier work introduced by J. Clark in " + }, + { + "bbox": [ + 131, + 267, + 481, + 407 + ], + "type": "inline_equation", + "content": "1976^{1}" + }, + { + "bbox": [ + 131, + 267, + 481, + 407 + ], + "type": "text", + "content": ". In this paper we introduce a particle method used for this work and subsequently we explain different simplification methods applied to improve our solution." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 88, + 417, + 449, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 417, + 449, + 429 + ], + "spans": [ + { + "bbox": [ + 88, + 417, + 449, + 429 + ], + "type": "text", + "content": "Keywords: 3D Visualization, Sensors, Particles, Client/Server, Level Of Details" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 89, + 456, + 227, + 472 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 456, + 227, + 472 + ], + "spans": [ + { + "bbox": [ + 89, + 456, + 227, + 472 + ], + "type": "text", + "content": "1. INTRODUCTION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 87, + 483, + 515, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 483, + 515, + 685 + ], + "spans": [ + { + "bbox": [ + 87, + 483, + 515, + 685 + ], + "type": "text", + "content": "In this paper, we present a method to produce a 3D visualization for analyzing and managing temperature. Data are extracted from sensors located in the IBM Green Data Center in Montpellier, which provides many different types of information like temperature, pressure or hygrometry. In our system, sensors are placed in a virtual room and the internal space is modeled using particles. 
The main constraint here is to produce a real-time rendering. However, latency appears du to the number of vertices. In this paper, we use a solution called LOD (Level Of Detail) to produce multi resolution 3D objects. This solution has been introduced in 1976 by J. Clark1. In this paper, J. Clark introduces the use of several mesh resolutions to simplify the 3D scene complexity. In our work, we use various simplification methods to provide interactive rendering and allows rendering the most important part of data extracted from sensors. In this paper, we describe how we create a room, and the methods used to produce different resolution visualization. In Section 2, we introduce related work on particles systems and LOD. In Section 3, we expose our solution to simplify particles system. In Section 4 we give some results and finally, in Section 5 we present our conclusions and future work." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 769, + 315, + 776 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 769, + 315, + 776 + ], + "spans": [ + { + "bbox": [ + 296, + 769, + 315, + 776 + ], + "type": "text", + "content": "1/10" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 89, + 55, + 230, + 70 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 55, + 230, + 70 + ], + "spans": [ + { + "bbox": [ + 89, + 55, + 230, + 70 + ], + "type": "text", + "content": "2. RELATED WORK" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 88, + 83, + 507, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 83, + 507, + 108 + ], + "spans": [ + { + "bbox": [ + 88, + 83, + 507, + 108 + ], + "type": "text", + "content": "In this section we present several previous works concerning data visualization, particle systems and level of detail methods." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 88, + 110, + 512, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 110, + 512, + 176 + ], + "spans": [ + { + "bbox": [ + 88, + 110, + 512, + 176 + ], + "type": "text", + "content": "Some previous work present solutions to visualize large data flow extracted from mantle convection. M. Damon et al. " + }, + { + "bbox": [ + 88, + 110, + 512, + 176 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 88, + 110, + 512, + 176 + ], + "type": "text", + "content": " and K. E. Jordan et al. " + }, + { + "bbox": [ + 88, + 110, + 512, + 176 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 88, + 110, + 512, + 176 + ], + "type": "text", + "content": " present interactive viewers for this kind of data. These data are computed by using Hight Performance Computing (HPC) and visualized on a large display. The rendering is calculated by using another HPC. The data flow is very important and a real-time 3D simulation is hard to obtain. W. Kapfer and" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 88, + 186, + 521, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 186, + 521, + 320 + ], + "spans": [ + { + "bbox": [ + 88, + 186, + 521, + 320 + ], + "type": "text", + "content": "T. Riser6 introduce how to use particle system to visualize astronomic simulation, particles representing space objects. The number of particles is extremely important for computing motion in real-time. GPU computing is preferred to render instead of a common HPC solution. To display their data, they have developed their own 3D graphical engine. The space objects are represented by point sprite instead of sphere. Lights are used to give a spherical aspect to the point sprite. This solution allows to render more stars than spherical object method. 
The 3D engine provides different rendering methods to group space objects: cell simplification or extraction of isosurface. The use of GPU seems quite well for a particle solution, parallel processing allows to render large data; the astrological data seems to be well suited." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 88, + 322, + 521, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 322, + 521, + 536 + ], + "spans": [ + { + "bbox": [ + 88, + 322, + 521, + 536 + ], + "type": "text", + "content": "In 1976, J. Clark introduces Level Of Detail (LOD) concept1. LOD consists of several resolution meshes for using them at different distances from the camera. Firstly, designer produces these meshes. First algorithms, in 1992 Schroeder et al. developed a method by decimation for simplify the mesh7. It analyses mesh geometry and evaluates the complexity of triangles. Vertices are removed if only constraints set by the user are respected. Vertices are removed and gaps are filled using triangulation. These algorithms of simplification are not enough to simplify mesh efficiently because shape is not always totally respected. D. Luebke, in 1997, has proposed a taxonomy of mesh simplification8. He presented the most used algorithms. He extracted different ways to use each algorithm. But in this paper, only one solution works with volumetric mesh9. T. He et al. propose a method based on voxel simplification by using a grid for clustering voxels. A marching cube10 algorithm was applied to produce a surface mesh. But this simplification algorithm did not preserve the shape of the mesh. In our work, we look for point cloud simplification. Indeed, previous methods which deal with simplification for surface point cloud like11-13 are not adapted to our case. All of these methods produce LOD for surface mesh and point cloud is extracted from scanner." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 89, + 563, + 277, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 563, + 277, + 578 + ], + "spans": [ + { + "bbox": [ + 89, + 563, + 277, + 578 + ], + "type": "text", + "content": "3. PROPOSED APPROACH" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 88, + 590, + 517, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 590, + 517, + 671 + ], + "spans": [ + { + "bbox": [ + 88, + 590, + 517, + 671 + ], + "type": "text", + "content": "This section presents the different methods that are used to visualize a kind of data from Green Data Center (GDC). The main goal is to be able to visualize in real-time the evolution of temperature in the data center. For this, we use a special particle method. Particles are located using a segmentation algorithm based on Voronoi cell extraction and Delaunay triangulation. The latency due to the large flow of particles is avoided by using a client server paradigm. We improve our solution by using LOD methods to simplify rendering." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 769, + 315, + 776 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 769, + 315, + 776 + ], + "spans": [ + { + "bbox": [ + 296, + 769, + 315, + 776 + ], + "type": "text", + "content": "2/10" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 87, + 54, + 204, + 68 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 54, + 204, + 68 + ], + "spans": [ + { + "bbox": [ + 87, + 54, + 204, + 68 + ], + "type": "text", + "content": "3.1 Particle systems" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "spans": [ + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "text", + "content": "Rooms are the bases of our study. For modeling a room, we extract the shape of the space representation which is composed by a box with three measures: length " + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "inline_equation", + "content": "(l \\in \\mathbb{R})" + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "text", + "content": ", width " + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "inline_equation", + "content": "(w \\in \\mathbb{R})" + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "text", + "content": ", height " + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "inline_equation", + "content": "(h \\in \\mathbb{R})" + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "text", + "content": ". 
Sensors are represented by " + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "inline_equation", + "content": "S = \\{\\mathrm{S}_1, \\dots, \\mathrm{S}_M\\}" + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "text", + "content": " is the number of sensors. Sensors " + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "inline_equation", + "content": "\\mathrm{S}_i (\\mathrm{i} \\in \\{1, \\dots, M\\})" + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "text", + "content": " are placed on the space on a layer " + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "inline_equation", + "content": "\\mathbf{L} \\in \\mathbb{N}" + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "text", + "content": " and have a location represented by: " + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{X}_i, \\mathbf{Y}_i, \\mathbf{L}_j\\}" + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_i \\in \\mathbb{R}" + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}_i \\in \\mathbb{R}" + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "text", + "content": " is the layer used. For modeling the space inside a room, we use a particle system instead of 2D map representations which have some lacks." 
+ }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "inline_equation", + "content": "^{14}" + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "text", + "content": " Actually 2D map does not allow having a real visualization of space. A particle visualization gives a better efficiency for modeling space. We use a large number of particles to represent the entire space. " + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "inline_equation", + "content": "\\mathbf{N} \\in \\mathbb{N}" + }, + { + "bbox": [ + 85, + 77, + 517, + 214 + ], + "type": "text", + "content": " represents the number of particles in the room. It can be calculated using:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 220, + 321, + 251 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 220, + 321, + 251 + ], + "spans": [ + { + "bbox": [ + 110, + 220, + 321, + 251 + ], + "type": "interline_equation", + "content": "N = \\frac {\\left(\\left(l + 1\\right) \\times (h + 1) \\times (w + 1)\\right)}{\\delta^ {3}} \\tag {1}", + "image_path": "96bce3148486d00ea374e73aeb6caf0bb16a8ee9cc4e78a0b161de5fdfe93db0.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 85, + 261, + 515, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 261, + 515, + 313 + ], + "spans": [ + { + "bbox": [ + 85, + 261, + 515, + 313 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 85, + 261, + 515, + 313 + ], + "type": "inline_equation", + "content": "\\delta \\in \\mathbb{R}" + }, + { + "bbox": [ + 85, + 261, + 515, + 313 + ], + "type": "text", + "content": " is the space between particles. The particle grid is regular. In this model, three layers of temperature sensors compose rooms. They are defined according to their real locations in the data center. Figure ?? presents the different layers of sensors in the data center." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 85, + 316, + 512, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 316, + 512, + 343 + ], + "spans": [ + { + "bbox": [ + 85, + 316, + 512, + 343 + ], + "type": "text", + "content": "Particles carry information, and flow motion can be simulated if needed by changing the value of particles and the computational cost is inferior." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 86, + 355, + 256, + 370 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 355, + 256, + 370 + ], + "spans": [ + { + "bbox": [ + 86, + 355, + 256, + 370 + ], + "type": "text", + "content": "3.2 Segmentation algorithms" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 85, + 378, + 511, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 378, + 511, + 418 + ], + "spans": [ + { + "bbox": [ + 85, + 378, + 511, + 418 + ], + "type": "text", + "content": "In our solution, each sensors has an influence on surrounding particules. To calculate the set of particles in the sensor range, we use two methods: Voronoi cells extraction and Delaunay triangulation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 85, + 418, + 523, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 418, + 523, + 609 + ], + "spans": [ + { + "bbox": [ + 85, + 418, + 523, + 609 + ], + "type": "text", + "content": "Voronoi cells is a method to extract a partition of space " + }, + { + "bbox": [ + 85, + 418, + 523, + 609 + ], + "type": "inline_equation", + "content": "^{15}" + }, + { + "bbox": [ + 85, + 418, + 523, + 609 + ], + "type": "text", + "content": ". 
This method is available for " + }, + { + "bbox": [ + 85, + 418, + 523, + 609 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 85, + 418, + 523, + 609 + ], + "type": "text", + "content": " dimensions where " + }, + { + "bbox": [ + 85, + 418, + 523, + 609 + ], + "type": "inline_equation", + "content": "\\phi \\in [1, +\\infty]" + }, + { + "bbox": [ + 85, + 418, + 523, + 609 + ], + "type": "text", + "content": ", but most of implementations are done in 2D. Tools for extracting 3D Voronoi diagrams exist: Voro++ and QHull but particles are discrete and these solutions are not suitable because they extract Voronoi diagram in a continuous way. Then we designed our own method based on sphere expansion. We search nearest sensors for each particle. This part allows to weight particles outside the sensors mesh. A second method to weight the interior of the sensors mesh is used. We extract the mesh tetrahedron of sensors using the Delaunay triangulation implemented in QHull. This method was used to analyze the location of particle. We compute the exact location using ray tracing on the soup of tetrahedron. First, we search the nearest particles inside the hull of each tetrahedron. We extract the normal of each face of tetrahedron and we apply these normals on each particle. If the ray cuts three faces or more, the particle is inside the tetrahedron. This method is cost expensive and done in preprocessing. Moreover, particles are static and position didn't need to be update." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 86, + 620, + 244, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 620, + 244, + 635 + ], + "spans": [ + { + "bbox": [ + 86, + 620, + 244, + 635 + ], + "type": "text", + "content": "3.3 Client server paradigm" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 85, + 643, + 520, + 738 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 643, + 520, + 738 + ], + "spans": [ + { + "bbox": [ + 85, + 643, + 520, + 738 + ], + "type": "text", + "content": "To improve computation, a client server paradigm is used. We define a low cost communication protocol to transfer data from a server to a client. Server computes the modification of particles and the client displays the results. This protocol works in five steps. These steps are: sending header, sending sensor data, sending particle data, sending footer and receiving acknowledgment/language command from client. At each step, the server waits the acknowledgment from the client. We develop two ways to send data. The first sends the entire point cloud (sensors and particles). The biggest problem of this method is the" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 768, + 317, + 776 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 768, + 317, + 776 + ], + "spans": [ + { + "bbox": [ + 294, + 768, + 317, + 776 + ], + "type": "text", + "content": "3/10" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 85, + 52, + 508, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 52, + 508, + 133 + ], + "spans": [ + { + "bbox": [ + 85, + 52, + 508, + 133 + ], + "type": "text", + "content": "transmission of data. Sensors are sent with their coordinates and their value. We encode these data in bit words. 
For the particles data, the same method was used. The footer was sent for closing the communication. The second method is used to reduce efficiently the communication cost. We only send modified sensors and particles. The id and the new value is sent instead of coordinates. The last step is the command sent by the client. It allows the user to interact with the server. We use it to modify the camera viewpoint." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 87, + 145, + 266, + 159 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 145, + 266, + 159 + ], + "spans": [ + { + "bbox": [ + 87, + 145, + 266, + 159 + ], + "type": "text", + "content": "3.4 Level of detail for particles" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 85, + 167, + 519, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 167, + 519, + 288 + ], + "spans": [ + { + "bbox": [ + 85, + 167, + 519, + 288 + ], + "type": "text", + "content": "Level of detail (LOD) is one of the most important methods in computer graphics. It allows to solve rendering problems or performance problems. This method consists by producing several resolution of a 3D object. In our works, we use some features to define the object resolution: hardware and viewpoint. Hardware and viewpoint do not need the same data structure and we need to recompute it for each modification of the viewpoint or when hardware changes. LOD was defined by two problems statement. The first one uses a sample of original points, the second one uses a new point data set. In this part, we define six methods to produce LOD. The four first methods are for the client, the other are for the server." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 86, + 300, + 188, + 311 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 300, + 188, + 311 + ], + "spans": [ + { + "bbox": [ + 86, + 300, + 188, + 311 + ], + "type": "text", + "content": "Problems statement:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "spans": [ + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "text", + "content": "For this two approaches, we have a set " + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "text", + "content": " of Vertices " + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "inline_equation", + "content": "V = \\{V_1, \\ldots, V_\\omega\\}" + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "text", + "content": ". Each vertex is defined in " + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^3" + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "text", + "content": ". Simplify a mesh using a sample vertex means " + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "inline_equation", + "content": "\\omega > \\omega 2" + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "inline_equation", + "content": "\\omega 2" + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "text", + "content": " is the size of the second data set. 
For approach 1, we obtain a new object " + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "inline_equation", + "content": "\\mathrm{V}2 = \\{\\mathrm{V}2_1, \\ldots, \\mathrm{V}2_\\omega\\}" + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "text", + "content": " with fewer points than V but V 2 is a subset of V. For approach 2, we obtain a new object " + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "inline_equation", + "content": "\\mathrm{V}3 = \\{\\mathrm{V}3_1, \\ldots, \\mathrm{V}3_\\omega\\}" + }, + { + "bbox": [ + 85, + 312, + 523, + 381 + ], + "type": "text", + "content": " with fewer points than V but each point in V 3 is a new vertex." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 85, + 391, + 519, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 391, + 519, + 513 + ], + "spans": [ + { + "bbox": [ + 85, + 391, + 519, + 513 + ], + "type": "text", + "content": "In Section 2 we have presented methods to produce simplification. A few were designed for volumetric simplification. In this section, we propose several methods to produce different volumetric simplifications on our client. We develop four approaches to simplify 3D objects: clustering, neighbor simplification and two approaches based on server. Clustering method was based on He et al. " + }, + { + "bbox": [ + 85, + 391, + 519, + 513 + ], + "type": "inline_equation", + "content": "^{9}" + }, + { + "bbox": [ + 85, + 391, + 519, + 513 + ], + "type": "text", + "content": " works, it consists of clustering particles using a 3D grid. Cells sizes of grid are set depending to the viewpoint of the camera. Clusters were being weight with the average of the different values of particles. The position is the barycenter of these particles. Figures 1(a)-1(e) give some examples of simplification using clustering solution. Figure 1(a) present the original point of cloud mesh. 
Figure" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 85, + 521, + 520, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 521, + 520, + 548 + ], + "spans": [ + { + "bbox": [ + 85, + 521, + 520, + 548 + ], + "type": "text", + "content": "1(b) and 1(d) give two different methods for clustering. And finally, Figure 1(c) and 1(e) give the results of clustering methods." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 91, + 552, + 520, + 651 + ], + "blocks": [ + { + "bbox": [ + 91, + 552, + 520, + 651 + ], + "lines": [ + { + "bbox": [ + 91, + 552, + 520, + 651 + ], + "spans": [ + { + "bbox": [ + 91, + 552, + 520, + 651 + ], + "type": "image", + "image_path": "ae6cbd294176dc493b50c73240c504ea8c9fc09c9e3151fba6f0be709b525025.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 85, + 667, + 362, + 680 + ], + "lines": [ + { + "bbox": [ + 85, + 667, + 362, + 680 + ], + "spans": [ + { + "bbox": [ + 85, + 667, + 362, + 680 + ], + "type": "text", + "content": "Figure 1. Clustering method for simplification point cloud." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 85, + 681, + 522, + 735 + ], + "lines": [ + { + "bbox": [ + 85, + 681, + 522, + 735 + ], + "spans": [ + { + "bbox": [ + 85, + 681, + 522, + 735 + ], + "type": "text", + "content": "The second solution used is based on neighborhood extraction. Before runtime, we extract all neighbors of a particle. We measure the distance between each particle. Some optimization can help to decrease complexity: we can estimate easily in our structure which particle is closer to another one (using the fact that particle grid is regular). 
After this," + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 768, + 317, + 776 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 768, + 317, + 776 + ], + "spans": [ + { + "bbox": [ + 294, + 768, + 317, + 776 + ], + "type": "text", + "content": "4/10" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 86, + 52, + 517, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 52, + 517, + 133 + ], + "spans": [ + { + "bbox": [ + 86, + 52, + 517, + 133 + ], + "type": "text", + "content": "we extract the main value of particles. We explore each neighbor of particles and we keep the most important. In some cases, the most important can be the high values, in other the low values and in other both of them. This solution is able to produce a low resolution model with the most important information structure. Several low resolution models are created by exploring deeper in neighborhood. Figures 2(a)-2(c) illustrate a neighbor, and two simplifications of this mesh." + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 91, + 137, + 232, + 269 + ], + "blocks": [ + { + "bbox": [ + 91, + 137, + 232, + 269 + ], + "lines": [ + { + "bbox": [ + 91, + 137, + 232, + 269 + ], + "spans": [ + { + "bbox": [ + 91, + 137, + 232, + 269 + ], + "type": "image", + "image_path": "23944f9963b290146f2ac445e8970545bcf00b2949e999067a13468653d749cc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 91, + 270, + 181, + 294 + ], + "lines": [ + { + "bbox": [ + 91, + 270, + 181, + 294 + ], + "spans": [ + { + "bbox": [ + 91, + 270, + 181, + 294 + ], + "type": "text", + "content": "(a) Neighborhood cloud." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 235, + 137, + 375, + 267 + ], + "blocks": [ + { + "bbox": [ + 235, + 137, + 375, + 267 + ], + "lines": [ + { + "bbox": [ + 235, + 137, + 375, + 267 + ], + "spans": [ + { + "bbox": [ + 235, + 137, + 375, + 267 + ], + "type": "image", + "image_path": "0e41ba0aeddc8af5f2b6664c64abea63f9b32bfd4692c38f916d07f501d537e5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 235, + 270, + 325, + 295 + ], + "lines": [ + { + "bbox": [ + 235, + 270, + 325, + 295 + ], + "spans": [ + { + "bbox": [ + 235, + 270, + 325, + 295 + ], + "type": "text", + "content": "(b) Simplification neighborhood of 1." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 378, + 137, + 518, + 268 + ], + "blocks": [ + { + "bbox": [ + 378, + 137, + 518, + 268 + ], + "lines": [ + { + "bbox": [ + 378, + 137, + 518, + 268 + ], + "spans": [ + { + "bbox": [ + 378, + 137, + 518, + 268 + ], + "type": "image", + "image_path": "c01c12cfcf72e187db35ff49ad4d5edb57331b8d93efa4c1f20941ebf580a3d7.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 378, + 270, + 468, + 295 + ], + "lines": [ + { + "bbox": [ + 378, + 270, + 468, + 295 + ], + "spans": [ + { + "bbox": [ + 378, + 270, + 468, + 295 + ], + "type": "text", + "content": "(c) Simplification neighborhood of 2." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 86, + 311, + 301, + 323 + ], + "lines": [ + { + "bbox": [ + 86, + 311, + 301, + 323 + ], + "spans": [ + { + "bbox": [ + 86, + 311, + 301, + 323 + ], + "type": "text", + "content": "Figure 2. Neighbor method for simplification." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 85, + 324, + 522, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 324, + 522, + 390 + ], + "spans": [ + { + "bbox": [ + 85, + 324, + 522, + 390 + ], + "type": "text", + "content": "Other methods were based on server instead of client. Client sent via TCP connection his viewpoint. The server recomputes the particles structure and recreates the entire structure. With this solution, it is possible to produce a point cloud resolution depending on hardware. Figure 3(a) presents particles rendering with a distance of 2 from the camera. Figure 3(b) is the decimation produced with a distance of 3 and Figure 3(c) is a distance of 1." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 85, + 401, + 524, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 401, + 524, + 509 + ], + "spans": [ + { + "bbox": [ + 85, + 401, + 524, + 509 + ], + "type": "text", + "content": "Another method was based on Voronoi diffusion of temperature. The bandwidth for transmitting data is limited. We developed Voronoi temperature diffusion to solve this communication. In this approach, we update data using sphere expansion. Each time, we update particles depending on their distance from sensors. The more particles are distant from sensors the later they will be refreshed. This method sends only modified particles. The bandwidth is saved and the visualization gives a flow effect. Figure 4(a) represents values at time 0. At time 1, values of sensors change, 4(b). After time 2, we update a first range of particles 4(c) and finally the second range 4(d)." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 96, + 515, + 233, + 646 + ], + "blocks": [ + { + "bbox": [ + 96, + 515, + 233, + 646 + ], + "lines": [ + { + "bbox": [ + 96, + 515, + 233, + 646 + ], + "spans": [ + { + "bbox": [ + 96, + 515, + 233, + 646 + ], + "type": "image", + "image_path": "7eac2440a2fedd66d943c82b390c3a9df5d95d019d523a29c7a3e2f9c7218928.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 95, + 647, + 169, + 672 + ], + "lines": [ + { + "bbox": [ + 95, + 647, + 169, + 672 + ], + "spans": [ + { + "bbox": [ + 95, + 647, + 169, + 672 + ], + "type": "text", + "content": "(a) Particles server " + }, + { + "bbox": [ + 95, + 647, + 169, + 672 + ], + "type": "inline_equation", + "content": "(\\mathrm{D} = 2)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 86, + 687, + 364, + 700 + ], + "lines": [ + { + "bbox": [ + 86, + 687, + 364, + 700 + ], + "spans": [ + { + "bbox": [ + 86, + 687, + 364, + 700 + ], + "type": "text", + "content": "Figure 3. Particle simplification using server and distance." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 238, + 515, + 375, + 646 + ], + "blocks": [ + { + "bbox": [ + 238, + 515, + 375, + 646 + ], + "lines": [ + { + "bbox": [ + 238, + 515, + 375, + 646 + ], + "spans": [ + { + "bbox": [ + 238, + 515, + 375, + 646 + ], + "type": "image", + "image_path": "c4777a0507eee1c1beaa13c04e351827098bc588e6ced9d8de6cff01430e3df5.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 238, + 647, + 353, + 672 + ], + "lines": [ + { + "bbox": [ + 238, + 647, + 353, + 672 + ], + "spans": [ + { + "bbox": [ + 238, + 647, + 353, + 672 + ], + "type": "text", + "content": "(b) Particles produce server " + }, + { + "bbox": [ + 238, + 647, + 353, + 672 + ], + "type": "inline_equation", + "content": "(\\mathrm{D} = 3)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 380, + 515, + 517, + 646 + ], + "blocks": [ + { + "bbox": [ + 380, + 515, + 517, + 646 + ], + "lines": [ + { + "bbox": [ + 380, + 515, + 517, + 646 + ], + "spans": [ + { + "bbox": [ + 380, + 515, + 517, + 646 + ], + "type": "image", + "image_path": "5dabdfa6b0129921b2abb27785be9608573a25f26e4646d20c6b92250fad1414.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 378, + 647, + 518, + 672 + ], + "lines": [ + { + "bbox": [ + 378, + 647, + 518, + 672 + ], + "spans": [ + { + "bbox": [ + 378, + 647, + 518, + 672 + ], + "type": "text", + "content": "(c) Particles produce by server " + }, + { + "bbox": [ + 378, + 647, + 518, + 672 + ], + "type": "inline_equation", + "content": "(\\mathrm{D} = 1)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 768, + 315, + 776 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 295, + 768, + 315, + 776 + ], + "spans": [ + { + "bbox": [ + 295, + 768, + 315, + 776 + ], + "type": "text", + "content": "5/10" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 92, + 53, + 197, + 153 + ], + "blocks": [ + { + "bbox": [ + 92, + 53, + 197, + 153 + ], + "lines": [ + { + "bbox": [ + 92, + 53, + 197, + 153 + ], + "spans": [ + { + "bbox": [ + 92, + 53, + 197, + 153 + ], + "type": "image", + "image_path": "e340d9807d4ed601eee3bd74351618a9adfda026a8dbd720f9d2aa40857dad26.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 91, + 155, + 198, + 174 + ], + "lines": [ + { + "bbox": [ + 91, + 155, + 198, + 174 + ], + "spans": [ + { + "bbox": [ + 91, + 155, + 198, + 174 + ], + "type": "text", + "content": "(a) Particles and sensors (T = 0)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 201, + 53, + 304, + 154 + ], + "blocks": [ + { + "bbox": [ + 201, + 53, + 304, + 154 + ], + "lines": [ + { + "bbox": [ + 201, + 53, + 304, + 154 + ], + "spans": [ + { + "bbox": [ + 201, + 53, + 304, + 154 + ], + "type": "image", + "image_path": "92c09c0b767ea453098777786fb815e8c2f833a8abc790807397f42e9a6f6887.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 203, + 155, + 301, + 163 + ], + "lines": [ + { + "bbox": [ + 203, + 155, + 301, + 163 + ], + "spans": [ + { + "bbox": [ + 203, + 155, + 301, + 163 + ], + "type": "text", + "content": "(b) Sensors update " + }, + { + "bbox": [ + 203, + 155, + 301, + 163 + ], + "type": "inline_equation", + "content": "(\\mathrm{T} = 1)" + }, + { + "bbox": [ + 203, + 155, + 301, + 163 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 308, + 54, + 410, + 154 + ], + "blocks": [ + { + "bbox": [ + 308, + 54, + 410, + 154 + ], + "lines": [ + { + "bbox": [ + 308, + 54, + 410, + 154 + ], + "spans": [ + { + "bbox": [ + 308, + 54, + 410, + 154 + ], + "type": "image", + "image_path": "416dbc943d118d6834e80c8c5ad759b34f940700b96fd0b87bb23c082f10cf10.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 316, + 155, + 402, + 164 + ], + "lines": [ + { + "bbox": [ + 316, + 155, + 402, + 164 + ], + "spans": [ + { + "bbox": [ + 316, + 155, + 402, + 164 + ], + "type": "text", + "content": "(c) First range " + }, + { + "bbox": [ + 316, + 155, + 402, + 164 + ], + "type": "inline_equation", + "content": "(\\mathrm{T} = 2)" + }, + { + "bbox": [ + 316, + 155, + 402, + 164 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 85, + 188, + 303, + 201 + ], + "lines": [ + { + "bbox": [ + 85, + 188, + 303, + 201 + ], + "spans": [ + { + "bbox": [ + 85, + 188, + 303, + 201 + ], + "type": "text", + "content": "Figure 4. Simplification using bandwidth size." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 413, + 54, + 519, + 154 + ], + "blocks": [ + { + "bbox": [ + 413, + 54, + 519, + 154 + ], + "lines": [ + { + "bbox": [ + 413, + 54, + 519, + 154 + ], + "spans": [ + { + "bbox": [ + 413, + 54, + 519, + 154 + ], + "type": "image", + "image_path": "a79e8f2104ab8c32df4137808fc6b4ce9b7465bfa15c2080bd3aabb2c58986d0.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 419, + 155, + 514, + 164 + ], + "lines": [ + { + "bbox": [ + 419, + 155, + 514, + 164 + ], + "spans": [ + { + "bbox": [ + 419, + 155, + 514, + 164 + ], + "type": "text", + "content": "(d) Second range " + }, + { + "bbox": [ + 419, + 155, + 514, + 164 + ], + "type": "inline_equation", + "content": "(\\mathrm{T} = 3)" + }, + { + "bbox": [ + 419, + 155, + 514, + 164 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 86, + 228, + 300, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 228, + 300, + 243 + ], + "spans": [ + { + "bbox": [ + 86, + 228, + 300, + 243 + ], + "type": "text", + "content": "4. EXPERIMENTAL RESULTS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 85, + 255, + 493, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 255, + 493, + 294 + ], + "spans": [ + { + "bbox": [ + 85, + 255, + 493, + 294 + ], + "type": "text", + "content": "The data are extracted from two rooms of the IBM data center. Firstly, we present our method for rendering the room, and later we present our results using Level Of Detail methods." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 86, + 308, + 212, + 322 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 308, + 212, + 322 + ], + "spans": [ + { + "bbox": [ + 86, + 308, + 212, + 322 + ], + "type": "text", + "content": "4.1 Data visualization" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 85, + 331, + 519, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 331, + 519, + 453 + ], + "spans": [ + { + "bbox": [ + 85, + 331, + 519, + 453 + ], + "type": "text", + "content": "We want to visualize and manage the consumption of a data center. For the visualization, we want to use an IFC viewer. But the IFC model for GDC is not available yet. Data center extraction of the room space is for the moment done by hand. The room is empty and was represent by a simple shape a box with 4 meters length, 3 meters width and 2.5 meters height. We use point cloud visualization based on particle paradigm. We use the two rooms of the data center and we put the same number of particles (30000) and 35 sensors distributed on three layers at 1 meter; 2 meter and on the ground. We define high and low temperature regarding the real sensors value. Figure 5(a) presents temperature color scale, Figure 5(b) and Figure 5(c) present data center sensors." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 85, + 463, + 522, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 463, + 522, + 598 + ], + "spans": [ + { + "bbox": [ + 85, + 463, + 522, + 598 + ], + "type": "text", + "content": "The next step is to interpolate data from sensors. For this, we extract the sensor mesh. We use QHULL to produce a soup of tetrahedrons. Particles need to be located. We can determine which tetrahedron is the nearest, we extract the box hull of tetrahedron and we apply for each particle the norms of each tetrahedron face. If these rays cut three or more faces, then particle is inside the tetrahedron. 
With this method, we can determine exactly the location of each particles regarding to the tetrahedrons, a weight is given to them easily. It was used to apply a coefficient to the value of each vertex of tetrahedron. For the outside particles, another solution was used: Voronoi cells. This method is based on a discrete extraction of Voronoi cells. We use our own method because other method like Voro ++ or QHull extract Voronoi diagram in a continuous way." + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 93, + 696, + 225, + 731 + ], + "blocks": [ + { + "bbox": [ + 93, + 696, + 225, + 731 + ], + "lines": [ + { + "bbox": [ + 93, + 696, + 225, + 731 + ], + "spans": [ + { + "bbox": [ + 93, + 696, + 225, + 731 + ], + "type": "image", + "image_path": "7c729f1706460b0bf0adde7a8dbd071f9ae77f611ef3f85b3d951fb62795016c.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 244, + 605, + 371, + 712 + ], + "blocks": [ + { + "bbox": [ + 244, + 605, + 371, + 712 + ], + "lines": [ + { + "bbox": [ + 244, + 605, + 371, + 712 + ], + "spans": [ + { + "bbox": [ + 244, + 605, + 371, + 712 + ], + "type": "image", + "image_path": "8c16ad7a59f34e5974a4322466cec0149f8905a1a1b1cfadff910a3fec50d004.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 268, + 718, + 334, + 730 + ], + "lines": [ + { + "bbox": [ + 268, + 718, + 334, + 730 + ], + "spans": [ + { + "bbox": [ + 268, + 718, + 334, + 730 + ], + "type": "text", + "content": "(b) Room one." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 394, + 606, + 520, + 712 + ], + "blocks": [ + { + "bbox": [ + 394, + 606, + 520, + 712 + ], + "lines": [ + { + "bbox": [ + 394, + 606, + 520, + 712 + ], + "spans": [ + { + "bbox": [ + 394, + 606, + 520, + 712 + ], + "type": "image", + "image_path": "2eb0e7b79700cbbd72a8a244992163f567dbbd68f9d3db6bad3e5808cda4dabb.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 423, + 718, + 489, + 730 + ], + "lines": [ + { + "bbox": [ + 423, + 718, + 489, + 730 + ], + "spans": [ + { + "bbox": [ + 423, + 718, + 489, + 730 + ], + "type": "text", + "content": "(c) Room two." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 768, + 315, + 776 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 768, + 315, + 776 + ], + "spans": [ + { + "bbox": [ + 295, + 768, + 315, + 776 + ], + "type": "text", + "content": "6/10" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 87, + 78, + 199, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 78, + 199, + 92 + ], + "spans": [ + { + "bbox": [ + 87, + 78, + 199, + 92 + ], + "type": "text", + "content": "4.2 Level of details" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 85, + 100, + 525, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 100, + 525, + 291 + ], + "spans": [ + { + "bbox": [ + 85, + 100, + 525, + 291 + ], + "type": "text", + "content": "In the earlier days of this project, first solution proposed gives a low frame rates, about 15 FPS (Frame Per Second): visualization was not in real-time (real-time is about 24 FPS). 
For solving this problem, we define a client server paradigm. This solution allows to produce a real-time rendering on the client. Figure ?? gives an example of LOD for particles. We use Openscenegraph " + }, + { + "bbox": [ + 85, + 100, + 525, + 291 + ], + "type": "inline_equation", + "content": "^{20}" + }, + { + "bbox": [ + 85, + 100, + 525, + 291 + ], + "type": "text", + "content": " as a 3D engine. It owns several features useful in LOD. A special object is defined to manage multi-resolution model. It calculates the distance of the object from the camera. For our experimentation we use five resolutions of mesh. The first mesh was the original mesh, it is set at 0 to 500. The next mesh was set at 500 to 1000, the next at 1000 to 1500 and the other at 1500 to 2000. These three meshes were constructed by specific LOD methods: clustering and significant vertices. Clustering defines a 3D grid inside the room. The size of each cell depends on the viewpoint location. The size of the cluster depends on the visibility of the clustered particles. First results are given Figure 6(a) and 6(b). Value of cluster is an average of clustered value. The number of points of the final mesh depends on the grid size. Table 1 shows the results at several distances." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 88, + 297, + 523, + 357 + ], + "blocks": [ + { + "bbox": [ + 88, + 297, + 523, + 357 + ], + "lines": [ + { + "bbox": [ + 88, + 297, + 523, + 357 + ], + "spans": [ + { + "bbox": [ + 88, + 297, + 523, + 357 + ], + "type": "table", + "html": "
D = 0 to 500D = 500 to 1000D = 1000 to 1500D = 1500 to 2000
C = X30000390024036
", + "image_path": "389ef0f63a90171c14db5cd9a925792e376222c8c56a5801cacb3dcc99360c96.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 97, + 403, + 306, + 559 + ], + "blocks": [ + { + "bbox": [ + 86, + 52, + 272, + 65 + ], + "lines": [ + { + "bbox": [ + 86, + 52, + 272, + 65 + ], + "spans": [ + { + "bbox": [ + 86, + 52, + 272, + 65 + ], + "type": "text", + "content": "Figure 5. Data use to model the system." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 97, + 403, + 306, + 559 + ], + "lines": [ + { + "bbox": [ + 97, + 403, + 306, + 559 + ], + "spans": [ + { + "bbox": [ + 97, + 403, + 306, + 559 + ], + "type": "image", + "image_path": "9fbe1142edda7eab13f0abb9592b84e63d1611257e7426d7d569a68b9df82b38.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 144, + 559, + 260, + 571 + ], + "lines": [ + { + "bbox": [ + 144, + 559, + 260, + 571 + ], + "spans": [ + { + "bbox": [ + 144, + 559, + 260, + 571 + ], + "type": "text", + "content": "(a) " + }, + { + "bbox": [ + 144, + 559, + 260, + 571 + ], + "type": "inline_equation", + "content": "\\mathrm{D} = 500" + }, + { + "bbox": [ + 144, + 559, + 260, + 571 + ], + "type": "text", + "content": " to 1000." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 85, + 586, + 301, + 599 + ], + "lines": [ + { + "bbox": [ + 85, + 586, + 301, + 599 + ], + "spans": [ + { + "bbox": [ + 85, + 586, + 301, + 599 + ], + "type": "text", + "content": "Figure 6. Clustering visualization algorithms." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 309, + 403, + 518, + 559 + ], + "blocks": [ + { + "bbox": [ + 86, + 383, + 294, + 396 + ], + "lines": [ + { + "bbox": [ + 86, + 383, + 294, + 396 + ], + "spans": [ + { + "bbox": [ + 86, + 383, + 294, + 396 + ], + "type": "text", + "content": "Table 1. Results of clustering simplification." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 309, + 403, + 518, + 559 + ], + "lines": [ + { + "bbox": [ + 309, + 403, + 518, + 559 + ], + "spans": [ + { + "bbox": [ + 309, + 403, + 518, + 559 + ], + "type": "image", + "image_path": "69ae709df8988787ff641b5b6eb5a608ca0636bba7110a3a4735064168d117da.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 352, + 559, + 474, + 571 + ], + "lines": [ + { + "bbox": [ + 352, + 559, + 474, + 571 + ], + "spans": [ + { + "bbox": [ + 352, + 559, + 474, + 571 + ], + "type": "text", + "content": "(b) " + }, + { + "bbox": [ + 352, + 559, + 474, + 571 + ], + "type": "inline_equation", + "content": "D = 1000" + }, + { + "bbox": [ + 352, + 559, + 474, + 571 + ], + "type": "text", + "content": " to 1500." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 85, + 599, + 516, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 599, + 516, + 681 + ], + "spans": [ + { + "bbox": [ + 85, + 599, + 516, + 681 + ], + "type": "text", + "content": "Significant points method extracts the neighbors for each particle. We extract the highest and lowest temperatures, by exploring the neighborhood of a particle, in order to have significant vertices of the model. For the first step of simplified model we explore neighbor. For the second model, we explore neighbor and neighbor of neighbor, etc. This solution simplifies drastically the model. 
First results are given Figure ??-??. Table 2 shows the number of vertices at several distance." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 768, + 316, + 776 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 768, + 316, + 776 + ], + "spans": [ + { + "bbox": [ + 295, + 768, + 316, + 776 + ], + "type": "text", + "content": "7/10" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 88, + 49, + 523, + 109 + ], + "blocks": [ + { + "bbox": [ + 88, + 49, + 523, + 109 + ], + "lines": [ + { + "bbox": [ + 88, + 49, + 523, + 109 + ], + "spans": [ + { + "bbox": [ + 88, + 49, + 523, + 109 + ], + "type": "table", + "html": "
D = 0 to 500D = 500 to 1000D = 1000 to 1500D = 1500 to 2000
C = X300002295045543524
", + "image_path": "3cd8a951d8771e165427c2faca0d2508e4c8cef94fb244dc3a04f2ca031996a3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 94, + 155, + 304, + 319 + ], + "blocks": [ + { + "bbox": [ + 94, + 155, + 304, + 319 + ], + "lines": [ + { + "bbox": [ + 94, + 155, + 304, + 319 + ], + "spans": [ + { + "bbox": [ + 94, + 155, + 304, + 319 + ], + "type": "image", + "image_path": "4050e8f862ac40be7bc6d5c239997192325936e244911b8d69fe60a4ab8810b8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 139, + 319, + 258, + 332 + ], + "lines": [ + { + "bbox": [ + 139, + 319, + 258, + 332 + ], + "spans": [ + { + "bbox": [ + 139, + 319, + 258, + 332 + ], + "type": "text", + "content": "(a) Neighborhood 1." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 85, + 349, + 371, + 363 + ], + "lines": [ + { + "bbox": [ + 85, + 349, + 371, + 363 + ], + "spans": [ + { + "bbox": [ + 85, + 349, + 371, + 363 + ], + "type": "text", + "content": "Figure 7. Clustering visualization algorithms using neighbor." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 306, + 155, + 516, + 319 + ], + "blocks": [ + { + "bbox": [ + 306, + 155, + 516, + 319 + ], + "lines": [ + { + "bbox": [ + 306, + 155, + 516, + 319 + ], + "spans": [ + { + "bbox": [ + 306, + 155, + 516, + 319 + ], + "type": "image", + "image_path": "dafb2130c8265a91a7faed53db3de1181cd524770896acda68d5a953df004f87.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 351, + 319, + 470, + 332 + ], + "lines": [ + { + "bbox": [ + 351, + 319, + 470, + 332 + ], + "spans": [ + { + "bbox": [ + 351, + 319, + 470, + 332 + ], + "type": "text", + "content": "(b) Neighborhood 2." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 85, + 373, + 517, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 373, + 517, + 440 + ], + "spans": [ + { + "bbox": [ + 85, + 373, + 517, + 440 + ], + "type": "text", + "content": "The first server solution receives orders from client as presented Section 3.4. We calculate the viewpoint distance and we send data according to it. A new structure is recalculated if the camera is too far from the object. After the recomputing, we send the new data. This solution allows the user to receive more or less data according to its distance to the object. Table 3 shows some different resolutions produced with this method." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 88, + 449, + 523, + 507 + ], + "blocks": [ + { + "bbox": [ + 85, + 135, + 290, + 148 + ], + "lines": [ + { + "bbox": [ + 85, + 135, + 290, + 148 + ], + "spans": [ + { + "bbox": [ + 85, + 135, + 290, + 148 + ], + "type": "text", + "content": "Table 2. Results of neighbor simplification." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 88, + 449, + 523, + 507 + ], + "lines": [ + { + "bbox": [ + 88, + 449, + 523, + 507 + ], + "spans": [ + { + "bbox": [ + 88, + 449, + 523, + 507 + ], + "type": "table", + "html": "
D = 0 to 500D = 500 to 1000D = 1000 to 1500D = 1500 to 2000
C = X1200003000075001875
", + "image_path": "9434d9911bf9b1660b16b84b9672b9a68ac66eec53e61ed383a2c752f12025a2.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 85, + 534, + 260, + 545 + ], + "lines": [ + { + "bbox": [ + 85, + 534, + 260, + 545 + ], + "spans": [ + { + "bbox": [ + 85, + 534, + 260, + 545 + ], + "type": "text", + "content": "Table 3. Several resolution of model." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 85, + 548, + 522, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 548, + 522, + 614 + ], + "spans": [ + { + "bbox": [ + 85, + 548, + 522, + 614 + ], + "type": "text", + "content": "Another solution is to use bandwidth latency. We send data at several times, we do not send the entire set of data but only modified particles. We send at first time the sensors data, and subsequently we send a range of data (the nearest). After few minutes, all data are sent. This solution gives good results, and simulates a thermal diffusion in the whole structure of particles. Figure 8(a)-8(c) illustrate this method." 
+ } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 91, + 617, + 233, + 723 + ], + "blocks": [ + { + "bbox": [ + 91, + 617, + 233, + 723 + ], + "lines": [ + { + "bbox": [ + 91, + 617, + 233, + 723 + ], + "spans": [ + { + "bbox": [ + 91, + 617, + 233, + 723 + ], + "type": "image", + "image_path": "2fcee6efa3b1d3c44144e2639f3857a15c4079f838c4f9132953fd427a8b1be9.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 143, + 722, + 182, + 731 + ], + "lines": [ + { + "bbox": [ + 143, + 722, + 182, + 731 + ], + "spans": [ + { + "bbox": [ + 143, + 722, + 182, + 731 + ], + "type": "text", + "content": "(a) " + }, + { + "bbox": [ + 143, + 722, + 182, + 731 + ], + "type": "inline_equation", + "content": "\\mathrm{T} = 0" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 235, + 617, + 377, + 723 + ], + "blocks": [ + { + "bbox": [ + 235, + 617, + 377, + 723 + ], + "lines": [ + { + "bbox": [ + 235, + 617, + 377, + 723 + ], + "spans": [ + { + "bbox": [ + 235, + 617, + 377, + 723 + ], + "type": "image", + "image_path": "b2be70bd4cc7d261ac38e8bc653a0c09c537d49d2a5612ed5c067058a98ef463.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 286, + 722, + 325, + 731 + ], + "lines": [ + { + "bbox": [ + 286, + 722, + 325, + 731 + ], + "spans": [ + { + "bbox": [ + 286, + 722, + 325, + 731 + ], + "type": "text", + "content": "(b) " + }, + { + "bbox": [ + 286, + 722, + 325, + 731 + ], + "type": "inline_equation", + "content": "\\mathrm{T} = 1" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 378, + 617, + 520, + 723 + ], + "blocks": [ + { + "bbox": [ + 378, + 617, + 520, + 723 + ], + "lines": [ + { + "bbox": [ + 378, + 617, + 520, + 723 + ], + "spans": [ + { + "bbox": [ + 378, + 617, + 520, + 723 + ], + "type": 
"image", + "image_path": "0d4e22f04d3ea25761125c93fc6d70fe856bf840ef353c043f32d21c9633724e.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 430, + 722, + 468, + 731 + ], + "lines": [ + { + "bbox": [ + 430, + 722, + 468, + 731 + ], + "spans": [ + { + "bbox": [ + 430, + 722, + 468, + 731 + ], + "type": "text", + "content": "(c) " + }, + { + "bbox": [ + 430, + 722, + 468, + 731 + ], + "type": "inline_equation", + "content": "\\mathrm{T} = 4" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 768, + 315, + 776 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 768, + 315, + 776 + ], + "spans": [ + { + "bbox": [ + 295, + 768, + 315, + 776 + ], + "type": "text", + "content": "8/10" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 87, + 91, + 208, + 107 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 91, + 208, + 107 + ], + "spans": [ + { + "bbox": [ + 87, + 91, + 208, + 107 + ], + "type": "text", + "content": "5. CONCLUSION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 86, + 118, + 520, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 118, + 520, + 199 + ], + "spans": [ + { + "bbox": [ + 86, + 118, + 520, + 199 + ], + "type": "text", + "content": "In this paper, we have presented a method to visualize sensors data extracted from a Green Data Center. This approach produces interpolation visualization for managing and visualizing data. This interpolation used a Delaunay triangulation and a cell extraction based on Voronoi. An unusual way of use particles helps to process data. First results present the solution proposed to visualize the inside of a GDC space. The second results proposed in this paper aim to improve the rendering." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 86, + 200, + 522, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 200, + 522, + 294 + ], + "spans": [ + { + "bbox": [ + 86, + 200, + 522, + 294 + ], + "type": "text", + "content": "For this, first step introduces a client/server protocol a second step illustrates methods to simplify the model. With these different approaches we improve the rendering time, preserving most important data are kept. In future works, we will work on data \"dressing\". We want to find a way to improve rendering of the scene using meatballs or marching cube algorithms. A main constraint of this work is real-time computation. Future work also concern to add rooms to the visualization. At present, we only visualize a single room. We want to visualize building, and complex form, by using an IFC loader." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 87, + 319, + 257, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 319, + 257, + 335 + ], + "spans": [ + { + "bbox": [ + 87, + 319, + 257, + 335 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 85, + 347, + 512, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 347, + 512, + 401 + ], + "spans": [ + { + "bbox": [ + 85, + 347, + 512, + 401 + ], + "type": "text", + "content": "We want to thanks the PSSC (Products and Solutions Support Center) team of IBM Montpellier for having provided the necessary equipment and data need for this experimentation. And we thank the FUI (Fonds Unique Interministriel) for their financial support." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 87, + 427, + 190, + 442 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 427, + 190, + 442 + ], + "spans": [ + { + "bbox": [ + 87, + 427, + 190, + 442 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 86, + 454, + 522, + 738 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 86, + 454, + 455, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 454, + 455, + 479 + ], + "spans": [ + { + "bbox": [ + 86, + 454, + 455, + 479 + ], + "type": "text", + "content": "[1] Clark, J. H., \"Hierarchical geometric models for visible surface algorithms,\" Communications of the ACM 19(10), 547-554 (1976)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 87, + 482, + 495, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 482, + 495, + 507 + ], + "spans": [ + { + "bbox": [ + 87, + 482, + 495, + 507 + ], + "type": "text", + "content": "[2] Damon, M., Kameyama, M., Knox, M., Porter, D., Yuen, D., and Sevre, E., \"Interactive visualization of 3d mantle convection,\" Visual Geosciences (2008)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 87, + 509, + 508, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 509, + 508, + 534 + ], + "spans": [ + { + "bbox": [ + 87, + 509, + 508, + 534 + ], + "type": "text", + "content": "[3] Jordan, K. E., Yuen, D. A., Reuteler, D. M., Zhang, S., and Haimes, R., \"Parallel interactive visualization of 3d mantle convection,\" IEEE Comput. Sci. Eng. 3(4), 29-37 (1996)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 87, + 536, + 516, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 536, + 516, + 562 + ], + "spans": [ + { + "bbox": [ + 87, + 536, + 516, + 562 + ], + "type": "text", + "content": "[4] Reeves, W. 
T., \"Particle systems - a technique for modeling a class of fuzzy objects,\" ACM Transactions on Graphics 2, 359-376 (1983)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 88, + 563, + 349, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 563, + 349, + 575 + ], + "spans": [ + { + "bbox": [ + 88, + 563, + 349, + 575 + ], + "type": "text", + "content": "[5] Latta, L., \"Building a million particle system,\" (2004)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 87, + 576, + 479, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 576, + 479, + 602 + ], + "spans": [ + { + "bbox": [ + 87, + 576, + 479, + 602 + ], + "type": "text", + "content": "[6] Kapferer, W. and Riser, T., \"Visualization needs and techniques for astrophysical simulations,\" New Journal of Physics 10(12), 125008 (15pp) (2008)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 87, + 604, + 512, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 604, + 512, + 628 + ], + "spans": [ + { + "bbox": [ + 87, + 604, + 512, + 628 + ], + "type": "text", + "content": "[7] Schroeder, W. J., Zarge, J. A., and Lorensen, W. E., \"Decimation of triangle meshes,\" 65-70 (1992)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 87, + 631, + 425, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 631, + 425, + 643 + ], + "spans": [ + { + "bbox": [ + 87, + 631, + 425, + 643 + ], + "type": "text", + "content": "[8] Luebke, D., \"A survey of polygonal simplification algorithms,\" (1997)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 87, + 644, + 522, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 644, + 522, + 670 + ], + "spans": [ + { + "bbox": [ + 87, + 644, + 522, + 670 + ], + "type": "text", + "content": "[9] He, T., Hong, L., Kaufman, A., Varshney, A., and Wang, S., \"Voxel based object simplification,\" in [Proc. 
SIGGRAPH Symposium on Interactive 3D Graphics], 296-303 (1995)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 87, + 671, + 474, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 671, + 474, + 696 + ], + "spans": [ + { + "bbox": [ + 87, + 671, + 474, + 696 + ], + "type": "text", + "content": "[10] Lorensen, W. E. and Cline, H. E., \"Marching cubes: A high resolution 3d surface construction algorithm,\" SIGGRAPH Comput. Graph. 21(4), 163-169 (1987)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 87, + 698, + 484, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 698, + 484, + 723 + ], + "spans": [ + { + "bbox": [ + 87, + 698, + 484, + 723 + ], + "type": "text", + "content": "[11] Pauly, M., Gross, M., and Kobbelt, L. P., \"Efficient simplification of point-sampled surfaces,\" (2002)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 87, + 725, + 516, + 738 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 725, + 516, + 738 + ], + "spans": [ + { + "bbox": [ + 87, + 725, + 516, + 738 + ], + "type": "text", + "content": "[12] Moenning, C., , Moenning, C., and Dodgson, N. A., \"Intrinsic point cloud simplification,\"" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 86, + 52, + 253, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 52, + 253, + 64 + ], + "spans": [ + { + "bbox": [ + 86, + 52, + 253, + 64 + ], + "type": "text", + "content": "Figure 8. Bandwidth simplification." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 768, + 315, + 776 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 768, + 315, + 776 + ], + "spans": [ + { + "bbox": [ + 294, + 768, + 315, + 776 + ], + "type": "text", + "content": "9/10" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 87, + 52, + 520, + 294 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 87, + 52, + 119, + 63 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 52, + 119, + 63 + ], + "spans": [ + { + "bbox": [ + 87, + 52, + 119, + 63 + ], + "type": "text", + "content": "(2004)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 87, + 65, + 488, + 104 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 65, + 488, + 104 + ], + "spans": [ + { + "bbox": [ + 87, + 65, + 488, + 104 + ], + "type": "text", + "content": "[13] Song, H. and Feng, H.-Y., \"A progressive point cloud simplification algorithm with preserved sharp edge data,\" The International Journal of Advanced Manufacturing Technology 45, 583-592 (November 2009)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 87, + 106, + 514, + 131 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 106, + 514, + 131 + ], + "spans": [ + { + "bbox": [ + 87, + 106, + 514, + 131 + ], + "type": "text", + "content": "[14] Buschmann, C., Pfisterer, D., Fischer, S., Fekete, S. P., and Kröller, A., \"Spyglass: a wireless sensor network visualizer,\" SIGBED Rev. 2(1), 1-6 (2005)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 87, + 133, + 488, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 133, + 488, + 158 + ], + "spans": [ + { + "bbox": [ + 87, + 133, + 488, + 158 + ], + "type": "text", + "content": "[15] Avis, D. 
and Bhattacharya, B., \"Algorithms for computing d-dimensional voronoi diagrams and their duals,\" 1, 159-180 (1983)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 88, + 159, + 520, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 159, + 520, + 186 + ], + "spans": [ + { + "bbox": [ + 88, + 159, + 520, + 186 + ], + "type": "text", + "content": "[16] Rycroft, C. H., \"Voro++: a three-dimensional voronoi cell library in " + }, + { + "bbox": [ + 88, + 159, + 520, + 186 + ], + "type": "inline_equation", + "content": "c++" + }, + { + "bbox": [ + 88, + 159, + 520, + 186 + ], + "type": "text", + "content": ",\" Chaos 19 (2009). Lawrence Berkeley National Laboratory." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 87, + 187, + 501, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 187, + 501, + 212 + ], + "spans": [ + { + "bbox": [ + 87, + 187, + 501, + 212 + ], + "type": "text", + "content": "[17] Barber, C. B., Dobkin, D. P., and Huhdanpaa, H., \"The quickhull algorithm for convex hulls,\" ACM Trans. Math. Softw. 22(4), 469-483 (1996)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 87, + 214, + 473, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 214, + 473, + 239 + ], + "spans": [ + { + "bbox": [ + 87, + 214, + 473, + 239 + ], + "type": "text", + "content": "[18] Snyder, J. M. and Barr, A. H., \"Ray tracing complex models containing surface tessellations,\" SIGGRAPH Comput. Graph. 21(4), 119-128 (1987)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 88, + 241, + 511, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 241, + 511, + 266 + ], + "spans": [ + { + "bbox": [ + 88, + 241, + 511, + 266 + ], + "type": "text", + "content": "[19] Hoppe, H., \"Progressive meshes. computer graphics,\" SIGGRAPH96 Proceedings, 99108 (1996)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 87, + 268, + 473, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 268, + 473, + 294 + ], + "spans": [ + { + "bbox": [ + 87, + 268, + 473, + 294 + ], + "type": "text", + "content": "[20] Burns, D. and Osfield, R., \"Open scene graph a: Introduction, b: Examples and applications,\" 265 (2004)." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 102, + 312, + 238, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 312, + 238, + 323 + ], + "spans": [ + { + "bbox": [ + 102, + 312, + 238, + 323 + ], + "type": "text", + "content": "Further author information:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 102, + 326, + 294, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 326, + 294, + 338 + ], + "spans": [ + { + "bbox": [ + 102, + 326, + 294, + 338 + ], + "type": "text", + "content": "Lange B.: E-mail: benoit.lange@lirmm.fr" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 102, + 339, + 332, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 339, + 332, + 351 + ], + "spans": [ + { + "bbox": [ + 102, + 339, + 332, + 351 + ], + "type": "text", + "content": "Rodriguez N.: E-mail: nancy.rodriguez@lirmm.fr" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 102, + 353, + 304, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 353, + 304, + 364 + ], + "spans": [ + { + "bbox": [ + 102, + 353, + 304, + 364 + ], + "type": "text", + "content": "Puech W.: E-mail: william.puech@lirmm.fr" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 102, + 366, + 286, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 366, + 286, + 378 + ], + "spans": [ + { + "bbox": [ + 102, + 366, + 286, + 378 + ], + "type": "text", + "content": "Rey H.: E-mail:REYHERVE@fr.ibm.com" + } + ] + } + ], + "index": 14 + }, + 
{ + "bbox": [ + 102, + 380, + 323, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 380, + 323, + 392 + ], + "spans": [ + { + "bbox": [ + 102, + 380, + 323, + 392 + ], + "type": "text", + "content": "Vasques X.: E-mail: xaviervasques@fr.ibm.com" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 768, + 317, + 776 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 768, + 317, + 776 + ], + "spans": [ + { + "bbox": [ + 294, + 768, + 317, + 776 + ], + "type": "text", + "content": "10/10" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09277/39fed2ad-9645-4fde-a1c3-86b0c99b7b36_content_list.json b/data/2025/2503_09xxx/2503.09277/39fed2ad-9645-4fde-a1c3-86b0c99b7b36_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..b405f0da66db10cb3e2fb23117aa4614922f68d3 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/39fed2ad-9645-4fde-a1c3-86b0c99b7b36_content_list.json @@ -0,0 +1,1878 @@ +[ + { + "type": "text", + "text": "UniCombine: Unified Multi-Conditional Combination with Diffusion Transformer", + "text_level": 1, + "bbox": [ + 225, + 130, + 772, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Haoxuan Wang $^{1\\dagger}$ , Jinlong Peng $^{2\\dagger}$ , Qingdong He $^{2}$ , Hao Yang $^{3}$ , Ying Jin $^{1}$ , Jiafu Wu $^{2}$ , Xiaobin Hu $^{2}$ , Yanjie Pan $^{1}$ , Zhenye Gan $^{2}$ , Mingmin Chi $^{1*}$ , Bo Peng $^{4*}$ , Yabiao Wang $^{2,5*}$", + "bbox": [ + 156, + 189, + 836, + 226 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Fudan University, $^{2}$ Tencent Youtu Lab, $^{3}$ Shanghai Jiao Tong University, $^{4}$ Shanghai Ocean University $^{5}$ Zhejiang University", + "bbox": [ + 132, + 227, + 864, + 244 + ], + "page_idx": 0 + }, 
+ { + "type": "text", + "text": "https://github.com/Xuan-World/UniCombine", + "bbox": [ + 256, + 262, + 732, + 279 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/2c8007f4046d1a3e78dbaf014b7a491dbbbf9b78ef27cf90ee2c58035b246a98.jpg", + "image_caption": [ + "Figure 1. Fantastic results of our proposed UniCombine on multi-conditional controllable generation: (a) Subject-Insertion task. (b) and (c) Subject-Spatial task. (d) Multi-Spatial task. Our unified framework effectively handles any combination of input conditions and achieves remarkable alignment with all of them, including but not limited to text prompts, spatial maps, and subject images." + ], + "image_footnote": [], + "bbox": [ + 96, + 292, + 898, + 684 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 747, + 326, + 762 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "With the rapid development of diffusion models in image generation, the demand for more powerful and flexible controllable frameworks is increasing. Although existing methods can guide generation beyond text prompts, the challenge of effectively combining multiple conditional inputs", + "bbox": [ + 89, + 781, + 482, + 857 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "while maintaining consistency with all of them remains unsolved. To address this, we introduce UniCombine, a DiT-based multi-conditional controllable generative framework capable of handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images. Specifically, we introduce a novel Conditional MMDiT Attention mechanism and incorporate a trainable LoRA module to build both the training-free and training-based versions. 
Additionally, we propose a new pipeline to construct SubjectSpatial200K, the first dataset", + "bbox": [ + 511, + 748, + 906, + 900 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.09277v2 [cs.CV] 8 Jul 2025", + "bbox": [ + 22, + 287, + 57, + 709 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$\\dagger$ Equal contribution.", + "bbox": [ + 114, + 875, + 227, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Corresponding author.", + "bbox": [ + 114, + 888, + 245, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "designed for multi-conditional generative tasks covering both the subject-driven and spatially-aligned conditions. Extensive experimental results on multi-conditional generation demonstrate the outstanding universality and powerful capability of our approach with state-of-the-art performance.", + "bbox": [ + 88, + 90, + 480, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 89, + 210, + 222, + 226 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "With the advancement of diffusion-based [13, 42] text-to-image generative technology, a series of single-conditional controllable generative frameworks like ControlNet [59], T2I-Adapter [31], IP-Adapter [58], and InstantID [47] have expanded the scope of the control signals from text prompts to image conditions. It allows users to control more plentiful aspects of the generated images, such as layout, style, characteristics, etc. These conventional approaches are specifically designed for the UNet [38] backbone of Latent Diffusion Models (LDM) [37] with dedicated control networks. 
Besides, some recent approaches, such as Omini-Control [45], integrate control signals into the Diffusion Transformer (DiT) [7, 23] architecture, which demonstrates superior performance compared to the UNet in LDM.", + "bbox": [ + 91, + 234, + 483, + 446 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Although the methods mentioned above have achieved a promising single-conditional performance, the challenge of multi-conditional controllable generation is still unsolved. Previous multi-conditional generative methods like UniControl [35] and UniControlNet [60] are generally restricted to handling spatial conditions like Canny or Depth maps and fail to accommodate subject conditions, resulting in limited applicable scenarios. Despite the recently proposed Ctrl-X [27] features controlling structure and appearance together, its performance is unsatisfactory and supports only a limited combination of conditions.", + "bbox": [ + 91, + 448, + 483, + 613 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Moreover, we assume that many existing generative tasks can be viewed as a multi-conditional generation, such as virtual try-on [5, 17], object insertion [3, 51], style transfer [15, 33, 52], spatially-aligned customization [20, 21, 25, 27], etc. Consequently, there is a need for a unified framework to encompass these generative tasks in a way of multi-conditional generation. This framework should ensure consistency with all input constraints, including subject ID preservation, spatial structural alignment, background coherence, and style uniformity.", + "bbox": [ + 91, + 614, + 483, + 763 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To achieve this, we propose UniCombine, a powerful and universal framework that offers several key advantages: Firstly, our framework is capable of simultaneously handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images. 
Specifically, we introduce a novel Conditional MMDiT Attention mechanism and incorporate a trainable Denoising-LoRA module to build both the training-free and training-based versions. By integrating multiple pre", + "bbox": [ + 89, + 765, + 480, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "trained Condition-LoRA module weights into the conditional branches, UniCombine achieves excellent training-free performance, which can be improved further after training on the task-specific multi-conditional dataset. Secondly, due to the lack of a publicly available dataset for multi-conditional generative tasks, we build the SubjectSpatial200K dataset to serve as the training dataset and the testing benchmark. Specifically, we generate the subject grounding annotations and spatial map annotations for all the data samples from Subjects200K [45] and therefore formulate our SubjectSpatial200K dataset. Thirdly, our UniCombine can achieve many unprecedented multi-conditional combinations, as shown in Fig. 1, such as combining a reference subject image with the inpainting area of a background image or with the layout guidance of a depth (or canny) map while imposing precise control via text prompt. 
Furthermore, extensive experiments on Subject-Insertion, Subject-Spatial, and Multi-Spatial conditional generation demonstrate the outstanding universality and powerful capability of our method against other existing specialized approaches.", + "bbox": [ + 511, + 90, + 903, + 409 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, we highlight our contributions as follows:", + "bbox": [ + 529, + 411, + 893, + 426 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We present UniCombine, a DiT-based multi-conditional controllable generative framework capable of handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images.", + "- We construct the SubjectSpatial200K dataset, which encompasses both subject-driven and spatially-aligned conditions for all text-image sample pairs. It addresses the absence of a publicly available dataset for training and testing multi-conditional controllable generative models.", + "- We conduct extensive experiments on Subject-Insertion, Subject-Spatial, and Multi-Spatial conditional generative tasks. The experimental results demonstrate the state-of-the-art performance of our UniCombine, which effectively aligns with all conditions harmoniously." + ], + "bbox": [ + 513, + 430, + 903, + 642 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 513, + 665, + 653, + 681 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. 
Diffusion-Based Models", + "text_level": 1, + "bbox": [ + 513, + 693, + 733, + 709 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Diffusion-based [13, 42] models have demonstrated superior performance than GAN-based [9] ones across various domains, including controllable generation [18, 31, 47, 58, 59], image editing [11, 30, 39], customized generation [8, 22, 40], object insertion [4, 43, 56], mask-guided inpainting [19, 48, 61], and so on. These breakthroughs begin with the LDM [37] and are further advanced with the DiT [32] architecture. The latest text-to-image generative models, SD3 [7] and FLUX [23], have attained state-of-the-art results by employing the Rectified Flow [28, 29] training strategy, the RPE [44] positional embedding and the MultiModal Diffusion Transformer (MMDiT) [7] architecture.", + "bbox": [ + 511, + 719, + 903, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e21e3bf9a4a78294b155fc81f3e6b7e3e4251e119c112d81fb0f47a4822439d5.jpg", + "image_caption": [ + "(a) Overall Framework" + ], + "image_footnote": [], + "bbox": [ + 93, + 87, + 321, + 335 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/3162d24f03eacb746bd4ecce492028784582c1beb179bc5cf91d96a185a56eec.jpg", + "image_caption": [ + "(b) Single-Conditional Setting" + ], + "image_footnote": [], + "bbox": [ + 328, + 87, + 545, + 335 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/7fa892b46091963fb1ee53c8e239a5cf468404dfb70774813a489096638da93b.jpg", + "image_caption": [ + "(c) Multi-Conditional Setting", + "Figure 2. Overview of our proposed UniCombine. (a) The overall framework. We regard the MMDiT-based diffusion models as consisting of the text branch and the denoising branch. Based on it, our UniCombine introduces multiple conditional branches to process the input conditions. 
(b) The single-conditional setting of our UniCombine. It is equivalent to OminiControl [45] which is a special case of our proposed UniCombine framework under a single-conditional setting. (c) The multi-conditional setting of our UniCombine. Our LoRA Switching module adaptively activates the pre-trained Condition-LoRA modules on the weights of the denoising branch according to the conditional types. The proposed Conditional MMDiT Attention mechanism is used to replace the original MMDiT Attention mechanism for handling the unified multi-conditional input sequence. Whether to load the optional Denoising-LoRA module is the difference between the training-free and training-based versions." + ], + "image_footnote": [], + "bbox": [ + 552, + 88, + 905, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Controllable Generation", + "text_level": 1, + "bbox": [ + 89, + 484, + 313, + 498 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Controllable generation allows for customizing the desired spatial layout, filter style, or subject appearance in the generated images. A series of methods such as ControlNet [59], T2I-Adapter [31], GLIGEN [26], and ZestGuide [6] successfully introduce the spatial conditions into controllable generation, enabling models to control the spatial layout of generated images. Another series of methods, such as IP-Adapter [58], InstantID [47], BLIP-Diffusion [24], and StyleDrop [41] incorporate the subject conditions into controllable generation, ensuring consistency between generated images and reference images in style, characteristics, subject appearance, etc. To unify these two tasks, OminiControl [45] proposes a novel MMDiT-based controllable framework to handle various conditions with a unified pipeline. Unfortunately, it lacks the capability to control generation with multiple conditions. 
To this end, we propose UniCombine, which successfully extends this framework to multi-conditional scenarios.", + "bbox": [ + 91, + 506, + 483, + 777 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Multi-Conditional Controllable Generation", + "text_level": 1, + "bbox": [ + 89, + 787, + 459, + 801 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As controllable generation advances, merely providing a single condition to guide the image generation no longer satisfies the needs. As a result, research on multi-conditional controllable generation has emerged. Existing methods like UniControl [35], UniControlNet [60] and Cocktail [14] exhibit acceptable performance when simul", + "bbox": [ + 89, + 810, + 483, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "taneously leveraging multiple spatial conditions for image generation. However, there is a lack of multi-conditional generative models that support utilizing both spatial conditions and subject conditions to guide the generative process together. Although the recently proposed method Ctrl-X [27] features controlling the appearance and structure simultaneously, its performance remains unsatisfactory with a limited combination of conditions and it is not compatible with the Diffusion Transformer architecture. To address the aforementioned limitations, we propose UniCombine to enable the flexible combination of various control signals.", + "bbox": [ + 511, + 484, + 906, + 651 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 513, + 678, + 604, + 694 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. 
Preliminary", + "text_level": 1, + "bbox": [ + 511, + 708, + 640, + 723 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this work, we mainly explore the latest generative models that utilize the Rectified Flow (RF) [28, 29] training strategy and the MMDiT [7] backbone architecture, like FLUX [23] and SD3 [7]. For the source noise distribution $X_0 \\sim p_{\\mathrm{noise}}$ and the target image distribution $X_1 \\sim p_{\\mathrm{data}}$ , the RF defines a linear interpolation between them as $X_t = (1 - t)X_0 + tX_1$ for $t \\in [0,1]$ . The training objective is to learn a time-dependent vector field $v_t(X_t, t; \\theta)$ that describes the trajectory of the ODE $dX_t = v_t(X_t, t; \\theta)dt$ . Specifically, $v_t(X_t, t; \\theta)$ is optimized to approximate the constant velocity $X_1 - X_0$ , leading to the loss function as Eq. (1).", + "bbox": [ + 511, + 734, + 906, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {R F}} (\\theta) = \\mathbb {E} _ {X _ {1} \\sim p _ {\\text {d a t a}}, X _ {0} \\sim p _ {\\text {n o i s e}}, t \\sim U [ 0, 1 ]} \\left[ \\| (X _ {1} - X _ {0}) - v _ {t} (X _ {t}, t; \\theta) \\| ^ {2} \\right] \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 107, + 483, + 138 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this paper, we propose a concept of branch to differentiate the processing flows of input embeddings from different modalities in MMDiT-based models. As shown in Fig. 2 (a), instead of the single-branch architecture [37] where the text prompt is injected into the denoising branch via crossattention, MMDiT uses two independent transformers to construct the text branch and the denoising branch. 
Based on it, OminiControl [45] incorporates a Condition-LoRA module onto the weights of the denoising branch to process the input conditional embedding, thus forming its Conditional Branch, as depicted in Fig. 2 (b). It is worth noting that, OminiControl [45] can be regarded as a special case of our proposed UniCombine framework under the single-conditional setting. It provides the pre-trained Condition-LoRA modules to meet the need for our multi-conditional settings. In the single-conditional setting, the text branch embedding $T$ , the denoising branch embedding $X$ , and the conditional branch embedding $C$ are concatenated to form a unified sequence $[T;X;C]$ to be processed in the MMDiT Attention mechanism.", + "bbox": [ + 89, + 138, + 483, + 441 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. UniCombine", + "text_level": 1, + "bbox": [ + 89, + 450, + 225, + 464 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Building upon the MMDiT-based text-to-image generative model FLUX [23], we propose UniCombine, a multi-conditional controllable generative framework consisting of various conditional branches. Each conditional branch is in charge of processing one conditional embedding, thus forming a unified embedding sequence $S$ as presented in Eq. (2).", + "bbox": [ + 89, + 470, + 483, + 564 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS = [ T; X; C _ {1}; \\dots ; C _ {N} ] \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 214, + 566, + 480, + 580 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given that the single-conditional setting of our UniCombine is equivalent to OmniControl [45], we only focus on the multi-conditional setting in this section. Firstly, we introduce a LoRA Switching module to manage multiple conditional branches effectively. Secondly, we introduce a novel Conditional MMDiT Attention mechanism to process the unified sequence $S$ in the multi-conditional setting. 
Thirdly, we present an insight analysis of our training-free strategy, which leverages the pre-trained Condition-LoRA module weights to perform a training-free multi-conditional controllable generation. Lastly, we present a feasible training-based strategy, which utilizes a trainable Denoising-LoRA module to enhance the performance further after training on a task-specific multi-conditional dataset.", + "bbox": [ + 89, + 584, + 483, + 808 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "LoRA Switching Module. Before denoising with multiple input conditions, the Condition-LoRA modules pre-trained under single-conditional settings should be loaded onto the weights of the denoising branch, like $[CondLoRA_1, CondLoRA_2, \\ldots]$ . Then the LoRA Switching module determines which one of them should be", + "bbox": [ + 89, + 810, + 483, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "activated according to the type of input conditions, forming a one-hot gating mechanism $[0,1,0,\\dots,0]$ , as shown in Fig. 2 (c). Subsequently, different conditional branches with different activated Condition-LoRA modules are used for processing different conditional embeddings, resulting in a minimal number of additional parameters introduced for different conditions. Unlike the single-conditional setting in Fig. 2 (b), which only needs loading LoRA modules, the LoRA Switching module in Fig. 2 (c) enables adaptive selection among multiple LoRA modules to provide the matching conditional branches for each conditional embeddings, granting our framework greater flexibility and adaptability to handle diverse conditional combinations.", + "bbox": [ + 511, + 90, + 903, + 286 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Conditional MMDiT Attention. 
After concatenating the output embeddings from these $N$ conditional branches, the unified sequence $S$ cannot be processed through the original MMDiT Attention mechanism due to two major challenges: (1) The computational complexity scales quadratically as $O(N^2)$ with respect to the number of conditions, which becomes especially problematic when handling multiple high-resolution conditions. (2) When performing MMDiT Attention on the unified sequence $S$ , different condition signals interfere with each other during the attention calculation, making it difficult to effectively utilize the pre-trained Condition-LoRA module weights for the denoising process.", + "bbox": [ + 511, + 287, + 903, + 469 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To address these challenges, we introduce a novel Conditional MMDiT Attention mechanism (CMMDiT Attention) as depicted in Fig. 2 (c) to replace the original MMDiT Attention. Instead of feeding the entire unified sequence $S$ into the MMDiT Attention at once, CMMDiT Attention follows distinct computational mechanisms according to which branch is serving as queries. The core idea is that the branch serving as a query aggregates the information from different scopes of the unified sequence $S$ depending on its type. Specifically, when the denoising branch $X$ and the text branch $T$ serve as queries, their scope of keys and values correspond to the entire unified sequence $S$ , granting them a global receptive field and the ability to aggregate information from all conditional branches. In contrast, when the conditional branches $C_i$ serve as queries, their receptive fields do not encompass one another. Their scope of keys and values are restricted to the subsequence $S_i$ as presented in Eq. 
(3), which prevents feature exchange and avoids information entanglement between different conditions.", + "bbox": [ + 511, + 469, + 903, + 755 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS _ {i} = [ T; X; C _ {i} ] \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 661, + 760, + 903, + 775 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Furthermore, the CMMDiT Attention reduces computational complexity from $O(N^2)$ to $O(N)$ as the number of conditions increases, making it more scalable.", + "bbox": [ + 511, + 779, + 906, + 824 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Training-free Strategy. The following analyses provide a detailed explanation of why our UniCombine is capable of seamlessly integrating and effectively reusing the pretrained Condition-LoRA module weights to tackle multi-conditional challenges in a training-free manner.", + "bbox": [ + 511, + 825, + 903, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/8c57a3c424f01738f8a53f6d6f0c8688e8016cc7a73bf6ea21df63ffdafe3eaa.jpg", + "image_caption": [ + "Figure 3. Average $\\mathrm{X} \\rightarrow$ Subject cross-attention map of the insertion area." + ], + "image_footnote": [], + "bbox": [ + 94, + 87, + 482, + 167 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "On the one hand, when the conditional embeddings $C_i$ serve as queries in CMMDiT, they follow the same attention computational paradigm as in the MMDiT of single-conditional settings, as indicated in Eq. 
(4).", + "bbox": [ + 89, + 215, + 483, + 277 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\operatorname {C M M D i T} \\left(Q = C _ {i} ^ {q}, K = \\left[ T ^ {k}, X ^ {k}, C _ {i} ^ {k} \\right], V = \\left[ T ^ {v}, X ^ {v}, C _ {i} ^ {v} \\right]\\right) \\\\ = \\operatorname {M M D i T} (Q = C ^ {q}, K = [ T ^ {k}, X ^ {k}, C ^ {k} ], V = [ T ^ {v}, X ^ {v}, C ^ {v} ]) \\tag {4} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 107, + 280, + 480, + 314 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This consistent computational paradigm enables the conditional branches to share the same feature extraction capability between the multi-conditional setting and the single-conditional setting.", + "bbox": [ + 89, + 321, + 483, + 381 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "On the other hand, when the denoising embedding $X$ and the text prompt embedding $T$ serve as queries in CMMDiT, their attention computational paradigm diverges from the single-conditional settings. As illustrated in Eq. 
(5), when the denoising embedding $X$ is used as a query for attention computation with multiple conditional embeddings in CMMDiT, the attention score matrix is computed between $X$ and all the conditional embeddings.", + "bbox": [ + 89, + 381, + 483, + 502 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathrm {C M M D i T} (Q = X ^ {q}, K / V = [ X ^ {k / v}, T ^ {k / v}, C _ {1} ^ {k / v}, \\dots , C _ {N} ^ {k / v} ]) \\\\ = \\operatorname {s o f t m a x} \\left(\\frac {1}{\\sqrt {d i m}} X ^ {q} \\left[ X ^ {k}, T ^ {k}, C _ {1} ^ {k}, \\dots , C _ {N} ^ {k} \\right] ^ {\\top}\\right) \\left[ X ^ {v}, T ^ {v}, C _ {1} ^ {v}, \\dots , C _ {N} ^ {v} \\right] \\tag {5} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 503, + 488, + 563 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "It allows $X$ to extract and integrate information from each of the conditional embeddings separately and fuse them. This divide-and-conquer computational paradigm enables the text branch and denoising branch to fuse the conditional features effectively.", + "bbox": [ + 89, + 568, + 483, + 643 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "By leveraging the computational paradigms mentioned above, our UniCombine is able to perform a training-free multi-conditional controllable generation with the pretrained Condition-LoRA modules.", + "bbox": [ + 89, + 643, + 483, + 703 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training-based Strategy. However, due to the lack of training, solely relying on the softmax operation in Eq. (5) to balance the attention score distribution across multiple conditional embeddings may result in an undesirable feature fusion result, making our training-free version unsatisfactory in some cases. To address this issue, we introduce a trainable Denoising-LoRA module within the denoising branch to rectify the distribution of attention scores in Eq. (5). 
During training, we keep all the Condition-LoRA modules frozen to preserve the conditional extracting capability and train the Denoising-LoRA module solely on the task-specific multi-conditional dataset, as shown in Fig. 2 (c). After training, the denoising embedding $X$ learns to", + "bbox": [ + 89, + 704, + 483, + 901 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/d04a32c8e874aab106f9fa111d4a7b255e65e731a4acd2452f053c1c30949d06.jpg", + "image_caption": [ + "Figure 4. SubjectSpatial200K dataset construction pipeline." + ], + "image_footnote": [], + "bbox": [ + 519, + 88, + 901, + 287 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "better aggregate the appropriate information during the CMMDiT Attention operation. As presented in Fig. 3, the average $\mathrm{X} \rightarrow$ Subject attention map within the inpainting area is more concentrated on the subject area in the training-based version.", + "bbox": [ + 511, + 323, + 906, + 398 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. SubjectSpatial200K dataset", + "text_level": 1, + "bbox": [ + 511, + 409, + 761, + 425 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our SubjectSpatial200K dataset aims to address the lack of a publicly available dataset for multi-conditional generative tasks. Existing datasets fail to include both the subject-driven and spatially-aligned annotations. Recently, the Subjects200K [45] dataset provides a publicly accessible dataset for subject-driven generation. Based on it, we introduce the SubjectSpatial200K dataset, which is a unified high-quality dataset designed for training and testing multi-conditional controllable generative models. This dataset includes comprehensive annotations as elaborated below. Besides, the construction pipeline is detailed in Fig. 4.", + "bbox": [ + 511, + 431, + 906, + 598 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Subject Grounding Annotation. 
The subject grounding annotation is significantly necessary for many generative tasks like instance-level inpainting [19, 61], instance-level controllable generation [26, 49], and object insertion [4, 43]. By leveraging the open-vocabulary object detection model Mamba-YOLO-World [46] on Subjects200K, we detect bounding boxes for all subjects according to their category descriptions and subsequently derive the corresponding mask regions.", + "bbox": [ + 511, + 598, + 905, + 733 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Spatial Map Annotation. The spatial map annotation further extends the applicable scope of our dataset to spatially-aligned synthesis tasks. Specifically, we employ the Depth-Anything [57] model and the OpenCV [1] library on Subjects200K to derive the Depth and Canny maps.", + "bbox": [ + 511, + 734, + 905, + 810 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiment", + "text_level": 1, + "bbox": [ + 511, + 823, + 637, + 840 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Setup", + "text_level": 1, + "bbox": [ + 511, + 848, + 594, + 864 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation. We use the FLUX.1-schnell [23] as our base model and the weights provided by OminiControl [45]", + "bbox": [ + 511, + 869, + 905, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/a34bbcb900321fe501a520855202f376e6664d9faf88390dd30b55aa8d6f1b53.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TaskMethodGenerative QualityControllabilitySubject ConsistencyText Consistency
FID ↓SSIM ↑F1 ↑MSE ↓CLIP-I ↑DINO ↑CLIP-T ↑
Multi-SpatialUniControl44.170.320.071346.02--30.28
UniControlNet20.960.280.091231.06--32.74
UniCombine (training-free)10.350.540.18519.53--33.70
UniCombine (training-based)6.820.640.24165.90--33.45
Subject-InsertionObjectStitch26.860.37--93.0582.3432.25
AnyDoor26.070.37--94.8886.0432.55
UniCombine (training-free)6.370.76--95.6089.0133.11
UniCombine (training-based)4.550.81--97.1492.9633.08
Subject-DepthControlNet w. IP-Adapter29.930.34-1295.8080.4162.2632.94
Ctrl-X52.370.36-2644.9078.0850.8330.20
UniCombine (training-free)10.030.48-507.4091.1585.7333.41
UniCombine (training-based)6.660.55-196.6594.4790.3133.30
Subject-CannyControlNet w. IP-Adapter30.380.380.09-79.8060.1932.85
Ctrl-X47.890.360.05-79.3554.3130.34
UniCombine (training-free)10.220.490.17-91.8486.8833.21
UniCombine (training-based)6.010.610.24-95.2692.5933.30
", + "bbox": [ + 120, + 88, + 874, + 338 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. Quantitative comparison of our method with existing approaches on Multi-Spatial, Subject-Insertion, Subject-Depth, and Subject-Canny conditional generative tasks. The bold and underlined figures represent the optimal and sub-optimal results, respectively.", + "bbox": [ + 89, + 344, + 903, + 375 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/1e06b7a6c1ae6b58e94df98cc9be3d1764e9ac9f480e954299aa319606949d55.jpg", + "image_caption": [ + "Figure 5. Qualitative comparison on Multi-Spatial generation." + ], + "image_footnote": [], + "bbox": [ + 93, + 387, + 480, + 676 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "as our pre-trained Condition-LoRA module weights. During the training of our Denoising-LoRA module, we use a rank of 4, consistent with the Condition-LoRA. We choose the Adam optimizer with a learning rate of $1e^{-4}$ and set the weight decay to 0.01. Our models are trained for 30,000 steps on 16 NVIDIA V100 GPUs at a resolution of $512 \\times 512$ .", + "bbox": [ + 89, + 715, + 482, + 821 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "**Benchmarks.** We evaluate the performance of our method in both training-free and training-based versions. The training and testing datasets are partitioned from the SubjectSpatial200K dataset based on image quality assessment scores evaluated by ChatGPT-4o, with details provided in Sec. A1.", + "bbox": [ + 89, + 824, + 483, + 902 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d60c55bfaeeeedfd5aefc1bba87bd491b67f9a1c54a3f0b04bece1daafa5fa08.jpg", + "image_caption": [ + "Figure 6. Qualitative comparison on Subject-Insertion generation." 
+ ], + "image_footnote": [], + "bbox": [ + 514, + 387, + 903, + 676 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Importantly, the dataset partitioning scheme remains consistent in all experiments.", + "bbox": [ + 511, + 717, + 903, + 747 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Metrics. To evaluate the subject consistency, we calculate the CLIP-I [36] score and DINO [2] score between the generated images and the ground truth images. To assess the generative quality, we compute the FID [12] and SSIM [50] between the generated image set and the ground truth image set. To measure the controllability, we compute the F1 Score for edge conditions and the MSE score for depth conditions between the extracted maps from generated images and the original conditions. Additionally, we adopt the CLIP-T [36] score to estimate the text consistency between", + "bbox": [ + 511, + 750, + 906, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 925, + 503, + 935 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/506fe3547ab1943e7faa90d0f349eb3f11dc59ed4a47d7cf36007f74a6ca38f0.jpg", + "image_caption": [ + "Figure 7. Qualitative comparison on Subject-Depth generation." + ], + "image_footnote": [], + "bbox": [ + 94, + 88, + 480, + 377 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "the generated images and the text prompts.", + "bbox": [ + 89, + 412, + 374, + 428 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Main Result", + "text_level": 1, + "bbox": [ + 89, + 435, + 220, + 450 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conduct extensive and comprehensive comparative experiments on the Multi-Spatial, Subject-Insertion, and Subject-Spatial conditional generative tasks.", + "bbox": [ + 89, + 457, + 482, + 502 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2.1. 
Multi-Spatial Conditional Generation", + "text_level": 1, + "bbox": [ + 89, + 508, + 398, + 523 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The Multi-Spatial conditional generation aims to generate images adhering to the collective layout constraints of diverse spatial conditions. This requires the model to achieve a more comprehensive layout control based on input conditions in a complementary manner. The comparative results in Tab. 1 and Fig. 5 demonstrate that our method outperforms existing multi-spatial conditional generation approaches in generative quality and controllability.", + "bbox": [ + 89, + 527, + 482, + 648 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2.2. Subject-Insertion Conditional Generation", + "text_level": 1, + "bbox": [ + 89, + 655, + 426, + 670 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The Subject-Insertion conditional generation requires the model to generate images where the reference subject is inserted into the masked region of the target background. As illustrated in Tab. 1 and Fig. 6, our UniCombine demonstrates superior performance compared to previous methods with three advantages: Firstly, our method ensures that the reference subject is inserted into the background with high consistency and harmonious integration. Secondly, our method excels in open-world object insertion without requiring test-time tuning, unlike conventional customization methods [22, 40]. Finally, our method demonstrates strong semantic comprehension capabilities, enabling it to extract the desired object from a complex subject image with a non-white background, rather than simply pasting the entire subject image into the masked region.", + "bbox": [ + 89, + 674, + 482, + 900 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/44ebde2dd6cfbcb637ef99847a469c9c5e74ddeb4368792fe3d6809b1e91856e.jpg", + "image_caption": [ + "Figure 8. Qualitative comparison on Subject-Canny generation." 
+ ], + "image_footnote": [], + "bbox": [ + 516, + 87, + 903, + 377 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2.3. Subject-Spatial Conditional Generation", + "text_level": 1, + "bbox": [ + 511, + 414, + 834, + 429 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The Subject-Spatial conditional generation focuses on generating images of the reference subject while ensuring the layout aligns with specified spatial conditions. We compare our method with Ctrl-X [27] and a simple baseline model. Ctrl-X is a recently proposed model based on SDXL [34] that simultaneously controls structure and appearance. The baseline model is constructed by integrating the FLUX ControlNet [53, 54] and FLUX IP-Adapter [55] into the FLUX.1-dev [23] base model. Specifically, we divided the Subject-Spatial generative task into different experimental groups based on the type of spatial conditions, referred to as Subject-Depth and Subject-Canny, respectively. As presented in Fig. 7, Fig. 8, and Tab. 1, the experimental results demonstrate the superior performance of our UniCombine: Firstly, our method exhibits stronger semantic comprehension capability, generating the reference subject in the accurate localization of the spatial conditions without confusing appearance features. Secondly, our method demonstrates greater adaptability, generating the reference subject with reasonable morphological transformations to align with the guidance of spatial conditions and text prompts. Lastly, our method achieves superior subject consistency while maintaining excellent spatial coherence.", + "bbox": [ + 511, + 434, + 906, + 781 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2.4. Textual Guidance", + "text_level": 1, + "bbox": [ + 511, + 790, + 684, + 804 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Fig. 1 and Tab. 
1, our method not only allows for controllable generation by combining multiple conditions but also enables precise textual guidance simultaneously. By utilizing a unified input sequence $S = [T; X; C_1; \\ldots; C_N]$ during the denoising process, our UniCombine effectively aligns the descriptive words in $T$ with", + "bbox": [ + 511, + 809, + 906, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/8b5b9183e06014b6e81f79d2b0e6e78e929ba4f810448e1f8a953e9f0afcf401.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodCLIP-I ↑DINO ↑CLIP-T ↑AttnOps ↓
Ours w/o CMMDiT95.4788.4233.10732.17M
Ours w/ CMMDiT95.6089.0133.11612.63M
", + "bbox": [ + 91, + 88, + 493, + 141 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/2a92bbe800381328c2717f661d17a8523810856178e0fb0febb65310f9baa364.jpg", + "table_caption": [ + "Table 2. Quantitative ablation of CMMDiT Attention mechanism on training-free Subject-Insertion task. AttnOps is short for the number of attention operations." + ], + "table_footnote": [], + "table_body": "
Background\nSubjectTraining-free\nw/o CMMDiTTraining-free\nw/ CMMDiTBackground\nSubjectTraining-free\nw/o CMMDiTTraining-free\nw/ CMMDiT
inconsistentsuccessinconsistentsuccess
failsuccessfailsuccess
", + "bbox": [ + 91, + 198, + 480, + 335 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/b5701621428ee2941e72b32e1a85040f321539ed97d657923eef29882e89bdfd.jpg", + "table_caption": [ + "Figure 9. Qualitative ablation of CMMDiT Attention mechanism on training-free Subject-Insertion task." + ], + "table_footnote": [], + "table_body": "
MethodCLIP-I ↑DINO ↑CLIP-T ↑
Ours w/ Text-LoRA96.9792.3233.10
Ours w/ Denoising-LoRA97.1492.9633.08
", + "bbox": [ + 106, + 378, + 465, + 431 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/fc8bc88e1ec09551aeba8132d1ccff2d2a1eb574ea62259a59e0357472255443.jpg", + "image_caption": [ + "Figure 10. Qualitative ablation of trainable LoRA on training-based Subject-Insertion task." + ], + "image_footnote": [], + "bbox": [ + 91, + 473, + 480, + 612 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "the relevant features in $C_i$ and the corresponding patches in $X$ , thereby achieving a remarkable text-guided multi-conditional controllable generation.", + "bbox": [ + 89, + 657, + 482, + 704 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3. Ablation Study", + "text_level": 1, + "bbox": [ + 89, + 712, + 243, + 729 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We exhibit the ablation study results conducted on the Subject-Insertion task in this section, while more results on the other tasks are provided in Sec. A2.", + "bbox": [ + 89, + 734, + 482, + 779 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effect of Conditional MMDiT Attention. To evaluate the effectiveness of our proposed Conditional MMDiT Attention mechanism, we replace the CMMDiT Attention with the original MMDiT Attention and test its training-free performance to avoid the influence of training data. As shown in Tab. 2 and Fig. 9, our framework attains superior performance with fewer attention operations when employing the CMMDiT Attention mechanism.", + "bbox": [ + 89, + 779, + 482, + 898 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/3618030c54b9a80305073bef94f2394a26e7580cbe554afe3dc981094168e421.jpg", + "table_caption": [ + "Table 3. Quantitative ablation of trainable LoRA on training-based Subject-Insertion task." + ], + "table_footnote": [], + "table_body": "
MethodCLIP-I ↑DINO ↑CLIP-T ↑
Ours w/ DSB only96.8592.3833.07
Ours w/ DSB and SSB97.1492.9633.08
", + "bbox": [ + 537, + 88, + 879, + 141 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/e87e6f8b3e137a3f2fcae475f15efcbe2b56a7113615b229b8cb075304866a5d.jpg", + "table_caption": [ + "Table 4. Quantitative ablation of training strategy on training-based Subject-Insertion task. DSB: Dual-Stream Blocks. SSB: Single-Stream Blocks." + ], + "table_footnote": [], + "table_body": "
Background\nSubjectTraining-based\nw/ DSB onlyTraining-based\nw/ DSB + SSBBackground\nSubjectTraining-based\nw/ DSB onlyTraining-based\nw/ DSB + SSB
inconsistentsuccessinconsistentsuccessinconsistentsuccess
inconsistentsuccessfailsuccessinconsistentinconsistent
", + "bbox": [ + 514, + 199, + 903, + 335 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/a4f5c4fc9d8ce66afbdd123ab7fca8f063e5af00acb1254be7e07f83f23cd544.jpg", + "table_caption": [ + "Figure 11. Qualitative ablation of training strategy on training-based Subject-Insertion task. DSB: Dual-Stream Blocks. SSB: Single-Stream Blocks." + ], + "table_footnote": [], + "table_body": "
ModelGPU Memory ↓Add Params ↓
FLUX (bf16, base model)32933M-
CN, 1 cond35235M744M
IP, 1 cond35325M918M
CN + IP, 2 cond36753M1662M
Ours (training-free), 2 cond33323M29M
Ours (training-based), 2 cond33349M44M
", + "bbox": [ + 521, + 396, + 895, + 502 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 5. Comparison of inference GPU memory cost and additionally introduced parameters. CN: ControlNet. IP: IP-Adapter.", + "bbox": [ + 511, + 503, + 903, + 532 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Different Options for Trainable LoRA. To evaluate whether the trainable LoRA module can be applied to the text branch instead of the denoising branch, we load a Text-LoRA in the text branch, with a configuration identical to that of the Denoising-LoRA. The Tab. 3 and Fig. 10 indicate that applying the trainable LoRA module to the denoising branch better modulates the feature aggregation operation across multiple conditional branches.", + "bbox": [ + 511, + 550, + 906, + 671 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Training Strategy. As the parameter scale of the base model increases, the FLUX adaptations of ControlNet [53, 54] and IP-adapter [55] provided by the HuggingFace [16] community inject conditional features only into the dual-stream MMDiT blocks, rather than the entire network, to save memory. In contrast, since our Denoising-LoRA module introduces only a small number of parameters, we incorporate it into both the dual-stream and single-stream blocks to achieve better performance. The results in Tab. 4 and Fig. 11 confirm the validity of our choice.", + "bbox": [ + 511, + 672, + 906, + 823 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Computational Cost. The overheads of our approach in terms of inference GPU memory cost and additionally introduced parameters are minimal. The comparison results against the FLUX ControlNet [53, 54] and FLUX IP-Adapter [55] are shown in Tab. 5.", + "bbox": [ + 511, + 824, + 903, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "More Conditional Branches. 
Our model places no restrictions on the number of supported conditions. The results shown in Fig. 12 demonstrate our model's strong scalability. As the number of conditional branches increases, the level of control becomes finer.", + "bbox": [ + 89, + 90, + 480, + 165 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/a621a99e80843ff5996990493bd8940b3ce3197ae924350ffd10503c3b7c6b1c.jpg", + "image_caption": [ + "Figure 12. From left to right are training-free multi-conditional combination tasks under: $1/2/3/4$ conditions." + ], + "image_footnote": [], + "bbox": [ + 94, + 181, + 478, + 344 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "More Application Scenarios. Our UniCombine can be easily extended to new scenarios, such as reference-based image stylization. After training a new Condition-LoRA on StyleBooth [10] dataset, our UniCombine is able to integrate the style of the reference image with other conditions successfully, as demonstrated in Fig. 13.", + "bbox": [ + 89, + 396, + 483, + 488 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/697b9a0d3b2d71d9ac73a42626a46546416d486f912cfdb2949db0c5a79882ad.jpg", + "image_caption": [ + "Figure 13. Training-free Spatial-Style combination task." + ], + "image_footnote": [], + "bbox": [ + 94, + 503, + 478, + 635 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 89, + 691, + 209, + 707 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We present UniCombine, a DiT-based multi-conditional controllable generative framework capable of handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images. Extensive experiments on Subject-Insertion, Subject-Spatial, and Multi-Spatial conditional generative tasks demonstrate the state-of-the-art performance of our UniCombine in both training-free and training-based versions. 
Additionally, we propose the SubjectSpatial200K dataset to address the lack of a publicly available dataset for training and testing multi-conditional generative models. We believe our work can advance the development of the controllable generation field.", + "bbox": [ + 89, + 719, + 483, + 900 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 514, + 90, + 609, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] G. Bradski. The OpenCV Library. Dr. Dobb's Journal of Software Tools, 2000. 5", + "[2] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9650-9660, 2021. 6", + "[3] Jiaxuan Chen, Bo Zhang, Qingdong He, Jinlong Peng, and Li Niu. Mureobjectstitch: Multi-reference image composition. arXiv preprint arXiv:2411.07462, 2024. 2", + "[4] Xi Chen, Lianghua Huang, Yu Liu, Yujun Shen, Deli Zhao, and Hengshuang Zhao. Anydoor: Zero-shot object-level image customization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6593-6602, 2024. 2, 5", + "[5] Zheng Chong, Xiao Dong, Haoxiang Li, Shiyue Zhang, Wenqing Zhang, Xujie Zhang, Hanqing Zhao, Dongmei Jiang, and Xiaodan Liang. Catvton: Concatenation is all you need for virtual try-on with diffusion models. arXiv preprint arXiv:2407.15886, 2024. 2", + "[6] Guillaume Couairon, Marlene Careil, Matthieu Cord, Stephane Lathuiliere, and Jakob Verbeek. Zero-shot spatial layout conditioning for text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2174-2183, 2023. 3", + "[7] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. 
Scaling rectified flow transformers for high-resolution image synthesis, 2024. URL https://arxiv.org/abs/2403.03206, 2.2, 3", + "[8] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022. 2", + "[9] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks. Communications of the ACM, 63(11):139-144, 2020. 2", + "[10] Zhen Han, Chaojie Mao, Zeyinzi Jiang, Yulin Pan, and Jingfeng Zhang. Stylebooth: Image style editing with multimodal instruction. arXiv preprint arXiv:2404.12154, 2024.9", + "[11] Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022. 2", + "[12] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 6", + "[13] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 2" + ], + "bbox": [ + 514, + 114, + 906, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Minghui Hu, Jianbin Zheng, Daqing Liu, Chuanxia Zheng, Chaoyue Wang, Dacheng Tao, and Tat-Jen Cham. Cocktail: Mixing multi-modality control for text-conditional image generation. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 
3", + "[15] Teng Hu, Ran Yi, Haokun Zhu, Liang Liu, Jinlong Peng, Yabiao Wang, Chengjie Wang, and Lizhuang Ma. Stroke-based neural painting and stylization with dynamically predicted painting region. In Proceedings of the 31st ACM International Conference on Multimedia, pages 7470-7480, 2023. 2", + "[16] HuggingFace. Diffusers: State-of-the-art diffusion models. https://github.com/huggingface/diffusers, 2023.8", + "[17] Boyuan Jiang, Xiaobin Hu, Donghao Luo, Qingdong He, Chengming Xu, Jinlong Peng, Jiangning Zhang, Chengjie Wang, Yunsheng Wu, and Yanwei Fu. Fitdit: Advancing the authentic garment details for high-fidelity virtual try-on. arXiv preprint arXiv:2411.10499, 2024. 2", + "[18] Ying Jin, Jinlong Peng, Qingdong He, Teng Hu, Hao Chen, Jiafu Wu, Wenbing Zhu, Mingmin Chi, Jun Liu, Yabiao Wang, et al. Dualanodiff: Dual-interrelated diffusion model for few-shot anomaly image generation. Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2025. 2", + "[19] Xuan Ju, Xian Liu, Xintao Wang, Yuxuan Bian, Ying Shan, and Qiang Xu. Brushnet: A plug-and-play image inpainting model with decomposed dual-branch diffusion. arXiv preprint arXiv:2403.06976, 2024. 2, 5", + "[20] Chanran Kim, Jeongin Lee, Shichang Joung, Bongmo Kim, and Yeul-Min Baek. Instantfamily: Masked attention for zero-shot multi-id image generation. arXiv preprint arXiv:2404.19427, 2024. 2", + "[21] Lingjie Kong, Kai Wu, Xiaobin Hu, Wenhui Han, Jinlong Peng, Chengming Xu, Donghao Luo, Jiangning Zhang, Chengjie Wang, and Yanwei Fu. Anymaker: Zero-shot general object customization via decoupled dual-level id injection. Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2025. 2", + "[22] Nupur Kumari, Bingliang Zhang, Richard Zhang, Eli Shechtman, and Jun-Yan Zhu. Multi-concept customization of text-to-image diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1931-1941, 2023. 
2, 7", + "[23] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2023. 2, 3, 4, 5, 7", + "[24] Dongxu Li, Junnan Li, and Steven Hoi. Blip-diffusion: Pretrained subject representation for controllable text-to-image generation and editing. Advances in Neural Information Processing Systems, 36:30146-30166, 2023. 3", + "[25] Pengzhi Li, Qiang Nie, Ying Chen, Xi Jiang, Kai Wu, Yuhuan Lin, Yong Liu, Jinlong Peng, Chengjie Wang, and Feng Zheng. Tuning-free image customization with image and text guidance. In European Conference on Computer Vision, pages 233-250. Springer, 2024. 2", + "[26] Yuheng Li, Haotian Liu, Qingyang Wu, Fangzhou Mu, Jianwei Yang, Jianfeng Gao, Chunyuan Li, and Yong Jae Lee." + ], + "bbox": [ + 91, + 90, + 480, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Gligen: Open-set grounded text-to-image generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22511-22521, 2023. 3, 5", + "[27] Kuan Heng Lin, Sicheng Mo, Ben Klingher, Fangzhou Mu, and Bolei Zhou. Ctrl-x: Controlling structure and appearance for text-to-image generation without guidance. Advances in Neural Information Processing Systems, 37: 128911-128939, 2025. 2, 3, 7", + "[28] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. arXiv preprint arXiv:2210.02747, 2022. 2, 3", + "[29] Xingchao Liu, Chengyue Gong, and Qiang Liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 2, 3", + "[30] Ron Mokady, Amir Hertz, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Null-text inversion for editing real images using guided diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6038–6047, 2023. 2", + "[31] Chong Mou, Xintao Wang, Liangbin Xie, Yanze Wu, Jian Zhang, Zhongang Qi, and Ying Shan. 
T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 4296-4304, 2024. 2, 3", + "[32] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 2", + "[33] Jinlong Peng, Zekun Luo, Liang Liu, and Boshen Zhang. Frih: fine-grained region-aware image harmonization. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 4478-4486, 2024. 2", + "[34] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023. 7", + "[35] Can Qin, Shu Zhang, Ning Yu, Yihao Feng, Xinyi Yang, Yingbo Zhou, Huan Wang, Juan Carlos Niebles, Caiming Xiong, Silvio Savarese, et al. Unicontrol: A unified diffusion model for controllable visual generation in the wild. arXiv preprint arXiv:2305.11147, 2023. 2, 3", + "[36] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 6", + "[37] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2, 4", + "[38] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. 
In Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference," + ], + "bbox": [ + 516, + 92, + 906, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 925, + 508, + 936 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Munich, Germany, October 5-9, 2015, proceedings, part III 18, pages 234-241. Springer, 2015. 2", + "[39] Litu Rout, Yujia Chen, Nataniel Ruiz, Constantine Caramanis, Sanjay Shakkottai, and Wen-Sheng Chu. Semantic image inversion and editing using rectified stochastic differential equations. arXiv preprint arXiv:2410.10792, 2024. 2", + "[40] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22500-22510, 2023. 2, 7", + "[41] Kihyuk Sohn, Nataniel Ruiz, Kimin Lee, Daniel Castro Chin, Irina Blok, Huiwen Chang, Jarred Barber, Lu Jiang, Glenn Entis, Yuanzhen Li, et al. Styledrop: Text-to-image generation in any style. arXiv preprint arXiv:2306.00983, 2023. 3", + "[42] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502, 2020. 2", + "[43] Yizhi Song, Zhifei Zhang, Zhe Lin, Scott Cohen, Brian Price, Jianming Zhang, Soo Ye Kim, and Daniel Aliaga. Objectstitch: Generative object compositing. arXiv preprint arXiv:2212.00932, 2022. 2, 5", + "[44] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 2", + "[45] Zhenxiong Tan, Songhua Liu, Xingyi Yang, Qiaochu Xue, and Xinchao Wang. *Omnicontrol: Minimal and universal control for diffusion transformer.* arXiv preprint arXiv:2411.15098, 3, 2024. 
2, 3, 4, 5, 12", + "[46] Haoxuan Wang, Qingdong He, Jinlong Peng, Hao Yang, Mingmin Chi, and Yabiao Wang. Mamba-yolo-world: Marrying yolo-world with mamba for open-vocabulary detection. IEEE International Conference on Acoustics, Speech, and Signal Processing, 2025. 5", + "[47] Qixun Wang, Xu Bai, Haofan Wang, Zekui Qin, Anthony Chen, Huaxia Li, Xu Tang, and Yao Hu. Instantid: Zero-shot identity-preserving generation in seconds. arXiv preprint arXiv:2401.07519, 2024. 2, 3", + "[48] Su Wang, Chitwan Sahara, Ceslee Montgomery, Jordi Pont-Tuset, Shai Noy, Stefano Pellegrini, Yasumasa Onoe, Sarah Laszlo, David J Fleet, Radu Soricut, et al. Imagen editor and editbench: Advancing and evaluating text-guided image inpainting. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 18359-18369, 2023. 2", + "[49] Xudong Wang, Trevor Darrell, Sai Saketh Rambhatla, Rohit Girdhar, and Ishan Misra. Instancediffusion: Instance-level control for image generation, 2024. 5", + "[50] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 6", + "[51] Daniel Winter, Asaf Shul, Matan Cohen, Dana Berman, Yael Pritch, Alex Rav-Acha, and Yedid Hoshen. Objectmate: A" + ], + "bbox": [ + 91, + 90, + 483, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "recurrence prior for object insertion and subject-driven generation. arXiv preprint arXiv:2412.08645, 2024. 2", + "[52] Peng Xing, Haofan Wang, Yanpeng Sun, Qixun Wang, Xu Bai, Hao Ai, Renyuan Huang, and Zechao Li. Csgo: Content-style composition in text-to-image generation. arXiv preprint arXiv:2408.16766, 2024. 2", + "[53] XLabs-AI. Flux-controlnet-canny-diffusers. https://huggingface.co/XLabs-AI/flux-controlnet-canny-diffusers,2024.7,8", + "[54] XLabs-AI. Flux-controlnet-depth-diffusers. 
https://huggingface.co/XLabs-AI/flux-controlnet-depth-diffusers,2024.7,8", + "[55] XLabs-AI. Flux-ip-adapter. https://huggingface.co/XLabs-AI/flux-ip-adapter, 2024.7,8", + "[56] Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, and Fang Wen. Paint by example: Exemplar-based image editing with diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18381-18391, 2023. 2", + "[57] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In CVPR, 2024. 5", + "[58] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721, 2023. 2, 3", + "[59] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023. 2, 3", + "[60] Shihao Zhao, Dongdong Chen, Yen-Chun Chen, Jianmin Bao, Shaozhe Hao, Lu Yuan, and Kwan-Yee K Wong. Uni-controlnet: All-in-one control to text-to-image diffusion models. Advances in Neural Information Processing Systems, 36, 2024. 2, 3", + "[61] Junhao Zhuang, Yanhong Zeng, Wenran Liu, Chun Yuan, and Kai Chen. A task is worth one word: Learning with task prompts for high-quality versatile image inpainting. In European Conference on Computer Vision, pages 195-211. Springer, 2025. 
2, 5" + ], + "bbox": [ + 516, + 92, + 903, + 672 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "UniCombine: Unified Multi-Conditional Combination with Diffusion Transformer", + "text_level": 1, + "bbox": [ + 225, + 85, + 772, + 128 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Supplementary Material", + "bbox": [ + 380, + 141, + 614, + 162 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A1. Dataset Partitioning Scheme", + "text_level": 1, + "bbox": [ + 89, + 178, + 367, + 195 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In our proposed SubjectSpatial200K dataset, we utilize the ChatGPT-4o assessment scores provided by Subjects200K [45] on Subject Consistency, Composition Structure, and Image Quality to guide the dataset partitioning in our experiments.", + "bbox": [ + 89, + 203, + 483, + 277 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Subject Consistency: Ensuring the identity of the subject image is consistent with that of the ground truth image.", + "- Composition Structure: Verifying a reasonable composition of the subject and ground truth images.", + "- Image Quality: Confirming each image pair maintains high resolution and visual fidelity." + ], + "bbox": [ + 89, + 279, + 482, + 369 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We partition the dataset into 139,403 training samples and 5,827 testing samples through Algorithm 1.", + "bbox": [ + 89, + 369, + 482, + 400 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/081e7579449455ce9a0a6c20f089187297d01d022d3027a0bfa4afb2c5a31aa9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Algorithm 1: Dataset Partitioning Scheme
Input: example
Output: train or test
cs← example["Composite Structure"]
iq← example["Image Quality"]
sc← example["Subject Consistency"]
scores← [cs, iq, sc]
if all(s==5 for s in scores) then
return train;
else if cs≥3 and iq==5 and sc==5 then
return test;
", + "bbox": [ + 93, + 404, + 480, + 588 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A2. More Ablation on CMMDiT Attention", + "text_level": 1, + "bbox": [ + 89, + 609, + 450, + 625 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "More quantitative and qualitative ablation results on the other multi-conditional generative tasks are provided here. The comprehensive ablation results in Tab. A1, Tab. A2, Tab. A3, Fig. A1, Fig. A2, and Fig. A3 demonstrate that the UniCombine performs better with our proposed CMMDiT Attention.", + "bbox": [ + 89, + 633, + 483, + 724 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/7780b4ae3fc30805750b62e7bb1b1c2e1322a555efcac56b2f8f7632150cf53d.jpg", + "table_caption": [], + "table_footnote": [ + "Table A1. Quantitative ablation of CMMDiT Attention mechanism on training-free Subject-Canny task" + ], + "table_body": "
MethodCLIP-I ↑DINO ↑CLIP-T ↑F1 ↑
Ours w/o CMMDiT91.5186.3133.200.16
Ours w/ CMMDiT91.8486.8833.210.17
", + "bbox": [ + 99, + 734, + 470, + 792 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A3. More Qualitative Results", + "text_level": 1, + "bbox": [ + 89, + 844, + 339, + 861 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "More qualitative results are presented in Fig. A4 and Fig. A5.", + "bbox": [ + 89, + 869, + 482, + 901 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/c8c9030826d874847297d6aad99732ff7992fa1efed8eaf2d42146029239dcfe.jpg", + "image_caption": [ + "Figure A1. Qualitative ablation of CMMDiT Attention mechanism on training-free Subject-Canny task" + ], + "image_footnote": [], + "bbox": [ + 516, + 181, + 903, + 321 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/719ea07bb1cd1e89b41905beec203585fbe544e5a1ee5e9ab3f601d287ad2795.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodCLIP-I ↑DINO ↑CLIP-T ↑MSE ↓
Ours w/o CMMDiT90.8385.3833.38547.63
Ours w/ CMMDiT91.1585.7333.41507.40
", + "bbox": [ + 516, + 362, + 900, + 421 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/eec53f0d14c8df15f39c097595e64b40514c9c785b2e319a4f3ff0334abfd869.jpg", + "image_caption": [ + "Table A2. Quantitative ablation of CMMDiT Attention mechanism on training-free Subject-Depth task", + "Figure A2. Qualitative ablation of CMMDiT Attention mechanism on training-free Subject-Depth task" + ], + "image_footnote": [], + "bbox": [ + 516, + 458, + 903, + 595 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/825bdf58c053f99cc780532dc82a60487da430bb38579c85ad77eb49436315d5.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodCLIP-T ↑F1 ↑MSE ↓
Ours w/o CMMDiT33.700.17524.04
Ours w/ CMMDiT33.700.18519.53
", + "bbox": [ + 558, + 638, + 857, + 698 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table A3. Quantitative ablation of CMMDiT Attention mechanism on training-free Multi-Spatial task", + "bbox": [ + 511, + 699, + 903, + 727 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/4009d7abc995632fbc8e9c0c98b070e0d250aff3285f29dd8f899b9c93e1e20e.jpg", + "image_caption": [ + "Figure A3. Qualitative ablation of CMMDiT Attention mechanism on training-free Multi-Spatial task" + ], + "image_footnote": [], + "bbox": [ + 516, + 736, + 903, + 875 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/adfac4a9eebfdf05d1126a0a351b1aaafa113e3c17e6ad1d0d3ccee8c001ec91.jpg", + "image_caption": [ + "Figure A4. More qualitative results on Multi-Spatial and Subject-Insertion tasks." + ], + "image_footnote": [], + "bbox": [ + 93, + 97, + 475, + 858 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/721139a8e4ed891aeab8a9eecb8d33d046e4e4574b78dabb5cd359d3ae06e6b6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 521, + 97, + 903, + 858 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/179cb411321ca8ecf6e101c92a53a0ef8cbc9728a7f08f18d3c37f263493e513.jpg", + "image_caption": [ + "Figure A5. More qualitative results on Subject-Depth and Subject-Canny tasks." 
+ ], + "image_footnote": [], + "bbox": [ + 89, + 97, + 475, + 859 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/6cc95ee7d4dc5bf05bbb25255f5081767689dccf861e6597d6fd8d83a62792f9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 521, + 97, + 903, + 859 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 13 + } +] \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09277/39fed2ad-9645-4fde-a1c3-86b0c99b7b36_model.json b/data/2025/2503_09xxx/2503.09277/39fed2ad-9645-4fde-a1c3-86b0c99b7b36_model.json new file mode 100644 index 0000000000000000000000000000000000000000..2c5367ed245beba45df157301f2d59ea1e0942fe --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/39fed2ad-9645-4fde-a1c3-86b0c99b7b36_model.json @@ -0,0 +1,2736 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.288, + 0.058, + 0.71 + ], + "angle": 270, + "content": "arXiv:2503.09277v2 [cs.CV] 8 Jul 2025" + }, + { + "type": "title", + "bbox": [ + 0.226, + 0.131, + 0.773, + 0.174 + ], + "angle": 0, + "content": "UniCombine: Unified Multi-Conditional Combination with Diffusion Transformer" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.19, + 0.837, + 0.227 + ], + "angle": 0, + "content": "Haoxuan Wang\\(^{1\\dagger}\\), Jinlong Peng\\(^{2\\dagger}\\), Qingdong He\\(^{2}\\), Hao Yang\\(^{3}\\), Ying Jin\\(^{1}\\), Jiafu Wu\\(^{2}\\), Xiaobin Hu\\(^{2}\\), Yanjie Pan\\(^{1}\\), Zhenye Gan\\(^{2}\\), Mingmin Chi\\(^{1*}\\), Bo Peng\\(^{4*}\\), Yabiao Wang\\(^{2,5*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.228, + 0.866, + 0.245 + ], + "angle": 0, + "content": "\\(^{1}\\)Fudan University, \\(^{2}\\)Tencent Youtu Lab, \\(^{3}\\)Shanghai Jiao Tong University, \\(^{4}\\)Shanghai Ocean University \\(^{5}\\)Zhejiang University" + }, + { + "type": "text", + "bbox": [ + 0.258, + 0.263, + 0.733, + 0.28 + ], + "angle": 0, + "content": 
"https://github.com/Xuan-World/UniCombine" + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.293, + 0.9, + 0.685 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.693, + 0.908, + 0.737 + ], + "angle": 0, + "content": "Figure 1. Fantastic results of our proposed UniCombine on multi-conditional controllable generation: (a) Subject-Insertion task. (b) and (c) Subject-Spatial task. (d) Multi-Spatial task. Our unified framework effectively handles any combination of input conditions and achieves remarkable alignment with all of them, including but not limited to text prompts, spatial maps, and subject images." + }, + { + "type": "title", + "bbox": [ + 0.249, + 0.748, + 0.327, + 0.763 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.782, + 0.483, + 0.858 + ], + "angle": 0, + "content": "With the rapid development of diffusion models in image generation, the demand for more powerful and flexible controllable frameworks is increasing. Although existing methods can guide generation beyond text prompts, the challenge of effectively combining multiple conditional inputs" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.749, + 0.907, + 0.901 + ], + "angle": 0, + "content": "while maintaining consistency with all of them remains unsolved. To address this, we introduce UniCombine, a DiT-based multi-conditional controllable generative framework capable of handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images. Specifically, we introduce a novel Conditional MMDiT Attention mechanism and incorporate a trainable LoRA module to build both the training-free and training-based versions. Additionally, we propose a new pipeline to construct SubjectSpatial200K, the first dataset" + }, + { + "type": "page_footnote", + "bbox": [ + 0.115, + 0.876, + 0.228, + 0.888 + ], + "angle": 0, + "content": "\\(\\dagger\\) Equal contribution." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.116, + 0.889, + 0.246, + 0.9 + ], + "angle": 0, + "content": "* Corresponding author." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.876, + 0.246, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.089, + 0.092, + 0.482, + 0.182 + ], + "angle": 0, + "content": "designed for multi-conditional generative tasks covering both the subject-driven and spatially-aligned conditions. Extensive experimental results on multi-conditional generation demonstrate the outstanding universality and powerful capability of our approach with state-of-the-art performance." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.211, + 0.223, + 0.227 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.236, + 0.485, + 0.447 + ], + "angle": 0, + "content": "With the advancement of diffusion-based [13, 42] text-to-image generative technology, a series of single-conditional controllable generative frameworks like ControlNet [59], T2I-Adapter [31], IP-Adapter [58], and InstantID [47] have expanded the scope of the control signals from text prompts to image conditions. It allows users to control more plentiful aspects of the generated images, such as layout, style, characteristics, etc. These conventional approaches are specifically designed for the UNet [38] backbone of Latent Diffusion Models (LDM) [37] with dedicated control networks. Besides, some recent approaches, such as Omini-Control [45], integrate control signals into the Diffusion Transformer (DiT) [7, 23] architecture, which demonstrates superior performance compared to the UNet in LDM." 
+ }, + { + "type": "text", + "bbox": [ + 0.093, + 0.449, + 0.485, + 0.614 + ], + "angle": 0, + "content": "Although the methods mentioned above have achieved a promising single-conditional performance, the challenge of multi-conditional controllable generation is still unsolved. Previous multi-conditional generative methods like UniControl [35] and UniControlNet [60] are generally restricted to handling spatial conditions like Canny or Depth maps and fail to accommodate subject conditions, resulting in limited applicable scenarios. Despite the recently proposed Ctrl-X [27] features controlling structure and appearance together, its performance is unsatisfactory and supports only a limited combination of conditions." + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.615, + 0.485, + 0.765 + ], + "angle": 0, + "content": "Moreover, we assume that many existing generative tasks can be viewed as a multi-conditional generation, such as virtual try-on [5, 17], object insertion [3, 51], style transfer [15, 33, 52], spatially-aligned customization [20, 21, 25, 27], etc. Consequently, there is a need for a unified framework to encompass these generative tasks in a way of multi-conditional generation. This framework should ensure consistency with all input constraints, including subject ID preservation, spatial structural alignment, background coherence, and style uniformity." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.766, + 0.482, + 0.902 + ], + "angle": 0, + "content": "To achieve this, we propose UniCombine, a powerful and universal framework that offers several key advantages: Firstly, our framework is capable of simultaneously handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images. Specifically, we introduce a novel Conditional MMDiT Attention mechanism and incorporate a trainable Denoising-LoRA module to build both the training-free and training-based versions. 
By integrating multiple pre" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.41 + ], + "angle": 0, + "content": "trained Condition-LoRA module weights into the conditional branches, UniCombine achieves excellent training-free performance, which can be improved further after training on the task-specific multi-conditional dataset. Secondly, due to the lack of a publicly available dataset for multi-conditional generative tasks, we build the SubjectSpatial200K dataset to serve as the training dataset and the testing benchmark. Specifically, we generate the subject grounding annotations and spatial map annotations for all the data samples from Subjects200K [45] and therefore formulate our SubjectSpatial200K dataset. Thirdly, our UniCombine can achieve many unprecedented multi-conditional combinations, as shown in Fig. 1, such as combining a reference subject image with the inpainting area of a background image or with the layout guidance of a depth (or canny) map while imposing precise control via text prompt. Furthermore, extensive experiments on Subject-Insertion, Subject-Spatial, and Multi-Spatial conditional generation demonstrate the outstanding universality and powerful capability of our method against other existing specialized approaches." + }, + { + "type": "text", + "bbox": [ + 0.531, + 0.412, + 0.895, + 0.427 + ], + "angle": 0, + "content": "In summary, we highlight our contributions as follows:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.431, + 0.905, + 0.491 + ], + "angle": 0, + "content": "- We present UniCombine, a DiT-based multi-conditional controllable generative framework capable of handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images." 
+ }, + { + "type": "text", + "bbox": [ + 0.514, + 0.491, + 0.905, + 0.567 + ], + "angle": 0, + "content": "- We construct the SubjectSpatial200K dataset, which encompasses both subject-driven and spatially-aligned conditions for all text-image sample pairs. It addresses the absence of a publicly available dataset for training and testing multi-conditional controllable generative models." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.567, + 0.905, + 0.643 + ], + "angle": 0, + "content": "- We conduct extensive experiments on Subject-Insertion, Subject-Spatial, and Multi-Spatial conditional generative tasks. The experimental results demonstrate the state-of-the-art performance of our UniCombine, which effectively aligns with all conditions harmoniously." + }, + { + "type": "list", + "bbox": [ + 0.514, + 0.431, + 0.905, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.666, + 0.655, + 0.682 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.694, + 0.734, + 0.71 + ], + "angle": 0, + "content": "2.1. Diffusion-Based Models" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.72, + 0.905, + 0.901 + ], + "angle": 0, + "content": "Diffusion-based [13, 42] models have demonstrated superior performance than GAN-based [9] ones across various domains, including controllable generation [18, 31, 47, 58, 59], image editing [11, 30, 39], customized generation [8, 22, 40], object insertion [4, 43, 56], mask-guided inpainting [19, 48, 61], and so on. These breakthroughs begin with the LDM [37] and are further advanced with the DiT [32] architecture. The latest text-to-image generative models, SD3 [7] and FLUX [23], have attained state-of-the-art results by employing the Rectified Flow [28, 29] training strategy, the RPE [44] positional embedding and the MultiModal Diffusion Transformer (MMDiT) [7] architecture." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.094, + 0.088, + 0.322, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.143, + 0.34, + 0.271, + 0.352 + ], + "angle": 0, + "content": "(a) Overall Framework" + }, + { + "type": "image", + "bbox": [ + 0.329, + 0.088, + 0.547, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.353, + 0.34, + 0.522, + 0.353 + ], + "angle": 0, + "content": "(b) Single-Conditional Setting" + }, + { + "type": "image", + "bbox": [ + 0.553, + 0.089, + 0.906, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.649, + 0.34, + 0.811, + 0.353 + ], + "angle": 0, + "content": "(c) Multi-Conditional Setting" + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.36, + 0.907, + 0.472 + ], + "angle": 0, + "content": "Figure 2. Overview of our proposed UniCombine. (a) The overall framework. We regard the MMDiT-based diffusion models as consisting of the text branch and the denoising branch. Based on it, our UniCombine introduces multiple conditional branches to process the input conditions. (b) The single-conditional setting of our UniCombine. It is equivalent to OminiControl [45] which is a special case of our proposed UniCombine framework under a single-conditional setting. (c) The multi-conditional setting of our UniCombine. Our LoRA Switching module adaptively activates the pre-trained Condition-LoRA modules on the weights of the denoising branch according to the conditional types. The proposed Conditional MMDiT Attention mechanism is used to replace the original MMDiT Attention mechanism for handling the unified multi-conditional input sequence. Whether to load the optional Denoising-LoRA module is the difference between the training-free and training-based versions." 
+ }, + { + "type": "title", + "bbox": [ + 0.091, + 0.485, + 0.315, + 0.499 + ], + "angle": 0, + "content": "2.2. Controllable Generation" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.507, + 0.485, + 0.778 + ], + "angle": 0, + "content": "Controllable generation allows for customizing the desired spatial layout, filter style, or subject appearance in the generated images. A series of methods such as ControlNet [59], T2I-Adapter [31], GLIGEN [26], and ZestGuide [6] successfully introduce the spatial conditions into controllable generation, enabling models to control the spatial layout of generated images. Another series of methods, such as IP-Adapter [58], InstantID [47], BLIP-Diffusion [24], and StyleDrop [41] incorporate the subject conditions into controllable generation, ensuring consistency between generated images and reference images in style, characteristics, subject appearance, etc. To unify these two tasks, OminiControl [45] proposes a novel MMDiT-based controllable framework to handle various conditions with a unified pipeline. Unfortunately, it lacks the capability to control generation with multiple conditions. To this end, we propose UniCombine, which successfully extends this framework to multi-conditional scenarios." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.789, + 0.46, + 0.803 + ], + "angle": 0, + "content": "2.3. Multi-Conditional Controllable Generation" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.811, + 0.484, + 0.902 + ], + "angle": 0, + "content": "As controllable generation advances, merely providing a single condition to guide the image generation no longer satisfies the needs. As a result, research on multi-conditional controllable generation has emerged. 
Existing methods like UniControl [35], UniControlNet [60] and Cocktail [14] exhibit acceptable performance when simul" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.485, + 0.907, + 0.652 + ], + "angle": 0, + "content": "taneously leveraging multiple spatial conditions for image generation. However, there is a lack of multi-conditional generative models that support utilizing both spatial conditions and subject conditions to guide the generative process together. Although the recently proposed method Ctrl-X [27] features controlling the appearance and structure simultaneously, its performance remains unsatisfactory with a limited combination of conditions and it is not compatible with the Diffusion Transformer architecture. To address the aforementioned limitations, we propose UniCombine to enable the flexible combination of various control signals." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.679, + 0.605, + 0.695 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.709, + 0.642, + 0.724 + ], + "angle": 0, + "content": "3.1. Preliminary" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.735, + 0.907, + 0.903 + ], + "angle": 0, + "content": "In this work, we mainly explore the latest generative models that utilize the Rectified Flow (RF) [28, 29] training strategy and the MMDiT [7] backbone architecture, like FLUX [23] and SD3 [7]. For the source noise distribution \\(X_0 \\sim p_{\\mathrm{noise}}\\) and the target image distribution \\(X_1 \\sim p_{\\mathrm{data}}\\), the RF defines a linear interpolation between them as \\(X_t = (1 - t)X_0 + tX_1\\) for \\(t \\in [0,1]\\). The training objective is to learn a time-dependent vector field \\(v_t(X_t, t; \\theta)\\) that describes the trajectory of the ODE \\(dX_t = v_t(X_t, t; \\theta)dt\\). Specifically, \\(v_t(X_t, t; \\theta)\\) is optimized to approximate the constant velocity \\(X_1 - X_0\\), leading to the loss function as Eq. (1)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "equation", + "bbox": [ + 0.091, + 0.108, + 0.484, + 0.139 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {R F}} (\\theta) = \\mathbb {E} _ {X _ {1} \\sim p _ {\\text {d a t a}}, X _ {0} \\sim p _ {\\text {n o i s e}}, t \\sim U [ 0, 1 ]} \\left[ \\| (X _ {1} - X _ {0}) - v _ {t} (X _ {t}, t; \\theta) \\| ^ {2} \\right] \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.14, + 0.484, + 0.442 + ], + "angle": 0, + "content": "In this paper, we propose a concept of branch to differentiate the processing flows of input embeddings from different modalities in MMDiT-based models. As shown in Fig. 2 (a), instead of the single-branch architecture [37] where the text prompt is injected into the denoising branch via crossattention, MMDiT uses two independent transformers to construct the text branch and the denoising branch. Based on it, OminiControl [45] incorporates a Condition-LoRA module onto the weights of the denoising branch to process the input conditional embedding, thus forming its Conditional Branch, as depicted in Fig. 2 (b). It is worth noting that, OminiControl [45] can be regarded as a special case of our proposed UniCombine framework under the single-conditional setting. It provides the pre-trained Condition-LoRA modules to meet the need for our multi-conditional settings. In the single-conditional setting, the text branch embedding \\( T \\), the denoising branch embedding \\( X \\), and the conditional branch embedding \\( C \\) are concatenated to form a unified sequence \\( [T;X;C] \\) to be processed in the MMDiT Attention mechanism." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.451, + 0.226, + 0.465 + ], + "angle": 0, + "content": "3.2. 
UniCombine" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.472, + 0.484, + 0.565 + ], + "angle": 0, + "content": "Building upon the MMDiT-based text-to-image generative model FLUX [23], we propose UniCombine, a multi-conditional controllable generative framework consisting of various conditional branches. Each conditional branch is in charge of processing one conditional embedding, thus forming a unified embedding sequence \\( S \\) as presented in Eq. (2)." + }, + { + "type": "equation", + "bbox": [ + 0.215, + 0.567, + 0.482, + 0.582 + ], + "angle": 0, + "content": "\\[\nS = [ T; X; C _ {1}; \\dots ; C _ {N} ] \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.585, + 0.484, + 0.809 + ], + "angle": 0, + "content": "Given that the single-conditional setting of our UniCombine is equivalent to OmniControl [45], we only focus on the multi-conditional setting in this section. Firstly, we introduce a LoRA Switching module to manage multiple conditional branches effectively. Secondly, we introduce a novel Conditional MMDiT Attention mechanism to process the unified sequence \\( S \\) in the multi-conditional setting. Thirdly, we present an insight analysis of our training-free strategy, which leverages the pre-trained Condition-LoRA module weights to perform a training-free multi-conditional controllable generation. Lastly, we present a feasible training-based strategy, which utilizes a trainable Denoising-LoRA module to enhance the performance further after training on a task-specific multi-conditional dataset." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.811, + 0.484, + 0.901 + ], + "angle": 0, + "content": "LoRA Switching Module. Before denoising with multiple input conditions, the Condition-LoRA modules pre-trained under single-conditional settings should be loaded onto the weights of the denoising branch, like \\([CondLoRA_1, CondLoRA_2, \\ldots]\\). 
Then the LoRA Switching module determines which one of them should be" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.287 + ], + "angle": 0, + "content": "activated according to the type of input conditions, forming a one-hot gating mechanism \\([0,1,0,\\dots,0]\\), as shown in Fig. 2 (c). Subsequently, different conditional branches with different activated Condition-LoRA modules are used for processing different conditional embeddings, resulting in a minimal number of additional parameters introduced for different conditions. Unlike the single-conditional setting in Fig. 2 (b), which only needs loading LoRA modules, the LoRA Switching module in Fig. 2 (c) enables adaptive selection among multiple LoRA modules to provide the matching conditional branches for each conditional embeddings, granting our framework greater flexibility and adaptability to handle diverse conditional combinations." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.288, + 0.905, + 0.47 + ], + "angle": 0, + "content": "Conditional MMDiT Attention. After concatenating the output embeddings from these \\(N\\) conditional branches, the unified sequence \\(S\\) cannot be processed through the original MMDiT Attention mechanism due to two major challenges: (1) The computational complexity scales quadratically as \\(O(N^2)\\) with respect to the number of conditions, which becomes especially problematic when handling multiple high-resolution conditions. (2) When performing MMDiT Attention on the unified sequence \\(S\\), different condition signals interfere with each other during the attention calculation, making it difficult to effectively utilize the pre-trained Condition-LoRA module weights for the denoising process." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.47, + 0.905, + 0.756 + ], + "angle": 0, + "content": "To address these challenges, we introduce a novel Conditional MMDiT Attention mechanism (CMMDiT Attention) as depicted in Fig. 
2 (c) to replace the original MMDiT Attention. Instead of feeding the entire unified sequence \\( S \\) into the MMDiT Attention at once, CMMDiT Attention follows distinct computational mechanisms according to which branch is serving as queries. The core idea is that the branch serving as a query aggregates the information from different scopes of the unified sequence \\( S \\) depending on its type. Specifically, when the denoising branch \\( X \\) and the text branch \\( T \\) serve as queries, their scope of keys and values correspond to the entire unified sequence \\( S \\), granting them a global receptive field and the ability to aggregate information from all conditional branches. In contrast, when the conditional branches \\( C_i \\) serve as queries, their receptive fields do not encompass one another. Their scope of keys and values are restricted to the subsequence \\( S_i \\) as presented in Eq. (3), which prevents feature exchange and avoids information entanglement between different conditions." + }, + { + "type": "equation", + "bbox": [ + 0.662, + 0.761, + 0.905, + 0.776 + ], + "angle": 0, + "content": "\\[\nS _ {i} = [ T; X; C _ {i} ] \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.907, + 0.825 + ], + "angle": 0, + "content": "Furthermore, the CMMDiT Attention reduces computational complexity from \\( O(N^2) \\) to \\( O(N) \\) as the number of conditions increases, making it more scalable." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.826, + 0.905, + 0.901 + ], + "angle": 0, + "content": "Training-free Strategy. The following analyses provide a detailed explanation of why our UniCombine is capable of seamlessly integrating and effectively reusing the pretrained Condition-LoRA module weights to tackle multi-conditional challenges in a training-free manner." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.095, + 0.088, + 0.483, + 0.169 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.175, + 0.483, + 0.202 + ], + "angle": 0, + "content": "Figure 3. Average \\( \\mathrm{X} \\rightarrow \\) Subject cross-attention map of the insertion area." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.217, + 0.484, + 0.278 + ], + "angle": 0, + "content": "On the one hand, when the conditional embeddings \\( C_i \\) serve as queries in CMMDiT, they follow the same attention computational paradigm as in the MMDiT of single-conditional settings, as indicated in Eq. (4)." + }, + { + "type": "equation", + "bbox": [ + 0.108, + 0.281, + 0.482, + 0.315 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\operatorname {C M M D i T} \\left(Q = C _ {i} ^ {q}, K = \\left[ T ^ {k}, X ^ {k}, C _ {i} ^ {k} \\right], V = \\left[ T ^ {v}, X ^ {v}, C _ {i} ^ {v} \\right]\\right) \\\\ = \\operatorname {M M D i T} (Q = C ^ {q}, K = [ T ^ {k}, X ^ {k}, C ^ {k} ], V = [ T ^ {v}, X ^ {v}, C ^ {v} ]) \\tag {4} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.322, + 0.484, + 0.382 + ], + "angle": 0, + "content": "This consistent computational paradigm enables the conditional branches to share the same feature extraction capability between the multi-conditional setting and the single-conditional setting." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.382, + 0.484, + 0.503 + ], + "angle": 0, + "content": "On the other hand, when the denoising embedding \\( X \\) and the text prompt embedding \\( T \\) serve as queries in CMMDiT, their attention computational paradigm diverges from the single-conditional settings. As illustrated in Eq. 
(5), when the denoising embedding \\( X \\) is used as a query for attention computation with multiple conditional embeddings in CMMDiT, the attention score matrix is computed between \\( X \\) and all the conditional embeddings." + }, + { + "type": "equation", + "bbox": [ + 0.093, + 0.505, + 0.49, + 0.564 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathrm {C M M D i T} (Q = X ^ {q}, K / V = [ X ^ {k / v}, T ^ {k / v}, C _ {1} ^ {k / v}, \\dots , C _ {N} ^ {k / v} ]) \\\\ = \\operatorname {s o f t m a x} \\left(\\frac {1}{\\sqrt {d i m}} X ^ {q} \\left[ X ^ {k}, T ^ {k}, C _ {1} ^ {k}, \\dots , C _ {N} ^ {k} \\right] ^ {\\top}\\right) \\left[ X ^ {v}, T ^ {v}, C _ {1} ^ {v}, \\dots , C _ {N} ^ {v} \\right] \\tag {5} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.569, + 0.484, + 0.645 + ], + "angle": 0, + "content": "It allows \\( X \\) to extract and integrate information from each of the conditional embeddings separately and fusion them. This divide-and-conquer computational paradigm enables the text branch and denoising branch to fuse the conditional features effectively." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.645, + 0.484, + 0.704 + ], + "angle": 0, + "content": "By leveraging the computational paradigms mentioned above, our UniCombine is able to perform a training-free multi-conditional controllable generation with the pretrained Condition-LoRA modules." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.705, + 0.484, + 0.902 + ], + "angle": 0, + "content": "Training-based Strategy. However, due to the lack of training, solely relying on the softmax operation in Eq. (5) to balance the attention score distribution across multiple conditional embeddings may result in an undesirable feature fusion result, making our training-free version unsatisfactory in some cases. 
To address this issue, we introduce a trainable Denoising-LoRA module within the denoising branch to rectify the distribution of attention scores in Eq. (5). During training, we keep all the Condition-LoRA modules frozen to preserve the conditional extracting capability and train the Denoising-LoRA module solely on the task-specific multi-conditional dataset, as shown in Fig. 2 (c). After training, the denoising embedding \\( X \\) learns to" + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.089, + 0.903, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.531, + 0.296, + 0.887, + 0.311 + ], + "angle": 0, + "content": "Figure 4. SubjectSpatial200K dataset construction pipeline." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.324, + 0.907, + 0.4 + ], + "angle": 0, + "content": "better aggregate the appropriate information during the CM-MDiT Attention operation. As presented in Fig. 3, the average \\(\\mathrm{X} \\rightarrow\\) Subject attention map within the inpainting area is more concentrated on the subject area in the training-based version." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.41, + 0.763, + 0.426 + ], + "angle": 0, + "content": "3.3. SubjectSpatial200K dataset" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.432, + 0.907, + 0.599 + ], + "angle": 0, + "content": "Our SubjectSpatial200K dataset aims to address the lack of a publicly available dataset for multi-conditional generative tasks. Existing datasets fail to include both the subject-driven and spatially-aligned annotations. Recently, the Subjects200K [45] dataset provides a publicly accessible dataset for subject-driven generation. Based on it, we introduce the SubjectSpatial200K dataset, which is a unified high-quality dataset designed for training and testing multi-conditional controllable generative models. This dataset includes comprehensive annotations as elaborated below. Besides, the construction pipeline is detailed in Fig. 
4." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.599, + 0.906, + 0.734 + ], + "angle": 0, + "content": "Subject Grounding Annotation. The subject grounding annotation is significantly necessary for many generative tasks like instance-level inpainting [19, 61], instance-level controllable generation [26, 49], and object insertion [4, 43]. By leveraging the open-vocabulary object detection model Mamba-YOLO-World [46] on Subjects200K, we detect bounding boxes for all subjects according to their category descriptions and subsequently derive the corresponding mask regions." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.735, + 0.906, + 0.811 + ], + "angle": 0, + "content": "Spatial Map Annotation. The spatial map annotation further extends the applicable scope of our dataset to spatially-aligned synthesis tasks. Specifically, we employ the Depth-Anything [57] model and the OpenCV [1] library on Subjects200K to derive the Depth and Canny maps." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.824, + 0.638, + 0.841 + ], + "angle": 0, + "content": "4. Experiment" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.849, + 0.596, + 0.865 + ], + "angle": 0, + "content": "4.1. Setup" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.906, + 0.902 + ], + "angle": 0, + "content": "Implementation. We use the FLUX.1-schnell [23] as our base model and the weights provided by OminiControl [45]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.122, + 0.089, + 0.875, + 0.339 + ], + "angle": 0, + "content": "
TaskMethodGenerative QualityControllabilitySubject ConsistencyText Consistency
FID ↓SSIM ↑F1 ↑MSE ↓CLIP-I ↑DINO ↑CLIP-T ↑
Multi-SpatialUniControl44.170.320.071346.02--30.28
UniControlNet20.960.280.091231.06--32.74
UniCombine (training-free)10.350.540.18519.53--33.70
UniCombine (training-based)6.820.640.24165.90--33.45
Subject-InsertionObjectStitch26.860.37--93.0582.3432.25
AnyDoor26.070.37--94.8886.0432.55
UniCombine (training-free)6.370.76--95.6089.0133.11
UniCombine (training-based)4.550.81--97.1492.9633.08
Subject-DepthControlNet w. IP-Adapter29.930.34-1295.8080.4162.2632.94
Ctrl-X52.370.36-2644.9078.0850.8330.20
UniCombine (training-free)10.030.48-507.4091.1585.7333.41
UniCombine (training-based)6.660.55-196.6594.4790.3133.30
Subject-CannyControlNet w. IP-Adapter30.380.380.09-79.8060.1932.85
Ctrl-X47.890.360.05-79.3554.3130.34
UniCombine (training-free)10.220.490.17-91.8486.8833.21
UniCombine (training-based)6.010.610.24-95.2692.5933.30
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.345, + 0.905, + 0.375 + ], + "angle": 0, + "content": "Table 1. Quantitative comparison of our method with existing approaches on Multi-Spatial, Subject-Insertion, Subject-Depth, and Subject-Canny conditional generative tasks. The bold and underlined figures represent the optimal and sub-optimal results, respectively." + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.388, + 0.481, + 0.677 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.102, + 0.684, + 0.472, + 0.699 + ], + "angle": 0, + "content": "Figure 5. Qualitative comparison on Multi-Spatial generation." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.717, + 0.483, + 0.822 + ], + "angle": 0, + "content": "as our pre-trained Condition-LoRA module weights. During the training of our Denoising-LoRA module, we use a rank of 4, consistent with the Condition-LoRA. We choose the Adam optimizer with a learning rate of \\(1e^{-4}\\) and set the weight decay to 0.01. Our models are trained for 30,000 steps on 16 NVIDIA V100 GPUs at a resolution of \\(512 \\times 512\\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.825, + 0.484, + 0.903 + ], + "angle": 0, + "content": "**Benchmarks.** We evaluate the performance of our method in both training-free and training-based versions. The training and testing datasets are partitioned from the SubjectSpatial200K dataset based on image quality assessment scores evaluated by ChatGPT-4o, with details provided in Sec. A1." + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.388, + 0.905, + 0.677 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.685, + 0.905, + 0.7 + ], + "angle": 0, + "content": "Figure 6. Qualitative comparison on Subject-Insertion generation." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.718, + 0.905, + 0.748 + ], + "angle": 0, + "content": "Importantly, the dataset partitioning scheme remains consistent in all experiments." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.75, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Metrics. To evaluate the subject consistency, we calculate the CLIP-I [36] score and DINO [2] score between the generated images and the ground truth images. To assess the generative quality, we compute the FID [12] and SSIM [50] between the generated image set and the ground truth image set. To measure the controllability, we compute the F1 Score for edge conditions and the MSE score for depth conditions between the extracted maps from generated images and the original conditions. Additionally, we adopt the CLIP-T [36] score to estimate the text consistency between" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.504, + 0.936 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.095, + 0.089, + 0.482, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.098, + 0.386, + 0.474, + 0.401 + ], + "angle": 0, + "content": "Figure 7. Qualitative comparison on Subject-Depth generation." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.413, + 0.375, + 0.429 + ], + "angle": 0, + "content": "the generated images and the text prompts." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.436, + 0.222, + 0.451 + ], + "angle": 0, + "content": "4.2. Main Result" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.458, + 0.483, + 0.503 + ], + "angle": 0, + "content": "We conduct extensive and comprehensive comparative experiments on the Multi-Spatial, Subject-Insertion, and Subject-Spatial conditional generative tasks." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.51, + 0.4, + 0.525 + ], + "angle": 0, + "content": "4.2.1. 
Multi-Spatial Conditional Generation" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.528, + 0.483, + 0.65 + ], + "angle": 0, + "content": "The Multi-Spatial conditional generation aims to generate images adhering to the collective layout constraints of diverse spatial conditions. This requires the model to achieve a more comprehensive layout control based on input conditions in a complementary manner. The comparative results in Tab. 1 and Fig. 5 demonstrate that our method outperforms existing multi-spatial conditional generation approaches in generative quality and controllability." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.656, + 0.428, + 0.671 + ], + "angle": 0, + "content": "4.2.2. Subject-Insertion Conditional Generation" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.675, + 0.483, + 0.901 + ], + "angle": 0, + "content": "The Subject-Insertion conditional generation requires the model to generate images where the reference subject is inserted into the masked region of the target background. As illustrated in Tab. 1 and Fig. 6, our UniCombine demonstrates superior performance compared to previous methods with three advantages: Firstly, our method ensures that the reference subject is inserted into the background with high consistency and harmonious integration. Secondly, our method excels in open-world object insertion without requiring test-time tuning, unlike conventional customization methods [22, 40]. Finally, our method demonstrates strong semantic comprehension capabilities, enabling it to extract the desired object from a complex subject image with a non-white background, rather than simply pasting the entire subject image into the masked region." + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.088, + 0.905, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.521, + 0.385, + 0.898, + 0.4 + ], + "angle": 0, + "content": "Figure 8. Qualitative comparison on Subject-Canny generation." 
+ }, + { + "type": "title", + "bbox": [ + 0.513, + 0.415, + 0.836, + 0.43 + ], + "angle": 0, + "content": "4.2.3. Subject-Spatial Conditional Generation" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.435, + 0.907, + 0.782 + ], + "angle": 0, + "content": "The Subject-Spatial conditional generation focuses on generating images of the reference subject while ensuring the layout aligns with specified spatial conditions. We compare our method with Ctrl-X [27] and a simple baseline model. Ctrl-X is a recently proposed model based on SDXL [34] that simultaneously controls structure and appearance. The baseline model is constructed by integrating the FLUX ControlNet [53, 54] and FLUX IP-Adapter [55] into the FLUX.1-dev [23] base model. Specifically, we divided the Subject-Spatial generative task into different experimental groups based on the type of spatial conditions, referred to as Subject-Depth and Subject-Canny, respectively. As presented in Fig. 7, Fig. 8, and Tab. 1, the experimental results demonstrate the superior performance of our UniCombine: Firstly, our method exhibits stronger semantic comprehension capability, generating the reference subject in the accurate localization of the spatial conditions without confusing appearance features. Secondly, our method demonstrates greater adaptability, generating the reference subject with reasonable morphological transformations to align with the guidance of spatial conditions and text prompts. Lastly, our method achieves superior subject consistency while maintaining excellent spatial coherence." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.791, + 0.685, + 0.805 + ], + "angle": 0, + "content": "4.2.4. Textual Guidance" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.81, + 0.907, + 0.901 + ], + "angle": 0, + "content": "As shown in Fig. 1 and Tab. 1, our method not only allows for controllable generation by combining multiple conditions but also enables precise textual guidance simultaneously. 
By utilizing a unified input sequence \\( S = [T; X; C_1; \\ldots; C_N] \\) during the denoising process, our UniCombine effectively aligns the descriptive words in \\( T \\) with" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.093, + 0.089, + 0.495, + 0.142 + ], + "angle": 0, + "content": "
MethodCLIP-I ↑DINO ↑CLIP-T ↑AttnOps ↓
Ours w/o CMMDiT95.4788.4233.10732.17M
Ours w/ CMMDiT95.6089.0133.11612.63M
" + }, + { + "type": "table_caption", + "bbox": [ + 0.091, + 0.143, + 0.486, + 0.186 + ], + "angle": 0, + "content": "Table 2. Quantitative ablation of CMMDiT Attention mechanism on training-free Subject-Insertion task. AttnOps is short for the number of attention operations." + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.199, + 0.482, + 0.337 + ], + "angle": 0, + "content": "
Background\nSubjectTraining-free\nw/o CMMDiTTraining-free\nw/ CMMDiTBackground\nSubjectTraining-free\nw/o CMMDiTTraining-free\nw/ CMMDiT
inconsistentsuccessinconsistentsuccess
failsuccessfailsuccess
" + }, + { + "type": "table_caption", + "bbox": [ + 0.091, + 0.339, + 0.483, + 0.368 + ], + "angle": 0, + "content": "Figure 9. Qualitative ablation of CMMDiT Attention mechanism on training-free Subject-Insertion task." + }, + { + "type": "table", + "bbox": [ + 0.107, + 0.38, + 0.466, + 0.433 + ], + "angle": 0, + "content": "
MethodCLIP-I ↑DINO ↑CLIP-T ↑
Ours w/ Text-LoRA96.9792.3233.10
Ours w/ Denoising-LoRA97.1492.9633.08
" + }, + { + "type": "table_caption", + "bbox": [ + 0.091, + 0.434, + 0.483, + 0.462 + ], + "angle": 0, + "content": "Table 3. Quantitative ablation of trainable LoRA on training-based Subject-Insertion task." + }, + { + "type": "image", + "bbox": [ + 0.093, + 0.474, + 0.482, + 0.613 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.616, + 0.483, + 0.645 + ], + "angle": 0, + "content": "Figure 10. Qualitative ablation of trainable LoRA on training-based Subject-Insertion task." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.658, + 0.483, + 0.705 + ], + "angle": 0, + "content": "the relevant features in \\( C_i \\) and the corresponding patches in \\( X \\), thereby achieving a remarkable text-guided multi-conditional controllable generation." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.713, + 0.245, + 0.73 + ], + "angle": 0, + "content": "4.3. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.735, + 0.483, + 0.78 + ], + "angle": 0, + "content": "We exhibit the ablation study results conducted on the Subject-Insertion task in this section, while more results on the other tasks are provided in Sec. A2." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.78, + 0.483, + 0.9 + ], + "angle": 0, + "content": "Effect of Conditional MMDiT Attention. To evaluate the effectiveness of our proposed Conditional MMDiT Attention mechanism, we replace the CMMDiT Attention with the original MMDiT Attention and test its training-free performance to avoid the influence of training data. As shown in Tab. 2 and Fig. 9, our framework attains superior performance with fewer attention operations when employing the CMMDiT Attention mechanism." + }, + { + "type": "table", + "bbox": [ + 0.539, + 0.089, + 0.88, + 0.142 + ], + "angle": 0, + "content": "
MethodCLIP-I ↑DINO ↑CLIP-T ↑
Ours w/ DSB only96.8592.3833.07
Ours w/ DSB and SSB97.1492.9633.08
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.143, + 0.905, + 0.184 + ], + "angle": 0, + "content": "Table 4. Quantitative ablation of training strategy on training-based Subject-Insertion task. DSB: Dual-Stream Blocks. SSB: Single-Stream Blocks." + }, + { + "type": "table", + "bbox": [ + 0.516, + 0.2, + 0.905, + 0.337 + ], + "angle": 0, + "content": "
Background\nSubjectTraining-based\nw/ DSB onlyTraining-based\nw/ DSB + SSBBackground\nSubjectTraining-based\nw/ DSB onlyTraining-based\nw/ DSB + SSB
inconsistentsuccessinconsistentsuccessinconsistentsuccess
inconsistentsuccessfailsuccessinconsistentinconsistent
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.339, + 0.905, + 0.38 + ], + "angle": 0, + "content": "Figure 11. Qualitative ablation of training strategy on training-based Subject-Insertion task. DSB: Dual-Stream Blocks. SSB: Single-Stream Blocks." + }, + { + "type": "table", + "bbox": [ + 0.522, + 0.397, + 0.897, + 0.503 + ], + "angle": 0, + "content": "
ModelGPU Memory ↓Add Params ↓
FLUX (bf16, base model)32933M-
CN, 1 cond35235M744M
IP, 1 cond35325M918M
CN + IP, 2 cond36753M1662M
Ours (training-free), 2 cond33323M29M
Ours (training-based), 2 cond33349M44M
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.504, + 0.905, + 0.533 + ], + "angle": 0, + "content": "Table 5. Comparison of inference GPU memory cost and additionally introduced parameters. CN: ControlNet. IP: IP-Adapter." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.551, + 0.907, + 0.672 + ], + "angle": 0, + "content": "Different Options for Trainable LoRA. To evaluate whether the trainable LoRA module can be applied to the text branch instead of the denoising branch, we load a Text-LoRA in the text branch, with a configuration identical to that of the Denoising-LoRA. The Tab. 3 and Fig. 10 indicate that applying the trainable LoRA module to the denoising branch better modulates the feature aggregation operation across multiple conditional branches." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.673, + 0.907, + 0.824 + ], + "angle": 0, + "content": "Training Strategy. As the parameter scale of the base model increases, the FLUX adaptations of ControlNet [53, 54] and IP-adapter [55] provided by the HuggingFace [16] community inject conditional features only into the dual-stream MMDiT blocks, rather than the entire network, to save memory. In contrast, since our Denoising-LoRA module introduces only a small number of parameters, we incorporate it into both the dual-stream and single-stream blocks to achieve better performance. The results in Tab. 4 and Fig. 11 confirm the validity of our choice." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.825, + 0.905, + 0.901 + ], + "angle": 0, + "content": "Computational Cost. The overheads of our approach in terms of inference GPU memory cost and additionally introduced parameters are minimal. The comparison results against the FLUX ControlNet [53, 54] and FLUX IP-Adapter [55] are shown in Tab. 5." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.166 + ], + "angle": 0, + "content": "More Conditional Branches. Our model places no restrictions on the number of supported conditions. The results shown in Fig. 12 demonstrate our model's strong scalability. As the number of conditional branches increases, the level of control becomes finer." + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.182, + 0.48, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.346, + 0.483, + 0.372 + ], + "angle": 0, + "content": "Figure 12. From left to right are training-free multi-conditional combination tasks under: \\(1/2/3/4\\) conditions." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.397, + 0.484, + 0.489 + ], + "angle": 0, + "content": "More Application Scenarios. Our UniCombine can be easily extended to new scenarios, such as reference-based image stylization. After training a new Condition-LoRA on StyleBooth [10] dataset, our UniCombine is able to integrate the style of the reference image with other conditions successfully, as demonstrated in Fig. 13." + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.504, + 0.48, + 0.636 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.119, + 0.639, + 0.454, + 0.653 + ], + "angle": 0, + "content": "Figure 13. Training-free Spatial-Style combination task." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.692, + 0.21, + 0.708 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.72, + 0.484, + 0.901 + ], + "angle": 0, + "content": "We present UniCombine, a DiT-based multi-conditional controllable generative framework capable of handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images. 
Extensive experiments on Subject-Insertion, Subject-Spatial, and Multi-Spatial conditional generative tasks demonstrate the state-of-the-art performance of our UniCombine in both training-free and training-based versions. Additionally, we propose the SubjectSpatial200K dataset to address the lack of a publicly available dataset for training and testing multi-conditional generative models. We believe our work can advance the development of the controllable generation field." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.091, + 0.61, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.115, + 0.908, + 0.142 + ], + "angle": 0, + "content": "[1] G. Bradski. The OpenCV Library. Dr. Dobb's Journal of Software Tools, 2000. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.145, + 0.906, + 0.214 + ], + "angle": 0, + "content": "[2] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9650-9660, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.217, + 0.905, + 0.257 + ], + "angle": 0, + "content": "[3] Jiaxuan Chen, Bo Zhang, Qingdong He, Jinlong Peng, and Li Niu. Mureobjectstitch: Multi-reference image composition. arXiv preprint arXiv:2411.07462, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.259, + 0.905, + 0.327 + ], + "angle": 0, + "content": "[4] Xi Chen, Lianghua Huang, Yu Liu, Yujun Shen, Deli Zhao, and Hengshuang Zhao. Anydoor: Zero-shot object-level image customization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6593-6602, 2024. 
2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.33, + 0.905, + 0.399 + ], + "angle": 0, + "content": "[5] Zheng Chong, Xiao Dong, Haoxiang Li, Shiyue Zhang, Wenqing Zhang, Xujie Zhang, Hanqing Zhao, Dongmei Jiang, and Xiaodan Liang. Catvton: Concatenation is all you need for virtual try-on with diffusion models. arXiv preprint arXiv:2407.15886, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.402, + 0.905, + 0.471 + ], + "angle": 0, + "content": "[6] Guillaume Couairon, Marlene Careil, Matthieu Cord, Stephane Lathuiliere, and Jakob Verbeek. Zero-shot spatial layout conditioning for text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2174-2183, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.473, + 0.905, + 0.543 + ], + "angle": 0, + "content": "[7] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis, 2024. URL https://arxiv.org/abs/2403.03206, 2.2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.545, + 0.905, + 0.613 + ], + "angle": 0, + "content": "[8] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.616, + 0.905, + 0.671 + ], + "angle": 0, + "content": "[9] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks. Communications of the ACM, 63(11):139-144, 2020. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.673, + 0.905, + 0.727 + ], + "angle": 0, + "content": "[10] Zhen Han, Chaojie Mao, Zeyinzi Jiang, Yulin Pan, and Jingfeng Zhang. Stylebooth: Image style editing with multimodal instruction. arXiv preprint arXiv:2404.12154, 2024.9" + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.73, + 0.905, + 0.785 + ], + "angle": 0, + "content": "[11] Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.788, + 0.905, + 0.856 + ], + "angle": 0, + "content": "[12] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.859, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[13] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 2" + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.115, + 0.908, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.161 + ], + "angle": 0, + "content": "[14] Minghui Hu, Jianbin Zheng, Daqing Liu, Chuanxia Zheng, Chaoyue Wang, Dacheng Tao, and Tat-Jen Cham. Cocktail: Mixing multi-modality control for text-conditional image generation. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.164, + 0.482, + 0.245 + ], + "angle": 0, + "content": "[15] Teng Hu, Ran Yi, Haokun Zhu, Liang Liu, Jinlong Peng, Yabiao Wang, Chengjie Wang, and Lizhuang Ma. Stroke-based neural painting and stylization with dynamically predicted painting region. In Proceedings of the 31st ACM International Conference on Multimedia, pages 7470-7480, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.249, + 0.482, + 0.289 + ], + "angle": 0, + "content": "[16] HuggingFace. Diffusers: State-of-the-art diffusion models. https://github.com/huggingface/diffusers, 2023.8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.292, + 0.482, + 0.36 + ], + "angle": 0, + "content": "[17] Boyuan Jiang, Xiaobin Hu, Donghao Luo, Qingdong He, Chengming Xu, Jinlong Peng, Jiangning Zhang, Chengjie Wang, Yunsheng Wu, and Yanwei Fu. Fitdit: Advancing the authentic garment details for high-fidelity virtual try-on. arXiv preprint arXiv:2411.10499, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.362, + 0.482, + 0.444 + ], + "angle": 0, + "content": "[18] Ying Jin, Jinlong Peng, Qingdong He, Teng Hu, Hao Chen, Jiafu Wu, Wenbing Zhu, Mingmin Chi, Jun Liu, Yabiao Wang, et al. Dualanodiff: Dual-interrelated diffusion model for few-shot anomaly image generation. Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.447, + 0.482, + 0.502 + ], + "angle": 0, + "content": "[19] Xuan Ju, Xian Liu, Xintao Wang, Yuxuan Bian, Ying Shan, and Qiang Xu. Brushnet: A plug-and-play image inpainting model with decomposed dual-branch diffusion. arXiv preprint arXiv:2403.06976, 2024. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.504, + 0.482, + 0.558 + ], + "angle": 0, + "content": "[20] Chanran Kim, Jeongin Lee, Shichang Joung, Bongmo Kim, and Yeul-Min Baek. 
Instantfamily: Masked attention for zero-shot multi-id image generation. arXiv preprint arXiv:2404.19427, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.561, + 0.482, + 0.643 + ], + "angle": 0, + "content": "[21] Lingjie Kong, Kai Wu, Xiaobin Hu, Wenhui Han, Jinlong Peng, Chengming Xu, Donghao Luo, Jiangning Zhang, Chengjie Wang, and Yanwei Fu. Anymaker: Zero-shot general object customization via decoupled dual-level id injection. Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.646, + 0.482, + 0.714 + ], + "angle": 0, + "content": "[22] Nupur Kumari, Bingliang Zhang, Richard Zhang, Eli Shechtman, and Jun-Yan Zhu. Multi-concept customization of text-to-image diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1931-1941, 2023. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.717, + 0.482, + 0.743 + ], + "angle": 0, + "content": "[23] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2023. 2, 3, 4, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.746, + 0.482, + 0.801 + ], + "angle": 0, + "content": "[24] Dongxu Li, Junnan Li, and Steven Hoi. Blip-diffusion: Pretrained subject representation for controllable text-to-image generation and editing. Advances in Neural Information Processing Systems, 36:30146-30166, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.803, + 0.482, + 0.871 + ], + "angle": 0, + "content": "[25] Pengzhi Li, Qiang Nie, Ying Chen, Xi Jiang, Kai Wu, Yuhuan Lin, Yong Liu, Jinlong Peng, Chengjie Wang, and Feng Zheng. Tuning-free image customization with image and text guidance. In European Conference on Computer Vision, pages 233-250. Springer, 2024. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.874, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[26] Yuheng Li, Haotian Liu, Qingyang Wu, Fangzhou Mu, Jianwei Yang, Jianfeng Gao, Chunyuan Li, and Yong Jae Lee." + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.135 + ], + "angle": 0, + "content": "Gligen: Open-set grounded text-to-image generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22511-22521, 2023. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.137, + 0.905, + 0.204 + ], + "angle": 0, + "content": "[27] Kuan Heng Lin, Sicheng Mo, Ben Klingher, Fangzhou Mu, and Bolei Zhou. Ctrl-x: Controlling structure and appearance for text-to-image generation without guidance. Advances in Neural Information Processing Systems, 37: 128911-128939, 2025. 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.207, + 0.905, + 0.248 + ], + "angle": 0, + "content": "[28] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. arXiv preprint arXiv:2210.02747, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.25, + 0.905, + 0.291 + ], + "angle": 0, + "content": "[29] Xingchao Liu, Chengyue Gong, and Qiang Liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.294, + 0.907, + 0.361 + ], + "angle": 0, + "content": "[30] Ron Mokady, Amir Hertz, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Null-text inversion for editing real images using guided diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6038–6047, 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.364, + 0.905, + 0.433 + ], + "angle": 0, + "content": "[31] Chong Mou, Xintao Wang, Liangbin Xie, Yanze Wu, Jian Zhang, Zhongang Qi, and Ying Shan. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 4296-4304, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.435, + 0.905, + 0.489 + ], + "angle": 0, + "content": "[32] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.492, + 0.905, + 0.547 + ], + "angle": 0, + "content": "[33] Jinlong Peng, Zekun Luo, Liang Liu, and Boshen Zhang. Frih: fine-grained region-aware image harmonization. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 4478-4486, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.549, + 0.905, + 0.617 + ], + "angle": 0, + "content": "[34] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.62, + 0.905, + 0.688 + ], + "angle": 0, + "content": "[35] Can Qin, Shu Zhang, Ning Yu, Yihao Feng, Xinyi Yang, Yingbo Zhou, Huan Wang, Juan Carlos Niebles, Caiming Xiong, Silvio Savarese, et al. Unicontrol: A unified diffusion model for controllable visual generation in the wild. arXiv preprint arXiv:2305.11147, 2023. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.69, + 0.905, + 0.772 + ], + "angle": 0, + "content": "[36] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.775, + 0.907, + 0.844 + ], + "angle": 0, + "content": "[37] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.846, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[38] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference," + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.907, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.926, + 0.509, + 0.937 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.092, + 0.484, + 0.121 + ], + "angle": 0, + "content": "Munich, Germany, October 5-9, 2015, proceedings, part III 18, pages 234-241. Springer, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.122, + 0.484, + 0.178 + ], + "angle": 0, + "content": "[39] Litu Rout, Yujia Chen, Nataniel Ruiz, Constantine Caramanis, Sanjay Shakkottai, and Wen-Sheng Chu. Semantic image inversion and editing using rectified stochastic differential equations. arXiv preprint arXiv:2410.10792, 2024. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.179, + 0.484, + 0.261 + ], + "angle": 0, + "content": "[40] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22500-22510, 2023. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.263, + 0.483, + 0.331 + ], + "angle": 0, + "content": "[41] Kihyuk Sohn, Nataniel Ruiz, Kimin Lee, Daniel Castro Chin, Irina Blok, Huiwen Chang, Jarred Barber, Lu Jiang, Glenn Entis, Yuanzhen Li, et al. Styledrop: Text-to-image generation in any style. arXiv preprint arXiv:2306.00983, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.333, + 0.483, + 0.375 + ], + "angle": 0, + "content": "[42] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.377, + 0.483, + 0.432 + ], + "angle": 0, + "content": "[43] Yizhi Song, Zhifei Zhang, Zhe Lin, Scott Cohen, Brian Price, Jianming Zhang, Soo Ye Kim, and Daniel Aliaga. Objectstitch: Generative object compositing. arXiv preprint arXiv:2212.00932, 2022. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.434, + 0.483, + 0.488 + ], + "angle": 0, + "content": "[44] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.49, + 0.483, + 0.546 + ], + "angle": 0, + "content": "[45] Zhenxiong Tan, Songhua Liu, Xingyi Yang, Qiaochu Xue, and Xinchao Wang. *Omnicontrol: Minimal and universal control for diffusion transformer.* arXiv preprint arXiv:2411.15098, 3, 2024. 
2, 3, 4, 5, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.548, + 0.483, + 0.617 + ], + "angle": 0, + "content": "[46] Haoxuan Wang, Qingdong He, Jinlong Peng, Hao Yang, Mingmin Chi, and Yabiao Wang. Mamba-yolo-world: Marrying yolo-world with mamba for open-vocabulary detection. IEEE International Conference on Acoustics, Speech, and Signal Processing, 2025. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.619, + 0.483, + 0.673 + ], + "angle": 0, + "content": "[47] Qixun Wang, Xu Bai, Haofan Wang, Zekui Qin, Anthony Chen, Huaxia Li, Xu Tang, and Yao Hu. Instantid: Zero-shot identity-preserving generation in seconds. arXiv preprint arXiv:2401.07519, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.676, + 0.483, + 0.771 + ], + "angle": 0, + "content": "[48] Su Wang, Chitwan Sahara, Ceslee Montgomery, Jordi Pont-Tuset, Shai Noy, Stefano Pellegrini, Yasumasa Onoe, Sarah Laszlo, David J Fleet, Radu Soricut, et al. Imagen editor and editbench: Advancing and evaluating text-guided image inpainting. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 18359-18369, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.773, + 0.483, + 0.815 + ], + "angle": 0, + "content": "[49] Xudong Wang, Trevor Darrell, Sai Saketh Rambhatla, Rohit Girdhar, and Ishan Misra. Instancediffusion: Instance-level control for image generation, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.817, + 0.483, + 0.871 + ], + "angle": 0, + "content": "[50] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.873, + 0.483, + 0.901 + ], + "angle": 0, + "content": "[51] Daniel Winter, Asaf Shul, Matan Cohen, Dana Berman, Yael Pritch, Alex Rav-Acha, and Yedid Hoshen. 
Objectmate: A" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.484, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.121 + ], + "angle": 0, + "content": "recurrence prior for object insertion and subject-driven generation. arXiv preprint arXiv:2412.08645, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.122, + 0.905, + 0.178 + ], + "angle": 0, + "content": "[52] Peng Xing, Haofan Wang, Yanpeng Sun, Qixun Wang, Xu Bai, Hao Ai, Renyuan Huang, and Zechao Li. Csgo: Content-style composition in text-to-image generation. arXiv preprint arXiv:2408.16766, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.179, + 0.904, + 0.22 + ], + "angle": 0, + "content": "[53] XLabs-AI. Flux-controlnet-canny-diffusers. https://huggingface.co/XLabs-AI/flux-controlnet-canny-diffusers,2024.7,8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.222, + 0.904, + 0.262 + ], + "angle": 0, + "content": "[54] XLabs-AI. Flux-controlnet-depth-diffusers. https://huggingface.co/XLabs-AI/flux-controlnet-depth-diffusers,2024.7,8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.264, + 0.902, + 0.291 + ], + "angle": 0, + "content": "[55] XLabs-AI. Flux-ip-adapter. https://huggingface.co/XLabs-AI/flux-ip-adapter, 2024.7,8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.293, + 0.905, + 0.375 + ], + "angle": 0, + "content": "[56] Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, and Fang Wen. Paint by example: Exemplar-based image editing with diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18381-18391, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.377, + 0.905, + 0.419 + ], + "angle": 0, + "content": "[57] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. 
In CVPR, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.421, + 0.905, + 0.475 + ], + "angle": 0, + "content": "[58] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.477, + 0.905, + 0.532 + ], + "angle": 0, + "content": "[59] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.533, + 0.905, + 0.602 + ], + "angle": 0, + "content": "[60] Shihao Zhao, Dongdong Chen, Yen-Chun Chen, Jianmin Bao, Shaozhe Hao, Lu Yuan, and Kwan-Yee K Wong. Uni-controlnet: All-in-one control to text-to-image diffusion models. Advances in Neural Information Processing Systems, 36, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.604, + 0.905, + 0.673 + ], + "angle": 0, + "content": "[61] Junhao Zhuang, Yanhong Zeng, Wenran Liu, Chun Yuan, and Kai Chen. A task is worth one word: Learning with task prompts for high-quality versatile image inpainting. In European Conference on Computer Vision, pages 195-211. Springer, 2025. 
2, 5" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.673 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.227, + 0.086, + 0.773, + 0.13 + ], + "angle": 0, + "content": "UniCombine: Unified Multi-Conditional Combination with Diffusion Transformer" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.142, + 0.615, + 0.163 + ], + "angle": 0, + "content": "Supplementary Material" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.179, + 0.368, + 0.196 + ], + "angle": 0, + "content": "A1. Dataset Partitioning Scheme" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.204, + 0.484, + 0.279 + ], + "angle": 0, + "content": "In our proposed SubjectSpatial200K dataset, we utilize the ChatGPT-4o assessment scores provided by Subjects200K [45] on Subject Consistency, Composition Structure, and Image Quality to guide the dataset partitioning in our experiments." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.28, + 0.483, + 0.31 + ], + "angle": 0, + "content": "- Subject Consistency: Ensuring the identity of the subject image is consistent with that of the ground truth image." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.311, + 0.482, + 0.34 + ], + "angle": 0, + "content": "- Composition Structure: Verifying a reasonable composition of the subject and ground truth images." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.341, + 0.482, + 0.37 + ], + "angle": 0, + "content": "- Image Quality: Confirming each image pair maintains high resolution and visual fidelity." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.28, + 0.483, + 0.37 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.371, + 0.483, + 0.401 + ], + "angle": 0, + "content": "We partition the dataset into 139,403 training samples and 5,827 testing samples through Algorithm 1." 
+ }, + { + "type": "table", + "bbox": [ + 0.094, + 0.405, + 0.482, + 0.589 + ], + "angle": 0, + "content": "
Algorithm 1: Dataset Partitioning Scheme
Input: example
Output: train or test
cs← example["Composite Structure"]
iq← example["Image Quality"]
sc← example["Subject Consistency"]
scores← [cs, iq, sc]
if all(s==5 for s in scores) then
return train;
else if cs≥3 and iq==5 and sc==5 then
return test;
" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.61, + 0.451, + 0.626 + ], + "angle": 0, + "content": "A2. More Ablation on CMMDiT Attention" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.635, + 0.484, + 0.725 + ], + "angle": 0, + "content": "More quantitative and qualitative ablation results on the other multi-conditional generative tasks are provided here. The comprehensive ablation results in Tab. A1, Tab. A2, Tab. A3, Fig. A1, Fig. A2, and Fig. A3 demonstrate that the UniCombine performs better with our proposed CMMDiT Attention." + }, + { + "type": "table", + "bbox": [ + 0.101, + 0.735, + 0.472, + 0.794 + ], + "angle": 0, + "content": "
MethodCLIP-I ↑DINO ↑CLIP-T ↑F1 ↑
Ours w/o CMMDiT91.5186.3133.200.16
Ours w/ CMMDiT91.8486.8833.210.17
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.091, + 0.795, + 0.482, + 0.823 + ], + "angle": 0, + "content": "Table A1. Quantitative ablation of CMMDiT Attention mechanism on training-free Subject-Canny task" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.845, + 0.34, + 0.862 + ], + "angle": 0, + "content": "A3. More Qualitative Results" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.483, + 0.902 + ], + "angle": 0, + "content": "More qualitative results are presented in Fig. A4 and Fig. A5." + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.182, + 0.905, + 0.322 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.323, + 0.905, + 0.351 + ], + "angle": 0, + "content": "Figure A1. Qualitative ablation of CMMDiT Attention mechanism on training-free Subject-Canny task" + }, + { + "type": "table", + "bbox": [ + 0.517, + 0.363, + 0.901, + 0.422 + ], + "angle": 0, + "content": "
MethodCLIP-I ↑DINO ↑CLIP-T ↑MSE ↓
Ours w/o CMMDiT90.8385.3833.38547.63
Ours w/ CMMDiT91.1585.7333.41507.40
" + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.424, + 0.905, + 0.451 + ], + "angle": 0, + "content": "Table A2. Quantitative ablation of CMMDiT Attention mechanism on training-free Subject-Depth task" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.459, + 0.905, + 0.597 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.599, + 0.905, + 0.627 + ], + "angle": 0, + "content": "Figure A2. Qualitative ablation of CMMDiT Attention mechanism on training-free Subject-Depth task" + }, + { + "type": "table", + "bbox": [ + 0.56, + 0.64, + 0.858, + 0.699 + ], + "angle": 0, + "content": "
MethodCLIP-T ↑F1 ↑MSE ↓
Ours w/o CMMDiT33.700.17524.04
Ours w/ CMMDiT33.700.18519.53
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.7, + 0.905, + 0.728 + ], + "angle": 0, + "content": "Table A3. Quantitative ablation of CMMDiT Attention mechanism on training-free Multi-Spatial task" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.737, + 0.905, + 0.875 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.877, + 0.905, + 0.904 + ], + "angle": 0, + "content": "Figure A3. Qualitative ablation of CMMDiT Attention mechanism on training-free Multi-Spatial task" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.094, + 0.098, + 0.476, + 0.859 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.098, + 0.905, + 0.859 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.26, + 0.873, + 0.737, + 0.888 + ], + "angle": 0, + "content": "Figure A4. More qualitative results on Multi-Spatial and Subject-Insertion tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.091, + 0.098, + 0.477, + 0.86 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.098, + 0.905, + 0.86 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.263, + 0.873, + 0.733, + 0.888 + ], + "angle": 0, + "content": "Figure A5. More qualitative results on Subject-Depth and Subject-Canny tasks." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "14" + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09277/39fed2ad-9645-4fde-a1c3-86b0c99b7b36_origin.pdf b/data/2025/2503_09xxx/2503.09277/39fed2ad-9645-4fde-a1c3-86b0c99b7b36_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..519edcf50614af3b35ddfb40b1d5136bc27b4c97 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/39fed2ad-9645-4fde-a1c3-86b0c99b7b36_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ccdc8108a290b385b2525b036f451b5dca21e11515c65ae490b08bb29254142 +size 13732806 diff --git a/data/2025/2503_09xxx/2503.09277/full.md b/data/2025/2503_09xxx/2503.09277/full.md new file mode 100644 index 0000000000000000000000000000000000000000..37c85a9e8e29e19ff0ed5b1e6707600f9d1cc941 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/full.md @@ -0,0 +1,380 @@ +# UniCombine: Unified Multi-Conditional Combination with Diffusion Transformer + +Haoxuan Wang $^{1\dagger}$ , Jinlong Peng $^{2\dagger}$ , Qingdong He $^{2}$ , Hao Yang $^{3}$ , Ying Jin $^{1}$ , Jiafu Wu $^{2}$ , Xiaobin Hu $^{2}$ , Yanjie Pan $^{1}$ , Zhenye Gan $^{2}$ , Mingmin Chi $^{1*}$ , Bo Peng $^{4*}$ , Yabiao Wang $^{2,5*}$ + +$^{1}$ Fudan University, $^{2}$ Tencent Youtu Lab, $^{3}$ Shanghai Jiao Tong University, $^{4}$ Shanghai Ocean University $^{5}$ Zhejiang University + +https://github.com/Xuan-World/UniCombine + +![](images/2c8007f4046d1a3e78dbaf014b7a491dbbbf9b78ef27cf90ee2c58035b246a98.jpg) +Figure 1. Fantastic results of our proposed UniCombine on multi-conditional controllable generation: (a) Subject-Insertion task. (b) and (c) Subject-Spatial task. (d) Multi-Spatial task. Our unified framework effectively handles any combination of input conditions and achieves remarkable alignment with all of them, including but not limited to text prompts, spatial maps, and subject images. 
+ +# Abstract + +With the rapid development of diffusion models in image generation, the demand for more powerful and flexible controllable frameworks is increasing. Although existing methods can guide generation beyond text prompts, the challenge of effectively combining multiple conditional inputs + +while maintaining consistency with all of them remains unsolved. To address this, we introduce UniCombine, a DiT-based multi-conditional controllable generative framework capable of handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images. Specifically, we introduce a novel Conditional MMDiT Attention mechanism and incorporate a trainable LoRA module to build both the training-free and training-based versions. Additionally, we propose a new pipeline to construct SubjectSpatial200K, the first dataset + +designed for multi-conditional generative tasks covering both the subject-driven and spatially-aligned conditions. Extensive experimental results on multi-conditional generation demonstrate the outstanding universality and powerful capability of our approach with state-of-the-art performance. + +# 1. Introduction + +With the advancement of diffusion-based [13, 42] text-to-image generative technology, a series of single-conditional controllable generative frameworks like ControlNet [59], T2I-Adapter [31], IP-Adapter [58], and InstantID [47] have expanded the scope of the control signals from text prompts to image conditions. It allows users to control more plentiful aspects of the generated images, such as layout, style, characteristics, etc. These conventional approaches are specifically designed for the UNet [38] backbone of Latent Diffusion Models (LDM) [37] with dedicated control networks. Besides, some recent approaches, such as Omini-Control [45], integrate control signals into the Diffusion Transformer (DiT) [7, 23] architecture, which demonstrates superior performance compared to the UNet in LDM. 
+ +Although the methods mentioned above have achieved a promising single-conditional performance, the challenge of multi-conditional controllable generation is still unsolved. Previous multi-conditional generative methods like UniControl [35] and UniControlNet [60] are generally restricted to handling spatial conditions like Canny or Depth maps and fail to accommodate subject conditions, resulting in limited applicable scenarios. Despite the recently proposed Ctrl-X [27] features controlling structure and appearance together, its performance is unsatisfactory and supports only a limited combination of conditions. + +Moreover, we assume that many existing generative tasks can be viewed as a multi-conditional generation, such as virtual try-on [5, 17], object insertion [3, 51], style transfer [15, 33, 52], spatially-aligned customization [20, 21, 25, 27], etc. Consequently, there is a need for a unified framework to encompass these generative tasks in a way of multi-conditional generation. This framework should ensure consistency with all input constraints, including subject ID preservation, spatial structural alignment, background coherence, and style uniformity. + +To achieve this, we propose UniCombine, a powerful and universal framework that offers several key advantages: Firstly, our framework is capable of simultaneously handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images. Specifically, we introduce a novel Conditional MMDiT Attention mechanism and incorporate a trainable Denoising-LoRA module to build both the training-free and training-based versions. By integrating multiple pre + +trained Condition-LoRA module weights into the conditional branches, UniCombine achieves excellent training-free performance, which can be improved further after training on the task-specific multi-conditional dataset. 
Secondly, due to the lack of a publicly available dataset for multi-conditional generative tasks, we build the SubjectSpatial200K dataset to serve as the training dataset and the testing benchmark. Specifically, we generate the subject grounding annotations and spatial map annotations for all the data samples from Subjects200K [45] and therefore formulate our SubjectSpatial200K dataset. Thirdly, our UniCombine can achieve many unprecedented multi-conditional combinations, as shown in Fig. 1, such as combining a reference subject image with the inpainting area of a background image or with the layout guidance of a depth (or canny) map while imposing precise control via text prompt. Furthermore, extensive experiments on Subject-Insertion, Subject-Spatial, and Multi-Spatial conditional generation demonstrate the outstanding universality and powerful capability of our method against other existing specialized approaches. + +In summary, we highlight our contributions as follows: + +- We present UniCombine, a DiT-based multi-conditional controllable generative framework capable of handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images. +- We construct the SubjectSpatial200K dataset, which encompasses both subject-driven and spatially-aligned conditions for all text-image sample pairs. It addresses the absence of a publicly available dataset for training and testing multi-conditional controllable generative models. +- We conduct extensive experiments on Subject-Insertion, Subject-Spatial, and Multi-Spatial conditional generative tasks. The experimental results demonstrate the state-of-the-art performance of our UniCombine, which effectively aligns with all conditions harmoniously. + +# 2. Related Work + +# 2.1. 
Diffusion-Based Models + +Diffusion-based [13, 42] models have demonstrated superior performance than GAN-based [9] ones across various domains, including controllable generation [18, 31, 47, 58, 59], image editing [11, 30, 39], customized generation [8, 22, 40], object insertion [4, 43, 56], mask-guided inpainting [19, 48, 61], and so on. These breakthroughs begin with the LDM [37] and are further advanced with the DiT [32] architecture. The latest text-to-image generative models, SD3 [7] and FLUX [23], have attained state-of-the-art results by employing the Rectified Flow [28, 29] training strategy, the RPE [44] positional embedding and the MultiModal Diffusion Transformer (MMDiT) [7] architecture. + +![](images/e21e3bf9a4a78294b155fc81f3e6b7e3e4251e119c112d81fb0f47a4822439d5.jpg) +(a) Overall Framework + +![](images/3162d24f03eacb746bd4ecce492028784582c1beb179bc5cf91d96a185a56eec.jpg) +(b) Single-Conditional Setting + +![](images/7fa892b46091963fb1ee53c8e239a5cf468404dfb70774813a489096638da93b.jpg) +(c) Multi-Conditional Setting +Figure 2. Overview of our proposed UniCombine. (a) The overall framework. We regard the MMDiT-based diffusion models as consisting of the text branch and the denoising branch. Based on it, our UniCombine introduces multiple conditional branches to process the input conditions. (b) The single-conditional setting of our UniCombine. It is equivalent to OminiControl [45] which is a special case of our proposed UniCombine framework under a single-conditional setting. (c) The multi-conditional setting of our UniCombine. Our LoRA Switching module adaptively activates the pre-trained Condition-LoRA modules on the weights of the denoising branch according to the conditional types. The proposed Conditional MMDiT Attention mechanism is used to replace the original MMDiT Attention mechanism for handling the unified multi-conditional input sequence. 
Whether to load the optional Denoising-LoRA module is the difference between the training-free and training-based versions. + +# 2.2. Controllable Generation + +Controllable generation allows for customizing the desired spatial layout, filter style, or subject appearance in the generated images. A series of methods such as ControlNet [59], T2I-Adapter [31], GLIGEN [26], and ZestGuide [6] successfully introduce the spatial conditions into controllable generation, enabling models to control the spatial layout of generated images. Another series of methods, such as IP-Adapter [58], InstantID [47], BLIP-Diffusion [24], and StyleDrop [41] incorporate the subject conditions into controllable generation, ensuring consistency between generated images and reference images in style, characteristics, subject appearance, etc. To unify these two tasks, OminiControl [45] proposes a novel MMDiT-based controllable framework to handle various conditions with a unified pipeline. Unfortunately, it lacks the capability to control generation with multiple conditions. To this end, we propose UniCombine, which successfully extends this framework to multi-conditional scenarios. + +# 2.3. Multi-Conditional Controllable Generation + +As controllable generation advances, merely providing a single condition to guide the image generation no longer satisfies the needs. As a result, research on multi-conditional controllable generation has emerged. Existing methods like UniControl [35], UniControlNet [60] and Cocktail [14] exhibit acceptable performance when simul + +taneously leveraging multiple spatial conditions for image generation. However, there is a lack of multi-conditional generative models that support utilizing both spatial conditions and subject conditions to guide the generative process together. 
Although the recently proposed method Ctrl-X [27] features controlling the appearance and structure simultaneously, its performance remains unsatisfactory with a limited combination of conditions and it is not compatible with the Diffusion Transformer architecture. To address the aforementioned limitations, we propose UniCombine to enable the flexible combination of various control signals. + +# 3. Method + +# 3.1. Preliminary + +In this work, we mainly explore the latest generative models that utilize the Rectified Flow (RF) [28, 29] training strategy and the MMDiT [7] backbone architecture, like FLUX [23] and SD3 [7]. For the source noise distribution $X_0 \sim p_{\mathrm{noise}}$ and the target image distribution $X_1 \sim p_{\mathrm{data}}$ , the RF defines a linear interpolation between them as $X_t = (1 - t)X_0 + tX_1$ for $t \in [0,1]$ . The training objective is to learn a time-dependent vector field $v_t(X_t, t; \theta)$ that describes the trajectory of the ODE $dX_t = v_t(X_t, t; \theta)dt$ . Specifically, $v_t(X_t, t; \theta)$ is optimized to approximate the constant velocity $X_1 - X_0$ , leading to the loss function as Eq. (1). + +$$ +\mathcal {L} _ {\mathrm {R F}} (\theta) = \mathbb {E} _ {X _ {1} \sim p _ {\text {d a t a}}, X _ {0} \sim p _ {\text {n o i s e}}, t \sim U [ 0, 1 ]} \left[ \| (X _ {1} - X _ {0}) - v _ {t} (X _ {t}, t; \theta) \| ^ {2} \right] \tag {1} +$$ + +In this paper, we propose a concept of branch to differentiate the processing flows of input embeddings from different modalities in MMDiT-based models. As shown in Fig. 2 (a), instead of the single-branch architecture [37] where the text prompt is injected into the denoising branch via crossattention, MMDiT uses two independent transformers to construct the text branch and the denoising branch. 
Based on it, OminiControl [45] incorporates a Condition-LoRA module onto the weights of the denoising branch to process the input conditional embedding, thus forming its Conditional Branch, as depicted in Fig. 2 (b). It is worth noting that OminiControl [45] can be regarded as a special case of our proposed UniCombine framework under the single-conditional setting. It provides the pre-trained Condition-LoRA modules to meet the need for our multi-conditional settings. In the single-conditional setting, the text branch embedding $T$ , the denoising branch embedding $X$ , and the conditional branch embedding $C$ are concatenated to form a unified sequence $[T;X;C]$ to be processed in the MMDiT Attention mechanism.

# 3.2. UniCombine

Building upon the MMDiT-based text-to-image generative model FLUX [23], we propose UniCombine, a multi-conditional controllable generative framework consisting of various conditional branches. Each conditional branch is in charge of processing one conditional embedding, thus forming a unified embedding sequence $S$ as presented in Eq. (2).

$$
S = [T; X; C_1; \dots; C_N] \tag{2}
$$

Given that the single-conditional setting of our UniCombine is equivalent to OminiControl [45], we only focus on the multi-conditional setting in this section. Firstly, we introduce a LoRA Switching module to manage multiple conditional branches effectively. Secondly, we introduce a novel Conditional MMDiT Attention mechanism to process the unified sequence $S$ in the multi-conditional setting. Thirdly, we present an in-depth analysis of our training-free strategy, which leverages the pre-trained Condition-LoRA module weights to perform a training-free multi-conditional controllable generation. Lastly, we present a feasible training-based strategy, which utilizes a trainable Denoising-LoRA module to enhance the performance further after training on a task-specific multi-conditional dataset.

LoRA Switching Module. 
Before denoising with multiple input conditions, the Condition-LoRA modules pre-trained under single-conditional settings should be loaded onto the weights of the denoising branch, like $[CondLoRA_1, CondLoRA_2, \ldots]$ . Then the LoRA Switching module determines which one of them should be + +activated according to the type of input conditions, forming a one-hot gating mechanism $[0,1,0,\dots,0]$ , as shown in Fig. 2 (c). Subsequently, different conditional branches with different activated Condition-LoRA modules are used for processing different conditional embeddings, resulting in a minimal number of additional parameters introduced for different conditions. Unlike the single-conditional setting in Fig. 2 (b), which only needs loading LoRA modules, the LoRA Switching module in Fig. 2 (c) enables adaptive selection among multiple LoRA modules to provide the matching conditional branches for each conditional embeddings, granting our framework greater flexibility and adaptability to handle diverse conditional combinations. + +Conditional MMDiT Attention. After concatenating the output embeddings from these $N$ conditional branches, the unified sequence $S$ cannot be processed through the original MMDiT Attention mechanism due to two major challenges: (1) The computational complexity scales quadratically as $O(N^2)$ with respect to the number of conditions, which becomes especially problematic when handling multiple high-resolution conditions. (2) When performing MMDiT Attention on the unified sequence $S$ , different condition signals interfere with each other during the attention calculation, making it difficult to effectively utilize the pre-trained Condition-LoRA module weights for the denoising process. + +To address these challenges, we introduce a novel Conditional MMDiT Attention mechanism (CMMDiT Attention) as depicted in Fig. 2 (c) to replace the original MMDiT Attention. 
Instead of feeding the entire unified sequence $S$ into the MMDiT Attention at once, CMMDiT Attention follows distinct computational mechanisms according to which branch is serving as queries. The core idea is that the branch serving as a query aggregates the information from different scopes of the unified sequence $S$ depending on its type. Specifically, when the denoising branch $X$ and the text branch $T$ serve as queries, their scope of keys and values correspond to the entire unified sequence $S$ , granting them a global receptive field and the ability to aggregate information from all conditional branches. In contrast, when the conditional branches $C_i$ serve as queries, their receptive fields do not encompass one another. Their scope of keys and values are restricted to the subsequence $S_i$ as presented in Eq. (3), which prevents feature exchange and avoids information entanglement between different conditions. + +$$ +S _ {i} = [ T; X; C _ {i} ] \tag {3} +$$ + +Furthermore, the CMMDiT Attention reduces computational complexity from $O(N^2)$ to $O(N)$ as the number of conditions increases, making it more scalable. + +Training-free Strategy. The following analyses provide a detailed explanation of why our UniCombine is capable of seamlessly integrating and effectively reusing the pretrained Condition-LoRA module weights to tackle multi-conditional challenges in a training-free manner. + +![](images/8c57a3c424f01738f8a53f6d6f0c8688e8016cc7a73bf6ea21df63ffdafe3eaa.jpg) +Figure 3. Average $\mathrm{X} \rightarrow$ Subject cross-attention map of the insertion area. + +On the one hand, when the conditional embeddings $C_i$ serve as queries in CMMDiT, they follow the same attention computational paradigm as in the MMDiT of single-conditional settings, as indicated in Eq. (4). 
+

$$
\begin{array}{l} \operatorname{CMMDiT}\left(Q = C_i^q, K = \left[T^k, X^k, C_i^k\right], V = \left[T^v, X^v, C_i^v\right]\right) \\ = \operatorname{MMDiT}\left(Q = C^q, K = \left[T^k, X^k, C^k\right], V = \left[T^v, X^v, C^v\right]\right) \tag{4} \\ \end{array}
$$

This consistent computational paradigm enables the conditional branches to share the same feature extraction capability between the multi-conditional setting and the single-conditional setting.

On the other hand, when the denoising embedding $X$ and the text prompt embedding $T$ serve as queries in CMMDiT, their attention computational paradigm diverges from the single-conditional settings. As illustrated in Eq. (5), when the denoising embedding $X$ is used as a query for attention computation with multiple conditional embeddings in CMMDiT, the attention score matrix is computed between $X$ and all the conditional embeddings.

$$
\begin{array}{l} \operatorname{CMMDiT}(Q = X^q, K/V = [X^{k/v}, T^{k/v}, C_1^{k/v}, \dots, C_N^{k/v}]) \\ = \operatorname{softmax}\left(\frac{1}{\sqrt{dim}} X^q \left[X^k, T^k, C_1^k, \dots, C_N^k\right]^{\top}\right) \left[X^v, T^v, C_1^v, \dots, C_N^v\right] \tag{5} \\ \end{array}
$$

It allows $X$ to extract and integrate information from each of the conditional embeddings separately and fuse them. This divide-and-conquer computational paradigm enables the text branch and denoising branch to fuse the conditional features effectively.

By leveraging the computational paradigms mentioned above, our UniCombine is able to perform a training-free multi-conditional controllable generation with the pre-trained Condition-LoRA modules.

Training-based Strategy. However, due to the lack of training, solely relying on the softmax operation in Eq. 
(5) to balance the attention score distribution across multiple conditional embeddings may result in an undesirable feature fusion result, making our training-free version unsatisfactory in some cases. To address this issue, we introduce a trainable Denoising-LoRA module within the denoising branch to rectify the distribution of attention scores in Eq. (5). During training, we keep all the Condition-LoRA modules frozen to preserve the conditional extracting capability and train the Denoising-LoRA module solely on the task-specific multi-conditional dataset, as shown in Fig. 2 (c). After training, the denoising embedding $X$ learns to

![](images/d04a32c8e874aab106f9fa111d4a7b255e65e731a4acd2452f053c1c30949d06.jpg)
Figure 4. SubjectSpatial200K dataset construction pipeline.

better aggregate the appropriate information during the CMMDiT Attention operation. As presented in Fig. 3, the average $\mathrm{X} \rightarrow$ Subject attention map within the inpainting area is more concentrated on the subject area in the training-based version.

# 3.3. SubjectSpatial200K dataset

Our SubjectSpatial200K dataset aims to address the lack of a publicly available dataset for multi-conditional generative tasks. Existing datasets fail to include both the subject-driven and spatially-aligned annotations. Recently, the Subjects200K [45] dataset provides a publicly accessible dataset for subject-driven generation. Based on it, we introduce the SubjectSpatial200K dataset, which is a unified high-quality dataset designed for training and testing multi-conditional controllable generative models. This dataset includes comprehensive annotations as elaborated below. Besides, the construction pipeline is detailed in Fig. 4.

Subject Grounding Annotation. The subject grounding annotation is significantly necessary for many generative tasks like instance-level inpainting [19, 61], instance-level controllable generation [26, 49], and object insertion [4, 43]. 
By leveraging the open-vocabulary object detection model Mamba-YOLO-World [46] on Subjects200K, we detect bounding boxes for all subjects according to their category descriptions and subsequently derive the corresponding mask regions. + +Spatial Map Annotation. The spatial map annotation further extends the applicable scope of our dataset to spatially-aligned synthesis tasks. Specifically, we employ the Depth-Anything [57] model and the OpenCV [1] library on Subjects200K to derive the Depth and Canny maps. + +# 4. Experiment + +# 4.1. Setup + +Implementation. We use the FLUX.1-schnell [23] as our base model and the weights provided by OminiControl [45] + +
TaskMethodGenerative QualityControllabilitySubject ConsistencyText Consistency
FID ↓SSIM ↑F1 ↑MSE ↓CLIP-I ↑DINO ↑CLIP-T ↑
Multi-SpatialUniControl44.170.320.071346.02--30.28
UniControlNet20.960.280.091231.06--32.74
UniCombine (training-free)10.350.540.18519.53--33.70
UniCombine (training-based)6.820.640.24165.90--33.45
Subject-InsertionObjectStitch26.860.37--93.0582.3432.25
AnyDoor26.070.37--94.8886.0432.55
UniCombine (training-free)6.370.76--95.6089.0133.11
UniCombine (training-based)4.550.81--97.1492.9633.08
Subject-DepthControlNet w. IP-Adapter29.930.34-1295.8080.4162.2632.94
Ctrl-X52.370.36-2644.9078.0850.8330.20
UniCombine (training-free)10.030.48-507.4091.1585.7333.41
UniCombine (training-based)6.660.55-196.6594.4790.3133.30
Subject-CannyControlNet w. IP-Adapter30.380.380.09-79.8060.1932.85
Ctrl-X47.890.360.05-79.3554.3130.34
UniCombine (training-free)10.220.490.17-91.8486.8833.21
UniCombine (training-based)6.010.610.24-95.2692.5933.30
+ +Table 1. Quantitative comparison of our method with existing approaches on Multi-Spatial, Subject-Insertion, Subject-Depth, and Subject-Canny conditional generative tasks. The bold and underlined figures represent the optimal and sub-optimal results, respectively. + +![](images/1e06b7a6c1ae6b58e94df98cc9be3d1764e9ac9f480e954299aa319606949d55.jpg) +Figure 5. Qualitative comparison on Multi-Spatial generation. + +as our pre-trained Condition-LoRA module weights. During the training of our Denoising-LoRA module, we use a rank of 4, consistent with the Condition-LoRA. We choose the Adam optimizer with a learning rate of $1e^{-4}$ and set the weight decay to 0.01. Our models are trained for 30,000 steps on 16 NVIDIA V100 GPUs at a resolution of $512 \times 512$ . + +**Benchmarks.** We evaluate the performance of our method in both training-free and training-based versions. The training and testing datasets are partitioned from the SubjectSpatial200K dataset based on image quality assessment scores evaluated by ChatGPT-4o, with details provided in Sec. A1. + +![](images/d60c55bfaeeeedfd5aefc1bba87bd491b67f9a1c54a3f0b04bece1daafa5fa08.jpg) +Figure 6. Qualitative comparison on Subject-Insertion generation. + +Importantly, the dataset partitioning scheme remains consistent in all experiments. + +Metrics. To evaluate the subject consistency, we calculate the CLIP-I [36] score and DINO [2] score between the generated images and the ground truth images. To assess the generative quality, we compute the FID [12] and SSIM [50] between the generated image set and the ground truth image set. To measure the controllability, we compute the F1 Score for edge conditions and the MSE score for depth conditions between the extracted maps from generated images and the original conditions. Additionally, we adopt the CLIP-T [36] score to estimate the text consistency between + +![](images/506fe3547ab1943e7faa90d0f349eb3f11dc59ed4a47d7cf36007f74a6ca38f0.jpg) +Figure 7. 
Qualitative comparison on Subject-Depth generation. + +the generated images and the text prompts. + +# 4.2. Main Result + +We conduct extensive and comprehensive comparative experiments on the Multi-Spatial, Subject-Insertion, and Subject-Spatial conditional generative tasks. + +# 4.2.1. Multi-Spatial Conditional Generation + +The Multi-Spatial conditional generation aims to generate images adhering to the collective layout constraints of diverse spatial conditions. This requires the model to achieve a more comprehensive layout control based on input conditions in a complementary manner. The comparative results in Tab. 1 and Fig. 5 demonstrate that our method outperforms existing multi-spatial conditional generation approaches in generative quality and controllability. + +# 4.2.2. Subject-Insertion Conditional Generation + +The Subject-Insertion conditional generation requires the model to generate images where the reference subject is inserted into the masked region of the target background. As illustrated in Tab. 1 and Fig. 6, our UniCombine demonstrates superior performance compared to previous methods with three advantages: Firstly, our method ensures that the reference subject is inserted into the background with high consistency and harmonious integration. Secondly, our method excels in open-world object insertion without requiring test-time tuning, unlike conventional customization methods [22, 40]. Finally, our method demonstrates strong semantic comprehension capabilities, enabling it to extract the desired object from a complex subject image with a non-white background, rather than simply pasting the entire subject image into the masked region. + +![](images/44ebde2dd6cfbcb637ef99847a469c9c5e74ddeb4368792fe3d6809b1e91856e.jpg) +Figure 8. Qualitative comparison on Subject-Canny generation. + +# 4.2.3. 
Subject-Spatial Conditional Generation + +The Subject-Spatial conditional generation focuses on generating images of the reference subject while ensuring the layout aligns with specified spatial conditions. We compare our method with Ctrl-X [27] and a simple baseline model. Ctrl-X is a recently proposed model based on SDXL [34] that simultaneously controls structure and appearance. The baseline model is constructed by integrating the FLUX ControlNet [53, 54] and FLUX IP-Adapter [55] into the FLUX.1-dev [23] base model. Specifically, we divided the Subject-Spatial generative task into different experimental groups based on the type of spatial conditions, referred to as Subject-Depth and Subject-Canny, respectively. As presented in Fig. 7, Fig. 8, and Tab. 1, the experimental results demonstrate the superior performance of our UniCombine: Firstly, our method exhibits stronger semantic comprehension capability, generating the reference subject in the accurate localization of the spatial conditions without confusing appearance features. Secondly, our method demonstrates greater adaptability, generating the reference subject with reasonable morphological transformations to align with the guidance of spatial conditions and text prompts. Lastly, our method achieves superior subject consistency while maintaining excellent spatial coherence. + +# 4.2.4. Textual Guidance + +As shown in Fig. 1 and Tab. 1, our method not only allows for controllable generation by combining multiple conditions but also enables precise textual guidance simultaneously. By utilizing a unified input sequence $S = [T; X; C_1; \ldots; C_N]$ during the denoising process, our UniCombine effectively aligns the descriptive words in $T$ with + +
MethodCLIP-I ↑DINO ↑CLIP-T ↑AttnOps ↓
Ours w/o CMMDiT95.4788.4233.10732.17M
Ours w/ CMMDiT95.6089.0133.11612.63M
+ +Table 2. Quantitative ablation of CMMDiT Attention mechanism on training-free Subject-Insertion task. AttnOps is short for the number of attention operations. + +
Background +SubjectTraining-free +w/o CMMDiTTraining-free +w/ CMMDiTBackground +SubjectTraining-free +w/o CMMDiTTraining-free +w/ CMMDiT
inconsistentsuccessinconsistentsuccess
failsuccessfailsuccess
+ +Figure 9. Qualitative ablation of CMMDiT Attention mechanism on training-free Subject-Insertion task. + +
MethodCLIP-I ↑DINO ↑CLIP-T ↑
Ours w/ Text-LoRA96.9792.3233.10
Ours w/ Denoising-LoRA97.1492.9633.08
+ +![](images/fc8bc88e1ec09551aeba8132d1ccff2d2a1eb574ea62259a59e0357472255443.jpg) +Figure 10. Qualitative ablation of trainable LoRA on training-based Subject-Insertion task. + +the relevant features in $C_i$ and the corresponding patches in $X$ , thereby achieving a remarkable text-guided multi-conditional controllable generation. + +# 4.3. Ablation Study + +We exhibit the ablation study results conducted on the Subject-Insertion task in this section, while more results on the other tasks are provided in Sec. A2. + +Effect of Conditional MMDiT Attention. To evaluate the effectiveness of our proposed Conditional MMDiT Attention mechanism, we replace the CMMDiT Attention with the original MMDiT Attention and test its training-free performance to avoid the influence of training data. As shown in Tab. 2 and Fig. 9, our framework attains superior performance with fewer attention operations when employing the CMMDiT Attention mechanism. + +Table 3. Quantitative ablation of trainable LoRA on training-based Subject-Insertion task. + +
MethodCLIP-I ↑DINO ↑CLIP-T ↑
Ours w/ DSB only96.8592.3833.07
Ours w/ DSB and SSB97.1492.9633.08
+ +Table 4. Quantitative ablation of training strategy on training-based Subject-Insertion task. DSB: Dual-Stream Blocks. SSB: Single-Stream Blocks. + +
Background +SubjectTraining-based +w/ DSB onlyTraining-based +w/ DSB + SSBBackground +SubjectTraining-based +w/ DSB onlyTraining-based +w/ DSB + SSB
inconsistentsuccessinconsistentsuccessinconsistentsuccess
inconsistentsuccessfailsuccessinconsistentinconsistent
+ +Figure 11. Qualitative ablation of training strategy on training-based Subject-Insertion task. DSB: Dual-Stream Blocks. SSB: Single-Stream Blocks. + +
ModelGPU Memory ↓Add Params ↓
FLUX (bf16, base model)32933M-
CN, 1 cond35235M744M
IP, 1 cond35325M918M
CN + IP, 2 cond36753M1662M
Ours (training-free), 2 cond33323M29M
Ours (training-based), 2 cond33349M44M
+ +Table 5. Comparison of inference GPU memory cost and additionally introduced parameters. CN: ControlNet. IP: IP-Adapter. + +Different Options for Trainable LoRA. To evaluate whether the trainable LoRA module can be applied to the text branch instead of the denoising branch, we load a Text-LoRA in the text branch, with a configuration identical to that of the Denoising-LoRA. The Tab. 3 and Fig. 10 indicate that applying the trainable LoRA module to the denoising branch better modulates the feature aggregation operation across multiple conditional branches. + +Training Strategy. As the parameter scale of the base model increases, the FLUX adaptations of ControlNet [53, 54] and IP-adapter [55] provided by the HuggingFace [16] community inject conditional features only into the dual-stream MMDiT blocks, rather than the entire network, to save memory. In contrast, since our Denoising-LoRA module introduces only a small number of parameters, we incorporate it into both the dual-stream and single-stream blocks to achieve better performance. The results in Tab. 4 and Fig. 11 confirm the validity of our choice. + +Computational Cost. The overheads of our approach in terms of inference GPU memory cost and additionally introduced parameters are minimal. The comparison results against the FLUX ControlNet [53, 54] and FLUX IP-Adapter [55] are shown in Tab. 5. + +More Conditional Branches. Our model places no restrictions on the number of supported conditions. The results shown in Fig. 12 demonstrate our model's strong scalability. As the number of conditional branches increases, the level of control becomes finer. + +![](images/a621a99e80843ff5996990493bd8940b3ce3197ae924350ffd10503c3b7c6b1c.jpg) +Figure 12. From left to right are training-free multi-conditional combination tasks under: $1/2/3/4$ conditions. + +More Application Scenarios. Our UniCombine can be easily extended to new scenarios, such as reference-based image stylization. 
After training a new Condition-LoRA on StyleBooth [10] dataset, our UniCombine is able to integrate the style of the reference image with other conditions successfully, as demonstrated in Fig. 13. + +![](images/697b9a0d3b2d71d9ac73a42626a46546416d486f912cfdb2949db0c5a79882ad.jpg) +Figure 13. Training-free Spatial-Style combination task. + +# 5. Conclusion + +We present UniCombine, a DiT-based multi-conditional controllable generative framework capable of handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images. Extensive experiments on Subject-Insertion, Subject-Spatial, and Multi-Spatial conditional generative tasks demonstrate the state-of-the-art performance of our UniCombine in both training-free and training-based versions. Additionally, we propose the SubjectSpatial200K dataset to address the lack of a publicly available dataset for training and testing multi-conditional generative models. We believe our work can advance the development of the controllable generation field. + +# References + +[1] G. Bradski. The OpenCV Library. Dr. Dobb's Journal of Software Tools, 2000. 5 +[2] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9650-9660, 2021. 6 +[3] Jiaxuan Chen, Bo Zhang, Qingdong He, Jinlong Peng, and Li Niu. Mureobjectstitch: Multi-reference image composition. arXiv preprint arXiv:2411.07462, 2024. 2 +[4] Xi Chen, Lianghua Huang, Yu Liu, Yujun Shen, Deli Zhao, and Hengshuang Zhao. Anydoor: Zero-shot object-level image customization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6593-6602, 2024. 2, 5 +[5] Zheng Chong, Xiao Dong, Haoxiang Li, Shiyue Zhang, Wenqing Zhang, Xujie Zhang, Hanqing Zhao, Dongmei Jiang, and Xiaodan Liang. 
Catvton: Concatenation is all you need for virtual try-on with diffusion models. arXiv preprint arXiv:2407.15886, 2024. 2 +[6] Guillaume Couairon, Marlene Careil, Matthieu Cord, Stephane Lathuiliere, and Jakob Verbeek. Zero-shot spatial layout conditioning for text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2174-2183, 2023. 3 +[7] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis, 2024. URL https://arxiv.org/abs/2403.03206, 2.2, 3 +[8] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022. 2 +[9] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks. Communications of the ACM, 63(11):139-144, 2020. 2 +[10] Zhen Han, Chaojie Mao, Zeyinzi Jiang, Yulin Pan, and Jingfeng Zhang. Stylebooth: Image style editing with multimodal instruction. arXiv preprint arXiv:2404.12154, 2024.9 +[11] Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022. 2 +[12] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 6 +[13] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 
2 + +[14] Minghui Hu, Jianbin Zheng, Daqing Liu, Chuanxia Zheng, Chaoyue Wang, Dacheng Tao, and Tat-Jen Cham. Cocktail: Mixing multi-modality control for text-conditional image generation. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 3 +[15] Teng Hu, Ran Yi, Haokun Zhu, Liang Liu, Jinlong Peng, Yabiao Wang, Chengjie Wang, and Lizhuang Ma. Stroke-based neural painting and stylization with dynamically predicted painting region. In Proceedings of the 31st ACM International Conference on Multimedia, pages 7470-7480, 2023. 2 +[16] HuggingFace. Diffusers: State-of-the-art diffusion models. https://github.com/huggingface/diffusers, 2023.8 +[17] Boyuan Jiang, Xiaobin Hu, Donghao Luo, Qingdong He, Chengming Xu, Jinlong Peng, Jiangning Zhang, Chengjie Wang, Yunsheng Wu, and Yanwei Fu. Fitdit: Advancing the authentic garment details for high-fidelity virtual try-on. arXiv preprint arXiv:2411.10499, 2024. 2 +[18] Ying Jin, Jinlong Peng, Qingdong He, Teng Hu, Hao Chen, Jiafu Wu, Wenbing Zhu, Mingmin Chi, Jun Liu, Yabiao Wang, et al. Dualanodiff: Dual-interrelated diffusion model for few-shot anomaly image generation. Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2025. 2 +[19] Xuan Ju, Xian Liu, Xintao Wang, Yuxuan Bian, Ying Shan, and Qiang Xu. Brushnet: A plug-and-play image inpainting model with decomposed dual-branch diffusion. arXiv preprint arXiv:2403.06976, 2024. 2, 5 +[20] Chanran Kim, Jeongin Lee, Shichang Joung, Bongmo Kim, and Yeul-Min Baek. Instantfamily: Masked attention for zero-shot multi-id image generation. arXiv preprint arXiv:2404.19427, 2024. 2 +[21] Lingjie Kong, Kai Wu, Xiaobin Hu, Wenhui Han, Jinlong Peng, Chengming Xu, Donghao Luo, Jiangning Zhang, Chengjie Wang, and Yanwei Fu. Anymaker: Zero-shot general object customization via decoupled dual-level id injection. Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2025. 
2 +[22] Nupur Kumari, Bingliang Zhang, Richard Zhang, Eli Shechtman, and Jun-Yan Zhu. Multi-concept customization of text-to-image diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1931-1941, 2023. 2, 7 +[23] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2023. 2, 3, 4, 5, 7 +[24] Dongxu Li, Junnan Li, and Steven Hoi. Blip-diffusion: Pretrained subject representation for controllable text-to-image generation and editing. Advances in Neural Information Processing Systems, 36:30146-30166, 2023. 3 +[25] Pengzhi Li, Qiang Nie, Ying Chen, Xi Jiang, Kai Wu, Yuhuan Lin, Yong Liu, Jinlong Peng, Chengjie Wang, and Feng Zheng. Tuning-free image customization with image and text guidance. In European Conference on Computer Vision, pages 233-250. Springer, 2024. 2 +[26] Yuheng Li, Haotian Liu, Qingyang Wu, Fangzhou Mu, Jianwei Yang, Jianfeng Gao, Chunyuan Li, and Yong Jae Lee. + +Gligen: Open-set grounded text-to-image generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22511-22521, 2023. 3, 5 +[27] Kuan Heng Lin, Sicheng Mo, Ben Klingher, Fangzhou Mu, and Bolei Zhou. Ctrl-x: Controlling structure and appearance for text-to-image generation without guidance. Advances in Neural Information Processing Systems, 37: 128911-128939, 2025. 2, 3, 7 +[28] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. arXiv preprint arXiv:2210.02747, 2022. 2, 3 +[29] Xingchao Liu, Chengyue Gong, and Qiang Liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 2, 3 +[30] Ron Mokady, Amir Hertz, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Null-text inversion for editing real images using guided diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6038–6047, 2023. 
2 +[31] Chong Mou, Xintao Wang, Liangbin Xie, Yanze Wu, Jian Zhang, Zhongang Qi, and Ying Shan. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 4296-4304, 2024. 2, 3 +[32] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 2 +[33] Jinlong Peng, Zekun Luo, Liang Liu, and Boshen Zhang. Frih: fine-grained region-aware image harmonization. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 4478-4486, 2024. 2 +[34] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. arXiv preprint arXiv:2307.01952, 2023. 7 +[35] Can Qin, Shu Zhang, Ning Yu, Yihao Feng, Xinyi Yang, Yingbo Zhou, Huan Wang, Juan Carlos Niebles, Caiming Xiong, Silvio Savarese, et al. Unicontrol: A unified diffusion model for controllable visual generation in the wild. arXiv preprint arXiv:2305.11147, 2023. 2, 3 +[36] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 6 +[37] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 2, 4 +[38] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. 
In Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference, + +Munich, Germany, October 5-9, 2015, proceedings, part III 18, pages 234-241. Springer, 2015. 2 +[39] Litu Rout, Yujia Chen, Nataniel Ruiz, Constantine Caramanis, Sanjay Shakkottai, and Wen-Sheng Chu. Semantic image inversion and editing using rectified stochastic differential equations. arXiv preprint arXiv:2410.10792, 2024. 2 +[40] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22500-22510, 2023. 2, 7 +[41] Kihyuk Sohn, Nataniel Ruiz, Kimin Lee, Daniel Castro Chin, Irina Blok, Huiwen Chang, Jarred Barber, Lu Jiang, Glenn Entis, Yuanzhen Li, et al. Styledrop: Text-to-image generation in any style. arXiv preprint arXiv:2306.00983, 2023. 3 +[42] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502, 2020. 2 +[43] Yizhi Song, Zhifei Zhang, Zhe Lin, Scott Cohen, Brian Price, Jianming Zhang, Soo Ye Kim, and Daniel Aliaga. Objectstitch: Generative object compositing. arXiv preprint arXiv:2212.00932, 2022. 2, 5 +[44] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 2 +[45] Zhenxiong Tan, Songhua Liu, Xingyi Yang, Qiaochu Xue, and Xinchao Wang. Ominicontrol: Minimal and universal control for diffusion transformer. arXiv preprint arXiv:2411.15098, 3, 2024. 2, 3, 4, 5, 12 +[46] Haoxuan Wang, Qingdong He, Jinlong Peng, Hao Yang, Mingmin Chi, and Yabiao Wang. Mamba-yolo-world: Marrying yolo-world with mamba for open-vocabulary detection. IEEE International Conference on Acoustics, Speech, and Signal Processing, 2025. 
5 +[47] Qixun Wang, Xu Bai, Haofan Wang, Zekui Qin, Anthony Chen, Huaxia Li, Xu Tang, and Yao Hu. Instantid: Zero-shot identity-preserving generation in seconds. arXiv preprint arXiv:2401.07519, 2024. 2, 3 +[48] Su Wang, Chitwan Saharia, Ceslee Montgomery, Jordi Pont-Tuset, Shai Noy, Stefano Pellegrini, Yasumasa Onoe, Sarah Laszlo, David J Fleet, Radu Soricut, et al. Imagen editor and editbench: Advancing and evaluating text-guided image inpainting. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 18359-18369, 2023. 2 +[49] Xudong Wang, Trevor Darrell, Sai Saketh Rambhatla, Rohit Girdhar, and Ishan Misra. Instancediffusion: Instance-level control for image generation, 2024. 5 +[50] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 6 +[51] Daniel Winter, Asaf Shul, Matan Cohen, Dana Berman, Yael Pritch, Alex Rav-Acha, and Yedid Hoshen. Objectmate: A + +recurrence prior for object insertion and subject-driven generation. arXiv preprint arXiv:2412.08645, 2024. 2 +[52] Peng Xing, Haofan Wang, Yanpeng Sun, Qixun Wang, Xu Bai, Hao Ai, Renyuan Huang, and Zechao Li. Csgo: Content-style composition in text-to-image generation. arXiv preprint arXiv:2408.16766, 2024. 2 +[53] XLabs-AI. Flux-controlnet-canny-diffusers. https://huggingface.co/XLabs-AI/flux-controlnet-canny-diffusers, 2024. 7, 8 +[54] XLabs-AI. Flux-controlnet-depth-diffusers. https://huggingface.co/XLabs-AI/flux-controlnet-depth-diffusers, 2024. 7, 8 +[55] XLabs-AI. Flux-ip-adapter. https://huggingface.co/XLabs-AI/flux-ip-adapter, 2024. 7, 8 +[56] Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, and Fang Wen. Paint by example: Exemplar-based image editing with diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18381-18391, 2023. 
2 +[57] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In CVPR, 2024. 5 +[58] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721, 2023. 2, 3 +[59] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023. 2, 3 +[60] Shihao Zhao, Dongdong Chen, Yen-Chun Chen, Jianmin Bao, Shaozhe Hao, Lu Yuan, and Kwan-Yee K Wong. Uni-controlnet: All-in-one control to text-to-image diffusion models. Advances in Neural Information Processing Systems, 36, 2024. 2, 3 +[61] Junhao Zhuang, Yanhong Zeng, Wenran Liu, Chun Yuan, and Kai Chen. A task is worth one word: Learning with task prompts for high-quality versatile image inpainting. In European Conference on Computer Vision, pages 195-211. Springer, 2025. 2, 5 + +# UniCombine: Unified Multi-Conditional Combination with Diffusion Transformer + +Supplementary Material + +# A1. Dataset Partitioning Scheme + +In our proposed SubjectSpatial200K dataset, we utilize the ChatGPT-4o assessment scores provided by Subjects200K [45] on Subject Consistency, Composition Structure, and Image Quality to guide the dataset partitioning in our experiments. + +- Subject Consistency: Ensuring the identity of the subject image is consistent with that of the ground truth image. +- Composition Structure: Verifying a reasonable composition of the subject and ground truth images. +- Image Quality: Confirming each image pair maintains high resolution and visual fidelity. + +We partition the dataset into 139,403 training samples and 5,827 testing samples through Algorithm 1. + +
Algorithm 1: Dataset Partitioning Scheme
Input: example
Output: train or test
cs← example["Composition Structure"]
iq← example["Image Quality"]
sc← example["Subject Consistency"]
scores← [cs, iq, sc]
if all(s==5 for s in scores) then
return train;
else if cs≥3 and iq==5 and sc==5 then
return test;
+ +# A2. More Ablation on CMMDiT Attention + +More quantitative and qualitative ablation results on the other multi-conditional generative tasks are provided here. The comprehensive ablation results in Tab. A1, Tab. A2, Tab. A3, Fig. A1, Fig. A2, and Fig. A3 demonstrate that the UniCombine performs better with our proposed CMMDiT Attention. + +
MethodCLIP-I ↑DINO ↑CLIP-T ↑F1 ↑
Ours w/o CMMDiT91.5186.3133.200.16
Ours w/ CMMDiT91.8486.8833.210.17
+ +Table A1. Quantitative ablation of CMMDiT Attention mechanism on training-free Subject-Canny task + +# A3. More Qualitative Results + +More qualitative results are presented in Fig. A4 and Fig. A5. + +![](images/c8c9030826d874847297d6aad99732ff7992fa1efed8eaf2d42146029239dcfe.jpg) +Figure A1. Qualitative ablation of CMMDiT Attention mechanism on training-free Subject-Canny task + +
MethodCLIP-I ↑DINO ↑CLIP-T ↑MSE ↓
Ours w/o CMMDiT90.8385.3833.38547.63
Ours w/ CMMDiT91.1585.7333.41507.40
+ +![](images/eec53f0d14c8df15f39c097595e64b40514c9c785b2e319a4f3ff0334abfd869.jpg) +Table A2. Quantitative ablation of CMMDiT Attention mechanism on training-free Subject-Depth task +Figure A2. Qualitative ablation of CMMDiT Attention mechanism on training-free Subject-Depth task + +
MethodCLIP-T ↑F1 ↑MSE ↓
Ours w/o CMMDiT33.700.17524.04
Ours w/ CMMDiT33.700.18519.53
+ +Table A3. Quantitative ablation of CMMDiT Attention mechanism on training-free Multi-Spatial task + +![](images/4009d7abc995632fbc8e9c0c98b070e0d250aff3285f29dd8f899b9c93e1e20e.jpg) +Figure A3. Qualitative ablation of CMMDiT Attention mechanism on training-free Multi-Spatial task + +![](images/adfac4a9eebfdf05d1126a0a351b1aaafa113e3c17e6ad1d0d3ccee8c001ec91.jpg) +Figure A4. More qualitative results on Multi-Spatial and Subject-Insertion tasks. + +![](images/721139a8e4ed891aeab8a9eecb8d33d046e4e4574b78dabb5cd359d3ae06e6b6.jpg) + +![](images/179cb411321ca8ecf6e101c92a53a0ef8cbc9728a7f08f18d3c37f263493e513.jpg) +Figure A5. More qualitative results on Subject-Depth and Subject-Canny tasks. + +![](images/6cc95ee7d4dc5bf05bbb25255f5081767689dccf861e6597d6fd8d83a62792f9.jpg) \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09277/images/081e7579449455ce9a0a6c20f089187297d01d022d3027a0bfa4afb2c5a31aa9.jpg b/data/2025/2503_09xxx/2503.09277/images/081e7579449455ce9a0a6c20f089187297d01d022d3027a0bfa4afb2c5a31aa9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e5a713645766a1c08f21f806b2f38f60f0a6aa5 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/081e7579449455ce9a0a6c20f089187297d01d022d3027a0bfa4afb2c5a31aa9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:709da8e3ae9bd07de3a8b6c217926ba732f01c783523d8314d6189c50b16f3d4 +size 40776 diff --git a/data/2025/2503_09xxx/2503.09277/images/179cb411321ca8ecf6e101c92a53a0ef8cbc9728a7f08f18d3c37f263493e513.jpg b/data/2025/2503_09xxx/2503.09277/images/179cb411321ca8ecf6e101c92a53a0ef8cbc9728a7f08f18d3c37f263493e513.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b548d8b510ed7851f392b5d2c819246fa9bb4997 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/179cb411321ca8ecf6e101c92a53a0ef8cbc9728a7f08f18d3c37f263493e513.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b9a1fdd86ae44331ad2f5c72684ebc97cc6a9c667e995879c622146790457137 +size 174330 diff --git a/data/2025/2503_09xxx/2503.09277/images/1e06b7a6c1ae6b58e94df98cc9be3d1764e9ac9f480e954299aa319606949d55.jpg b/data/2025/2503_09xxx/2503.09277/images/1e06b7a6c1ae6b58e94df98cc9be3d1764e9ac9f480e954299aa319606949d55.jpg new file mode 100644 index 0000000000000000000000000000000000000000..65ba1542bbd556e186e506680afaec360624c049 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/1e06b7a6c1ae6b58e94df98cc9be3d1764e9ac9f480e954299aa319606949d55.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72a5f48fb2dcee34a6802ca62351f4d94fe66e4d746eff74474f87c83a305c58 +size 86895 diff --git a/data/2025/2503_09xxx/2503.09277/images/21b0e5e13ae17e5a1c0fc46b9ae2bd7c1ae40e8754d83e2afe15c56a0a886ccc.jpg b/data/2025/2503_09xxx/2503.09277/images/21b0e5e13ae17e5a1c0fc46b9ae2bd7c1ae40e8754d83e2afe15c56a0a886ccc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7f2df86ee537163a3cad7b268e9f45339c4e007d --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/21b0e5e13ae17e5a1c0fc46b9ae2bd7c1ae40e8754d83e2afe15c56a0a886ccc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85918bfe739ab1e2a7decaa87c05fdc929559ea85037cb0d68a8c7bdc76a65ab +size 11653 diff --git a/data/2025/2503_09xxx/2503.09277/images/2a92bbe800381328c2717f661d17a8523810856178e0fb0febb65310f9baa364.jpg b/data/2025/2503_09xxx/2503.09277/images/2a92bbe800381328c2717f661d17a8523810856178e0fb0febb65310f9baa364.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7f0c3f944f4c6b8482606f12267d6b6439245903 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/2a92bbe800381328c2717f661d17a8523810856178e0fb0febb65310f9baa364.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b839751151db900e062734d6ef135c75b93fbd0eab5b8886529d012a8c8096d +size 47088 diff --git 
a/data/2025/2503_09xxx/2503.09277/images/2c8007f4046d1a3e78dbaf014b7a491dbbbf9b78ef27cf90ee2c58035b246a98.jpg b/data/2025/2503_09xxx/2503.09277/images/2c8007f4046d1a3e78dbaf014b7a491dbbbf9b78ef27cf90ee2c58035b246a98.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb03035b013d8d89f520d26ee8a39cbd81eb5848 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/2c8007f4046d1a3e78dbaf014b7a491dbbbf9b78ef27cf90ee2c58035b246a98.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:876c272238986d016ca0f06a8374f274140b9fc038e038672deb897ebe111739 +size 234700 diff --git a/data/2025/2503_09xxx/2503.09277/images/3162d24f03eacb746bd4ecce492028784582c1beb179bc5cf91d96a185a56eec.jpg b/data/2025/2503_09xxx/2503.09277/images/3162d24f03eacb746bd4ecce492028784582c1beb179bc5cf91d96a185a56eec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ef7ea0d88bc7b223de7fc6ba67c77ebbfde9d4e4 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/3162d24f03eacb746bd4ecce492028784582c1beb179bc5cf91d96a185a56eec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04fb4cd5a4bb59fd61c3c147aaa4160a3109d4a9534710527c954704ab36bd46 +size 44302 diff --git a/data/2025/2503_09xxx/2503.09277/images/3618030c54b9a80305073bef94f2394a26e7580cbe554afe3dc981094168e421.jpg b/data/2025/2503_09xxx/2503.09277/images/3618030c54b9a80305073bef94f2394a26e7580cbe554afe3dc981094168e421.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bf48ff8be1d7a0388ca92f059c7b34e8edc1fb8e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/3618030c54b9a80305073bef94f2394a26e7580cbe554afe3dc981094168e421.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffbd7b160842ee81b71d3c352962f0c7de5dcd86918207ec210afce8679b486c +size 16869 diff --git a/data/2025/2503_09xxx/2503.09277/images/3661792d048303c132e0cc107e11197e07793e4fd697e499d3481bbdc763b8c7.jpg 
b/data/2025/2503_09xxx/2503.09277/images/3661792d048303c132e0cc107e11197e07793e4fd697e499d3481bbdc763b8c7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5bf4b832e995eedb17b258e5c50f87c38dd789df --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/3661792d048303c132e0cc107e11197e07793e4fd697e499d3481bbdc763b8c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:740a8552a06c0508c8180f55e4cc98209dff1875b5d8a777b40edea43db49007 +size 2432 diff --git a/data/2025/2503_09xxx/2503.09277/images/366bfb556eb7348d43c9ba6eb176249717e1ac12f3b91c45a0204d9d3b46b469.jpg b/data/2025/2503_09xxx/2503.09277/images/366bfb556eb7348d43c9ba6eb176249717e1ac12f3b91c45a0204d9d3b46b469.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bbaa5d964d17b0e79d7401eda4cf77156e9da2ae --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/366bfb556eb7348d43c9ba6eb176249717e1ac12f3b91c45a0204d9d3b46b469.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8a8617d6d1d6ffb5fac8ee29b8445e0a53699ca30ccbc5c4518de96881b0c05 +size 12900 diff --git a/data/2025/2503_09xxx/2503.09277/images/4009d7abc995632fbc8e9c0c98b070e0d250aff3285f29dd8f899b9c93e1e20e.jpg b/data/2025/2503_09xxx/2503.09277/images/4009d7abc995632fbc8e9c0c98b070e0d250aff3285f29dd8f899b9c93e1e20e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb14dfaf62b6c9ab54d7b8ea23699ee67e2a9a0b --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/4009d7abc995632fbc8e9c0c98b070e0d250aff3285f29dd8f899b9c93e1e20e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b69ad2318815a795be17a1a8384dbd77313fbee93e1c01967b5b292ec13a5460 +size 39165 diff --git a/data/2025/2503_09xxx/2503.09277/images/44ebde2dd6cfbcb637ef99847a469c9c5e74ddeb4368792fe3d6809b1e91856e.jpg b/data/2025/2503_09xxx/2503.09277/images/44ebde2dd6cfbcb637ef99847a469c9c5e74ddeb4368792fe3d6809b1e91856e.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..9bf3089c4edd35004f89563e6c42e398b3024fbf --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/44ebde2dd6cfbcb637ef99847a469c9c5e74ddeb4368792fe3d6809b1e91856e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce3a01e4de886cd2ab5caf02801577ed8eb3dfcaac0e2382e20a9d5c50f27275 +size 82393 diff --git a/data/2025/2503_09xxx/2503.09277/images/506fe3547ab1943e7faa90d0f349eb3f11dc59ed4a47d7cf36007f74a6ca38f0.jpg b/data/2025/2503_09xxx/2503.09277/images/506fe3547ab1943e7faa90d0f349eb3f11dc59ed4a47d7cf36007f74a6ca38f0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0565b329f2ab897a67679006cf74c721df9d9ef7 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/506fe3547ab1943e7faa90d0f349eb3f11dc59ed4a47d7cf36007f74a6ca38f0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d45d4f12a146ea9847a39f38f32094e963f834779c8fc5a585148546878b3c9 +size 88959 diff --git a/data/2025/2503_09xxx/2503.09277/images/697b9a0d3b2d71d9ac73a42626a46546416d486f912cfdb2949db0c5a79882ad.jpg b/data/2025/2503_09xxx/2503.09277/images/697b9a0d3b2d71d9ac73a42626a46546416d486f912cfdb2949db0c5a79882ad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d4b1f42841576dee98ffd275184d403f6d68a6e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/697b9a0d3b2d71d9ac73a42626a46546416d486f912cfdb2949db0c5a79882ad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8d37b3d0a2142bba515a7c6bac6bac7e766846fa8f47f5a0eea5aa47767a65b +size 42045 diff --git a/data/2025/2503_09xxx/2503.09277/images/6cc95ee7d4dc5bf05bbb25255f5081767689dccf861e6597d6fd8d83a62792f9.jpg b/data/2025/2503_09xxx/2503.09277/images/6cc95ee7d4dc5bf05bbb25255f5081767689dccf861e6597d6fd8d83a62792f9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a9c94937fa9fe2dff3ce155f382d2cc2b98d2aa --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09277/images/6cc95ee7d4dc5bf05bbb25255f5081767689dccf861e6597d6fd8d83a62792f9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac74cca43e03715127143bf39c623b6558a48f78fb9344117789472679f41e20 +size 167065 diff --git a/data/2025/2503_09xxx/2503.09277/images/719ea07bb1cd1e89b41905beec203585fbe544e5a1ee5e9ab3f601d287ad2795.jpg b/data/2025/2503_09xxx/2503.09277/images/719ea07bb1cd1e89b41905beec203585fbe544e5a1ee5e9ab3f601d287ad2795.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6e62587119e1237aac65c3ef021f7fcedc5aaddb --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/719ea07bb1cd1e89b41905beec203585fbe544e5a1ee5e9ab3f601d287ad2795.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a0b66fb29f3fb8c65606761e027421eeea3cd0aa828a7217b04041ed7ab7b17 +size 20914 diff --git a/data/2025/2503_09xxx/2503.09277/images/721139a8e4ed891aeab8a9eecb8d33d046e4e4574b78dabb5cd359d3ae06e6b6.jpg b/data/2025/2503_09xxx/2503.09277/images/721139a8e4ed891aeab8a9eecb8d33d046e4e4574b78dabb5cd359d3ae06e6b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cc4877bd014c3667a577598dda688a6c98b20cbe --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/721139a8e4ed891aeab8a9eecb8d33d046e4e4574b78dabb5cd359d3ae06e6b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c736a29b1a340865096e478df65440e80911ca6c4808b33929336ba831da9a4a +size 206874 diff --git a/data/2025/2503_09xxx/2503.09277/images/7780b4ae3fc30805750b62e7bb1b1c2e1322a555efcac56b2f8f7632150cf53d.jpg b/data/2025/2503_09xxx/2503.09277/images/7780b4ae3fc30805750b62e7bb1b1c2e1322a555efcac56b2f8f7632150cf53d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4dcad5041517eb931b079908268d411cbba04118 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/7780b4ae3fc30805750b62e7bb1b1c2e1322a555efcac56b2f8f7632150cf53d.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:c79dfbceb84c75aa4e3d51288c3c00172d85cc19713bee727d8e6609faffaed4 +size 20301 diff --git a/data/2025/2503_09xxx/2503.09277/images/7fa892b46091963fb1ee53c8e239a5cf468404dfb70774813a489096638da93b.jpg b/data/2025/2503_09xxx/2503.09277/images/7fa892b46091963fb1ee53c8e239a5cf468404dfb70774813a489096638da93b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69479c3c867a3559abeac620f7895b1fb3fcf750 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/7fa892b46091963fb1ee53c8e239a5cf468404dfb70774813a489096638da93b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b99844561d60f7674655063592e405507c2b78af6890c9bcb0fe57031d25dece +size 64504 diff --git a/data/2025/2503_09xxx/2503.09277/images/825bdf58c053f99cc780532dc82a60487da430bb38579c85ad77eb49436315d5.jpg b/data/2025/2503_09xxx/2503.09277/images/825bdf58c053f99cc780532dc82a60487da430bb38579c85ad77eb49436315d5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f1ee76c583daf23921178e60518b0fae67b5f63e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/825bdf58c053f99cc780532dc82a60487da430bb38579c85ad77eb49436315d5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dea4018f5cf99464b9fc62748339694de7b75a395dcc5ee27239989e7d7db964 +size 15609 diff --git a/data/2025/2503_09xxx/2503.09277/images/8b5b9183e06014b6e81f79d2b0e6e78e929ba4f810448e1f8a953e9f0afcf401.jpg b/data/2025/2503_09xxx/2503.09277/images/8b5b9183e06014b6e81f79d2b0e6e78e929ba4f810448e1f8a953e9f0afcf401.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5530636d0d1cd54cfb715be5af2c319cb5c4052a --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/8b5b9183e06014b6e81f79d2b0e6e78e929ba4f810448e1f8a953e9f0afcf401.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bffe2d0dc54d10c01c321ede5f10cf189aadc6f48215c32e475e27da16997595 +size 20087 diff --git 
a/data/2025/2503_09xxx/2503.09277/images/8c57a3c424f01738f8a53f6d6f0c8688e8016cc7a73bf6ea21df63ffdafe3eaa.jpg b/data/2025/2503_09xxx/2503.09277/images/8c57a3c424f01738f8a53f6d6f0c8688e8016cc7a73bf6ea21df63ffdafe3eaa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..08eb63e3263903ec6f4cd4af9f27a175c65056f6 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/8c57a3c424f01738f8a53f6d6f0c8688e8016cc7a73bf6ea21df63ffdafe3eaa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:016ec497b56b1401928a50e055915c4c72072fd34ccf1cc0350d4d5a62c39769 +size 19374 diff --git a/data/2025/2503_09xxx/2503.09277/images/93282f189196c23bdfd799eb8a46475b81447637d02f5a79af7f94e82a528bc4.jpg b/data/2025/2503_09xxx/2503.09277/images/93282f189196c23bdfd799eb8a46475b81447637d02f5a79af7f94e82a528bc4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..048039107bee9338e4b9aa93b7bcafbfeec0bc1d --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/93282f189196c23bdfd799eb8a46475b81447637d02f5a79af7f94e82a528bc4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b10ff0a27bfd1691c09bda351509a4411ecc78a740d0161169f98996bd0f5f5 +size 2848 diff --git a/data/2025/2503_09xxx/2503.09277/images/99512443b31f1e244aae53591cf61ea45942f31984f52b9e648e6112280d77e4.jpg b/data/2025/2503_09xxx/2503.09277/images/99512443b31f1e244aae53591cf61ea45942f31984f52b9e648e6112280d77e4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f40472007a72166a5e30e8be3b1d2c22b90ae485 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/99512443b31f1e244aae53591cf61ea45942f31984f52b9e648e6112280d77e4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7082300b4c10c3013944bd455c589684446bb32e3dd812c0e3c3e718da15774 +size 6869 diff --git a/data/2025/2503_09xxx/2503.09277/images/a34bbcb900321fe501a520855202f376e6664d9faf88390dd30b55aa8d6f1b53.jpg 
b/data/2025/2503_09xxx/2503.09277/images/a34bbcb900321fe501a520855202f376e6664d9faf88390dd30b55aa8d6f1b53.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a56383163ad463da0ab42b4cf3d06360fea1b00 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/a34bbcb900321fe501a520855202f376e6664d9faf88390dd30b55aa8d6f1b53.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40c4dcc591b1f2068f9e9d4e4af1937cba5837020dca2c97bdef32b60f269ae6 +size 127930 diff --git a/data/2025/2503_09xxx/2503.09277/images/a4f5c4fc9d8ce66afbdd123ab7fca8f063e5af00acb1254be7e07f83f23cd544.jpg b/data/2025/2503_09xxx/2503.09277/images/a4f5c4fc9d8ce66afbdd123ab7fca8f063e5af00acb1254be7e07f83f23cd544.jpg new file mode 100644 index 0000000000000000000000000000000000000000..322410ef04fe992027622fdd13e0e51509924c6d --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/a4f5c4fc9d8ce66afbdd123ab7fca8f063e5af00acb1254be7e07f83f23cd544.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e7baea7ea7c83ba240eee758eefa02863a7fc29980960a0c6bb673147a5e187 +size 30326 diff --git a/data/2025/2503_09xxx/2503.09277/images/a621a99e80843ff5996990493bd8940b3ce3197ae924350ffd10503c3b7c6b1c.jpg b/data/2025/2503_09xxx/2503.09277/images/a621a99e80843ff5996990493bd8940b3ce3197ae924350ffd10503c3b7c6b1c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ddf197a1aa68132dba1d1fb5587faf9a77800c3 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/a621a99e80843ff5996990493bd8940b3ce3197ae924350ffd10503c3b7c6b1c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d15f4e39a67a5c1f2fbea837da79e874b169e463291d7ffc76651e8b0c156cf +size 52001 diff --git a/data/2025/2503_09xxx/2503.09277/images/adfac4a9eebfdf05d1126a0a351b1aaafa113e3c17e6ad1d0d3ccee8c001ec91.jpg b/data/2025/2503_09xxx/2503.09277/images/adfac4a9eebfdf05d1126a0a351b1aaafa113e3c17e6ad1d0d3ccee8c001ec91.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..451d5c613b12a29615046609d2ebd14904e217a1 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/adfac4a9eebfdf05d1126a0a351b1aaafa113e3c17e6ad1d0d3ccee8c001ec91.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:741897812deae39d3b18a3cc86fba44dce0a659c98e747fce112d0e573ed35ee +size 180277 diff --git a/data/2025/2503_09xxx/2503.09277/images/b5701621428ee2941e72b32e1a85040f321539ed97d657923eef29882e89bdfd.jpg b/data/2025/2503_09xxx/2503.09277/images/b5701621428ee2941e72b32e1a85040f321539ed97d657923eef29882e89bdfd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba5643639e77b7f1fedf079ac9100b978336a265 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/b5701621428ee2941e72b32e1a85040f321539ed97d657923eef29882e89bdfd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e7a45fa65512c9ae22794bb3057a2c4ffabe00d55d9beede529ff4ae5e4e24b +size 14961 diff --git a/data/2025/2503_09xxx/2503.09277/images/c8c9030826d874847297d6aad99732ff7992fa1efed8eaf2d42146029239dcfe.jpg b/data/2025/2503_09xxx/2503.09277/images/c8c9030826d874847297d6aad99732ff7992fa1efed8eaf2d42146029239dcfe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5eae28709eb62b770b0ae99525b1cce0f843510d --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/c8c9030826d874847297d6aad99732ff7992fa1efed8eaf2d42146029239dcfe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1481bb29aaee3a64e64cd2d979edd758c748e5f07b73852c20bce28c77d58f1b +size 40514 diff --git a/data/2025/2503_09xxx/2503.09277/images/d04a32c8e874aab106f9fa111d4a7b255e65e731a4acd2452f053c1c30949d06.jpg b/data/2025/2503_09xxx/2503.09277/images/d04a32c8e874aab106f9fa111d4a7b255e65e731a4acd2452f053c1c30949d06.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62a493b153f679f8dfa9bf7913bca526890cfefb --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09277/images/d04a32c8e874aab106f9fa111d4a7b255e65e731a4acd2452f053c1c30949d06.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:656025c605ae4526c66cf7e912be2b328e94f6dcf2f537d7ba5261f0d8a755a3 +size 48261 diff --git a/data/2025/2503_09xxx/2503.09277/images/d60c55bfaeeeedfd5aefc1bba87bd491b67f9a1c54a3f0b04bece1daafa5fa08.jpg b/data/2025/2503_09xxx/2503.09277/images/d60c55bfaeeeedfd5aefc1bba87bd491b67f9a1c54a3f0b04bece1daafa5fa08.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7a3ee94865988b4f4de2c34cc6057d3eac1b6171 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/d60c55bfaeeeedfd5aefc1bba87bd491b67f9a1c54a3f0b04bece1daafa5fa08.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ece158e8db1789f82cfd43af58401beb52a8c51af787608decd48c5e47b9d771 +size 103814 diff --git a/data/2025/2503_09xxx/2503.09277/images/e21e3bf9a4a78294b155fc81f3e6b7e3e4251e119c112d81fb0f47a4822439d5.jpg b/data/2025/2503_09xxx/2503.09277/images/e21e3bf9a4a78294b155fc81f3e6b7e3e4251e119c112d81fb0f47a4822439d5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..010156d9570f10f8f8f013318a0d9b5727f0de58 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/e21e3bf9a4a78294b155fc81f3e6b7e3e4251e119c112d81fb0f47a4822439d5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cbf1ac73a58cea83685b741b7436266347b9e8f4f4b889fd99bb1388cfcae95 +size 32231 diff --git a/data/2025/2503_09xxx/2503.09277/images/e87e6f8b3e137a3f2fcae475f15efcbe2b56a7113615b229b8cb075304866a5d.jpg b/data/2025/2503_09xxx/2503.09277/images/e87e6f8b3e137a3f2fcae475f15efcbe2b56a7113615b229b8cb075304866a5d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cdc4608341563730b6985928cd3f73fcb4293683 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/e87e6f8b3e137a3f2fcae475f15efcbe2b56a7113615b229b8cb075304866a5d.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:bb26d21de9a50b9dde593e23d6a2300d44ce601659c0d26abe07c82130a050b8 +size 43147 diff --git a/data/2025/2503_09xxx/2503.09277/images/eec53f0d14c8df15f39c097595e64b40514c9c785b2e319a4f3ff0334abfd869.jpg b/data/2025/2503_09xxx/2503.09277/images/eec53f0d14c8df15f39c097595e64b40514c9c785b2e319a4f3ff0334abfd869.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62c612f26a28ee906471a858cbe5adbfea1a6d06 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/eec53f0d14c8df15f39c097595e64b40514c9c785b2e319a4f3ff0334abfd869.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f5ca47fdf2ed0173f55e72d69976d4e523f63f2cedf2af036afd87cd1a3e80e +size 40533 diff --git a/data/2025/2503_09xxx/2503.09277/images/fc8bc88e1ec09551aeba8132d1ccff2d2a1eb574ea62259a59e0357472255443.jpg b/data/2025/2503_09xxx/2503.09277/images/fc8bc88e1ec09551aeba8132d1ccff2d2a1eb574ea62259a59e0357472255443.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cae1afb81d55a1e10c9101ae6c12fa28895d284d --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/images/fc8bc88e1ec09551aeba8132d1ccff2d2a1eb574ea62259a59e0357472255443.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3980fa01608f376e83f1f77cb5074012d0329b0e2e46048912dfb764c6147e18 +size 54104 diff --git a/data/2025/2503_09xxx/2503.09277/layout.json b/data/2025/2503_09xxx/2503.09277/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..29941e6494259b7b1ddb46d05b17be9464a9695b --- /dev/null +++ b/data/2025/2503_09xxx/2503.09277/layout.json @@ -0,0 +1,9730 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 138, + 103, + 473, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 103, + 473, + 137 + ], + "spans": [ + { + "bbox": [ + 138, + 103, + 473, + 137 + ], + "type": "text", + "content": "UniCombine: Unified Multi-Conditional Combination with Diffusion 
Transformer" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "spans": [ + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "text", + "content": "Haoxuan Wang" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "text", + "content": ", Jinlong Peng" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "inline_equation", + "content": "^{2\\dagger}" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "text", + "content": ", Qingdong He" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "text", + "content": ", Hao Yang" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "text", + "content": ", Ying Jin" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "text", + "content": ", Jiafu Wu" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "text", + "content": ", Xiaobin Hu" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "text", + "content": ", Yanjie Pan" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "text", + "content": ", Zhenye Gan" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "inline_equation", + "content": "^{2}" 
+ }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "text", + "content": ", Mingmin Chi" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "text", + "content": ", Bo Peng" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "inline_equation", + "content": "^{4*}" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "text", + "content": ", Yabiao Wang" + }, + { + "bbox": [ + 96, + 150, + 512, + 179 + ], + "type": "inline_equation", + "content": "^{2,5*}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 81, + 180, + 529, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 180, + 529, + 194 + ], + "spans": [ + { + "bbox": [ + 81, + 180, + 529, + 194 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 81, + 180, + 529, + 194 + ], + "type": "text", + "content": "Fudan University, " + }, + { + "bbox": [ + 81, + 180, + 529, + 194 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 81, + 180, + 529, + 194 + ], + "type": "text", + "content": "Tencent Youtu Lab, " + }, + { + "bbox": [ + 81, + 180, + 529, + 194 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 81, + 180, + 529, + 194 + ], + "type": "text", + "content": "Shanghai Jiao Tong University, " + }, + { + "bbox": [ + 81, + 180, + 529, + 194 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 81, + 180, + 529, + 194 + ], + "type": "text", + "content": "Shanghai Ocean University " + }, + { + "bbox": [ + 81, + 180, + 529, + 194 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 81, + 180, + 529, + 194 + ], + "type": "text", + "content": "Zhejiang University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 157, + 208, + 448, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 208, + 448, + 
221 + ], + "spans": [ + { + "bbox": [ + 157, + 208, + 448, + 221 + ], + "type": "text", + "content": "https://github.com/Xuan-World/UniCombine" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 59, + 232, + 550, + 542 + ], + "blocks": [ + { + "bbox": [ + 59, + 232, + 550, + 542 + ], + "lines": [ + { + "bbox": [ + 59, + 232, + 550, + 542 + ], + "spans": [ + { + "bbox": [ + 59, + 232, + 550, + 542 + ], + "type": "image", + "image_path": "2c8007f4046d1a3e78dbaf014b7a491dbbbf9b78ef27cf90ee2c58035b246a98.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 548, + 555, + 583 + ], + "lines": [ + { + "bbox": [ + 55, + 548, + 555, + 583 + ], + "spans": [ + { + "bbox": [ + 55, + 548, + 555, + 583 + ], + "type": "text", + "content": "Figure 1. Fantastic results of our proposed UniCombine on multi-conditional controllable generation: (a) Subject-Insertion task. (b) and (c) Subject-Spatial task. (d) Multi-Spatial task. Our unified framework effectively handles any combination of input conditions and achieves remarkable alignment with all of them, including but not limited to text prompts, spatial maps, and subject images." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 152, + 592, + 200, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 592, + 200, + 604 + ], + "spans": [ + { + "bbox": [ + 152, + 592, + 200, + 604 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 619, + 295, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 619, + 295, + 679 + ], + "spans": [ + { + "bbox": [ + 55, + 619, + 295, + 679 + ], + "type": "text", + "content": "With the rapid development of diffusion models in image generation, the demand for more powerful and flexible controllable frameworks is increasing. 
Although existing methods can guide generation beyond text prompts, the challenge of effectively combining multiple conditional inputs" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 593, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 593, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 593, + 555, + 713 + ], + "type": "text", + "content": "while maintaining consistency with all of them remains unsolved. To address this, we introduce UniCombine, a DiT-based multi-conditional controllable generative framework capable of handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images. Specifically, we introduce a novel Conditional MMDiT Attention mechanism and incorporate a trainable LoRA module to build both the training-free and training-based versions. Additionally, we propose a new pipeline to construct SubjectSpatial200K, the first dataset" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 228, + 35, + 562 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 228, + 35, + 562 + ], + "spans": [ + { + "bbox": [ + 14, + 228, + 35, + 562 + ], + "type": "text", + "content": "arXiv:2503.09277v2 [cs.CV] 8 Jul 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 693, + 139, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 693, + 139, + 703 + ], + "spans": [ + { + "bbox": [ + 70, + 693, + 139, + 703 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 70, + 693, + 139, + 703 + ], + "type": "text", + "content": " Equal contribution." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 704, + 150, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 704, + 150, + 712 + ], + "spans": [ + { + "bbox": [ + 70, + 704, + 150, + 712 + ], + "type": "text", + "content": "* Corresponding author." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 54, + 72, + 294, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 72, + 294, + 144 + ], + "spans": [ + { + "bbox": [ + 54, + 72, + 294, + 144 + ], + "type": "text", + "content": "designed for multi-conditional generative tasks covering both the subject-driven and spatially-aligned conditions. Extensive experimental results on multi-conditional generation demonstrate the outstanding universality and powerful capability of our approach with state-of-the-art performance." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 167, + 136, + 179 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 167, + 136, + 179 + ], + "spans": [ + { + "bbox": [ + 55, + 167, + 136, + 179 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 186, + 296, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 186, + 296, + 354 + ], + "spans": [ + { + "bbox": [ + 56, + 186, + 296, + 354 + ], + "type": "text", + "content": "With the advancement of diffusion-based [13, 42] text-to-image generative technology, a series of single-conditional controllable generative frameworks like ControlNet [59], T2I-Adapter [31], IP-Adapter [58], and InstantID [47] have expanded the scope of the control signals from text prompts to image conditions. It allows users to control more plentiful aspects of the generated images, such as layout, style, characteristics, etc. These conventional approaches are specifically designed for the UNet [38] backbone of Latent Diffusion Models (LDM) [37] with dedicated control networks. Besides, some recent approaches, such as Omini-Control [45], integrate control signals into the Diffusion Transformer (DiT) [7, 23] architecture, which demonstrates superior performance compared to the UNet in LDM." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 355, + 296, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 355, + 296, + 486 + ], + "spans": [ + { + "bbox": [ + 56, + 355, + 296, + 486 + ], + "type": "text", + "content": "Although the methods mentioned above have achieved a promising single-conditional performance, the challenge of multi-conditional controllable generation is still unsolved. Previous multi-conditional generative methods like UniControl [35] and UniControlNet [60] are generally restricted to handling spatial conditions like Canny or Depth maps and fail to accommodate subject conditions, resulting in limited applicable scenarios. Despite the recently proposed Ctrl-X [27] features controlling structure and appearance together, its performance is unsatisfactory and supports only a limited combination of conditions." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 487, + 296, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 487, + 296, + 605 + ], + "spans": [ + { + "bbox": [ + 56, + 487, + 296, + 605 + ], + "type": "text", + "content": "Moreover, we assume that many existing generative tasks can be viewed as a multi-conditional generation, such as virtual try-on [5, 17], object insertion [3, 51], style transfer [15, 33, 52], spatially-aligned customization [20, 21, 25, 27], etc. Consequently, there is a need for a unified framework to encompass these generative tasks in a way of multi-conditional generation. This framework should ensure consistency with all input constraints, including subject ID preservation, spatial structural alignment, background coherence, and style uniformity." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 606, + 294, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 606, + 294, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 606, + 294, + 714 + ], + "type": "text", + "content": "To achieve this, we propose UniCombine, a powerful and universal framework that offers several key advantages: Firstly, our framework is capable of simultaneously handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images. Specifically, we introduce a novel Conditional MMDiT Attention mechanism and incorporate a trainable Denoising-LoRA module to build both the training-free and training-based versions. 
By integrating multiple pre" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 72, + 553, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 324 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 324 + ], + "type": "text", + "content": "trained Condition-LoRA module weights into the conditional branches, UniCombine achieves excellent training-free performance, which can be improved further after training on the task-specific multi-conditional dataset. Secondly, due to the lack of a publicly available dataset for multi-conditional generative tasks, we build the SubjectSpatial200K dataset to serve as the training dataset and the testing benchmark. Specifically, we generate the subject grounding annotations and spatial map annotations for all the data samples from Subjects200K [45] and therefore formulate our SubjectSpatial200K dataset. Thirdly, our UniCombine can achieve many unprecedented multi-conditional combinations, as shown in Fig. 1, such as combining a reference subject image with the inpainting area of a background image or with the layout guidance of a depth (or canny) map while imposing precise control via text prompt. Furthermore, extensive experiments on Subject-Insertion, Subject-Spatial, and Multi-Spatial conditional generation demonstrate the outstanding universality and powerful capability of our method against other existing specialized approaches." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 324, + 326, + 547, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 326, + 547, + 338 + ], + "spans": [ + { + "bbox": [ + 324, + 326, + 547, + 338 + ], + "type": "text", + "content": "In summary, we highlight our contributions as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 341, + 553, + 509 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 314, + 341, + 553, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 341, + 553, + 388 + ], + "spans": [ + { + "bbox": [ + 314, + 341, + 553, + 388 + ], + "type": "text", + "content": "- We present UniCombine, a DiT-based multi-conditional controllable generative framework capable of handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 314, + 388, + 553, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 388, + 553, + 449 + ], + "spans": [ + { + "bbox": [ + 314, + 388, + 553, + 449 + ], + "type": "text", + "content": "- We construct the SubjectSpatial200K dataset, which encompasses both subject-driven and spatially-aligned conditions for all text-image sample pairs. It addresses the absence of a publicly available dataset for training and testing multi-conditional controllable generative models." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 449, + 553, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 449, + 553, + 509 + ], + "spans": [ + { + "bbox": [ + 314, + 449, + 553, + 509 + ], + "type": "text", + "content": "- We conduct extensive experiments on Subject-Insertion, Subject-Spatial, and Multi-Spatial conditional generative tasks. 
The experimental results demonstrate the state-of-the-art performance of our UniCombine, which effectively aligns with all conditions harmoniously." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 527, + 400, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 527, + 400, + 540 + ], + "spans": [ + { + "bbox": [ + 314, + 527, + 400, + 540 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 549, + 449, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 549, + 449, + 562 + ], + "spans": [ + { + "bbox": [ + 314, + 549, + 449, + 562 + ], + "type": "text", + "content": "2.1. Diffusion-Based Models" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 570, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 570, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 570, + 553, + 713 + ], + "type": "text", + "content": "Diffusion-based [13, 42] models have demonstrated superior performance than GAN-based [9] ones across various domains, including controllable generation [18, 31, 47, 58, 59], image editing [11, 30, 39], customized generation [8, 22, 40], object insertion [4, 43, 56], mask-guided inpainting [19, 48, 61], and so on. These breakthroughs begin with the LDM [37] and are further advanced with the DiT [32] architecture. The latest text-to-image generative models, SD3 [7] and FLUX [23], have attained state-of-the-art results by employing the Rectified Flow [28, 29] training strategy, the RPE [44] positional embedding and the MultiModal Diffusion Transformer (MMDiT) [7] architecture." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 69, + 197, + 266 + ], + "blocks": [ + { + "bbox": [ + 57, + 69, + 197, + 266 + ], + "lines": [ + { + "bbox": [ + 57, + 69, + 197, + 266 + ], + "spans": [ + { + "bbox": [ + 57, + 69, + 197, + 266 + ], + "type": "image", + "image_path": "e21e3bf9a4a78294b155fc81f3e6b7e3e4251e119c112d81fb0f47a4822439d5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 87, + 269, + 165, + 278 + ], + "lines": [ + { + "bbox": [ + 87, + 269, + 165, + 278 + ], + "spans": [ + { + "bbox": [ + 87, + 269, + 165, + 278 + ], + "type": "text", + "content": "(a) Overall Framework" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 201, + 69, + 334, + 266 + ], + "blocks": [ + { + "bbox": [ + 201, + 69, + 334, + 266 + ], + "lines": [ + { + "bbox": [ + 201, + 69, + 334, + 266 + ], + "spans": [ + { + "bbox": [ + 201, + 69, + 334, + 266 + ], + "type": "image", + "image_path": "3162d24f03eacb746bd4ecce492028784582c1beb179bc5cf91d96a185a56eec.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 269, + 319, + 279 + ], + "lines": [ + { + "bbox": [ + 216, + 269, + 319, + 279 + ], + "spans": [ + { + "bbox": [ + 216, + 269, + 319, + 279 + ], + "type": "text", + "content": "(b) Single-Conditional Setting" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 338, + 70, + 554, + 266 + ], + 
"blocks": [ + { + "bbox": [ + 338, + 70, + 554, + 266 + ], + "lines": [ + { + "bbox": [ + 338, + 70, + 554, + 266 + ], + "spans": [ + { + "bbox": [ + 338, + 70, + 554, + 266 + ], + "type": "image", + "image_path": "7fa892b46091963fb1ee53c8e239a5cf468404dfb70774813a489096638da93b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 397, + 269, + 496, + 279 + ], + "lines": [ + { + "bbox": [ + 397, + 269, + 496, + 279 + ], + "spans": [ + { + "bbox": [ + 397, + 269, + 496, + 279 + ], + "type": "text", + "content": "(c) Multi-Conditional Setting" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 54, + 285, + 555, + 373 + ], + "lines": [ + { + "bbox": [ + 54, + 285, + 555, + 373 + ], + "spans": [ + { + "bbox": [ + 54, + 285, + 555, + 373 + ], + "type": "text", + "content": "Figure 2. Overview of our proposed UniCombine. (a) The overall framework. We regard the MMDiT-based diffusion models as consisting of the text branch and the denoising branch. Based on it, our UniCombine introduces multiple conditional branches to process the input conditions. (b) The single-conditional setting of our UniCombine. It is equivalent to OminiControl [45] which is a special case of our proposed UniCombine framework under a single-conditional setting. (c) The multi-conditional setting of our UniCombine. Our LoRA Switching module adaptively activates the pre-trained Condition-LoRA modules on the weights of the denoising branch according to the conditional types. The proposed Conditional MMDiT Attention mechanism is used to replace the original MMDiT Attention mechanism for handling the unified multi-conditional input sequence. Whether to load the optional Denoising-LoRA module is the difference between the training-free and training-based versions." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 384, + 192, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 384, + 192, + 395 + ], + "spans": [ + { + "bbox": [ + 55, + 384, + 192, + 395 + ], + "type": "text", + "content": "2.2. Controllable Generation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 401, + 296, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 401, + 296, + 616 + ], + "spans": [ + { + "bbox": [ + 56, + 401, + 296, + 616 + ], + "type": "text", + "content": "Controllable generation allows for customizing the desired spatial layout, filter style, or subject appearance in the generated images. A series of methods such as ControlNet [59], T2I-Adapter [31], GLIGEN [26], and ZestGuide [6] successfully introduce the spatial conditions into controllable generation, enabling models to control the spatial layout of generated images. Another series of methods, such as IP-Adapter [58], InstantID [47], BLIP-Diffusion [24], and StyleDrop [41] incorporate the subject conditions into controllable generation, ensuring consistency between generated images and reference images in style, characteristics, subject appearance, etc. To unify these two tasks, OminiControl [45] proposes a novel MMDiT-based controllable framework to handle various conditions with a unified pipeline. Unfortunately, it lacks the capability to control generation with multiple conditions. To this end, we propose UniCombine, which successfully extends this framework to multi-conditional scenarios." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 624, + 281, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 624, + 281, + 635 + ], + "spans": [ + { + "bbox": [ + 55, + 624, + 281, + 635 + ], + "type": "text", + "content": "2.3. 
Multi-Conditional Controllable Generation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 296, + 714 + ], + "type": "text", + "content": "As controllable generation advances, merely providing a single condition to guide the image generation no longer satisfies the needs. As a result, research on multi-conditional controllable generation has emerged. Existing methods like UniControl [35], UniControlNet [60] and Cocktail [14] exhibit acceptable performance when simul" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 384, + 555, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 384, + 555, + 516 + ], + "spans": [ + { + "bbox": [ + 313, + 384, + 555, + 516 + ], + "type": "text", + "content": "taneously leveraging multiple spatial conditions for image generation. However, there is a lack of multi-conditional generative models that support utilizing both spatial conditions and subject conditions to guide the generative process together. Although the recently proposed method Ctrl-X [27] features controlling the appearance and structure simultaneously, its performance remains unsatisfactory with a limited combination of conditions and it is not compatible with the Diffusion Transformer architecture. To address the aforementioned limitations, we propose UniCombine to enable the flexible combination of various control signals." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 537, + 370, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 537, + 370, + 550 + ], + "spans": [ + { + "bbox": [ + 314, + 537, + 370, + 550 + ], + "type": "text", + "content": "3. 
Method" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 561, + 392, + 573 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 561, + 392, + 573 + ], + "spans": [ + { + "bbox": [ + 313, + 561, + 392, + 573 + ], + "type": "text", + "content": "3.1. Preliminary" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "content": "In this work, we mainly explore the latest generative models that utilize the Rectified Flow (RF) [28, 29] training strategy and the MMDiT [7] backbone architecture, like FLUX [23] and SD3 [7]. For the source noise distribution " + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "inline_equation", + "content": "X_0 \\sim p_{\\mathrm{noise}}" + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "content": " and the target image distribution " + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "inline_equation", + "content": "X_1 \\sim p_{\\mathrm{data}}" + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "content": ", the RF defines a linear interpolation between them as " + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "inline_equation", + "content": "X_t = (1 - t)X_0 + tX_1" + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "inline_equation", + "content": "t \\in [0,1]" + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "content": ". 
The training objective is to learn a time-dependent vector field " + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "inline_equation", + "content": "v_t(X_t, t; \\theta)" + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "content": " that describes the trajectory of the ODE " + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "inline_equation", + "content": "dX_t = v_t(X_t, t; \\theta)dt" + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "content": ". Specifically, " + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "inline_equation", + "content": "v_t(X_t, t; \\theta)" + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "content": " is optimized to approximate the constant velocity " + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "inline_equation", + "content": "X_1 - X_0" + }, + { + "bbox": [ + 313, + 582, + 555, + 715 + ], + "type": "text", + "content": ", leading to the loss function as Eq. (1)." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 85, + 296, + 110 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 85, + 296, + 110 + ], + "spans": [ + { + "bbox": [ + 55, + 85, + 296, + 110 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {R F}} (\\theta) = \\mathbb {E} _ {X _ {1} \\sim p _ {\\text {d a t a}}, X _ {0} \\sim p _ {\\text {n o i s e}}, t \\sim U [ 0, 1 ]} \\left[ \\| (X _ {1} - X _ {0}) - v _ {t} (X _ {t}, t; \\theta) \\| ^ {2} \\right] \\tag {1}", + "image_path": "99512443b31f1e244aae53591cf61ea45942f31984f52b9e648e6112280d77e4.jpg" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 110, + 296, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 110, + 296, + 350 + ], + "spans": [ + { + "bbox": [ + 55, + 110, + 296, + 350 + ], + "type": "text", + "content": "In this paper, we propose a concept of branch to differentiate the processing flows of input embeddings from different modalities in MMDiT-based models. As shown in Fig. 2 (a), instead of the single-branch architecture [37] where the text prompt is injected into the denoising branch via crossattention, MMDiT uses two independent transformers to construct the text branch and the denoising branch. Based on it, OminiControl [45] incorporates a Condition-LoRA module onto the weights of the denoising branch to process the input conditional embedding, thus forming its Conditional Branch, as depicted in Fig. 2 (b). 
It is worth noting that, OminiControl [45] can be regarded as a special case of our proposed UniCombine framework under the single-conditional setting. It provides the pre-trained Condition-LoRA modules to meet the need for our multi-conditional settings. In the single-conditional setting, the text branch embedding " + }, + { + "bbox": [ + 55, + 110, + 296, + 350 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 55, + 110, + 296, + 350 + ], + "type": "text", + "content": ", the denoising branch embedding " + }, + { + "bbox": [ + 55, + 110, + 296, + 350 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 55, + 110, + 296, + 350 + ], + "type": "text", + "content": ", and the conditional branch embedding " + }, + { + "bbox": [ + 55, + 110, + 296, + 350 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 55, + 110, + 296, + 350 + ], + "type": "text", + "content": " are concatenated to form a unified sequence " + }, + { + "bbox": [ + 55, + 110, + 296, + 350 + ], + "type": "inline_equation", + "content": "[T;X;C]" + }, + { + "bbox": [ + 55, + 110, + 296, + 350 + ], + "type": "text", + "content": " to be processed in the MMDiT Attention mechanism." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 357, + 138, + 368 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 357, + 138, + 368 + ], + "spans": [ + { + "bbox": [ + 55, + 357, + 138, + 368 + ], + "type": "text", + "content": "3.2. UniCombine" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 373, + 296, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 373, + 296, + 447 + ], + "spans": [ + { + "bbox": [ + 55, + 373, + 296, + 447 + ], + "type": "text", + "content": "Building upon the MMDiT-based text-to-image generative model FLUX [23], we propose UniCombine, a multi-conditional controllable generative framework consisting of various conditional branches. 
Each conditional branch is in charge of processing one conditional embedding, thus forming a unified embedding sequence " + }, + { + "bbox": [ + 55, + 373, + 296, + 447 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 55, + 373, + 296, + 447 + ], + "type": "text", + "content": " as presented in Eq. (2)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 131, + 449, + 294, + 460 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 449, + 294, + 460 + ], + "spans": [ + { + "bbox": [ + 131, + 449, + 294, + 460 + ], + "type": "interline_equation", + "content": "S = [ T; X; C _ {1}; \\dots ; C _ {N} ] \\tag {2}", + "image_path": "93282f189196c23bdfd799eb8a46475b81447637d02f5a79af7f94e82a528bc4.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 463, + 296, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 463, + 296, + 640 + ], + "spans": [ + { + "bbox": [ + 55, + 463, + 296, + 640 + ], + "type": "text", + "content": "Given that the single-conditional setting of our UniCombine is equivalent to OmniControl [45], we only focus on the multi-conditional setting in this section. Firstly, we introduce a LoRA Switching module to manage multiple conditional branches effectively. Secondly, we introduce a novel Conditional MMDiT Attention mechanism to process the unified sequence " + }, + { + "bbox": [ + 55, + 463, + 296, + 640 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 55, + 463, + 296, + 640 + ], + "type": "text", + "content": " in the multi-conditional setting. Thirdly, we present an insight analysis of our training-free strategy, which leverages the pre-trained Condition-LoRA module weights to perform a training-free multi-conditional controllable generation. 
Lastly, we present a feasible training-based strategy, which utilizes a trainable Denoising-LoRA module to enhance the performance further after training on a task-specific multi-conditional dataset." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 642, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 296, + 713 + ], + "type": "text", + "content": "LoRA Switching Module. Before denoising with multiple input conditions, the Condition-LoRA modules pre-trained under single-conditional settings should be loaded onto the weights of the denoising branch, like " + }, + { + "bbox": [ + 55, + 642, + 296, + 713 + ], + "type": "inline_equation", + "content": "[CondLoRA_1, CondLoRA_2, \\ldots]" + }, + { + "bbox": [ + 55, + 642, + 296, + 713 + ], + "type": "text", + "content": ". Then the LoRA Switching module determines which one of them should be" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 553, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 227 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 227 + ], + "type": "text", + "content": "activated according to the type of input conditions, forming a one-hot gating mechanism " + }, + { + "bbox": [ + 313, + 72, + 553, + 227 + ], + "type": "inline_equation", + "content": "[0,1,0,\\dots,0]" + }, + { + "bbox": [ + 313, + 72, + 553, + 227 + ], + "type": "text", + "content": ", as shown in Fig. 2 (c). Subsequently, different conditional branches with different activated Condition-LoRA modules are used for processing different conditional embeddings, resulting in a minimal number of additional parameters introduced for different conditions. Unlike the single-conditional setting in Fig. 2 (b), which only needs loading LoRA modules, the LoRA Switching module in Fig. 
2 (c) enables adaptive selection among multiple LoRA modules to provide the matching conditional branches for each conditional embeddings, granting our framework greater flexibility and adaptability to handle diverse conditional combinations." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 228, + 553, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 228, + 553, + 372 + ], + "spans": [ + { + "bbox": [ + 313, + 228, + 553, + 372 + ], + "type": "text", + "content": "Conditional MMDiT Attention. After concatenating the output embeddings from these " + }, + { + "bbox": [ + 313, + 228, + 553, + 372 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 228, + 553, + 372 + ], + "type": "text", + "content": " conditional branches, the unified sequence " + }, + { + "bbox": [ + 313, + 228, + 553, + 372 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 313, + 228, + 553, + 372 + ], + "type": "text", + "content": " cannot be processed through the original MMDiT Attention mechanism due to two major challenges: (1) The computational complexity scales quadratically as " + }, + { + "bbox": [ + 313, + 228, + 553, + 372 + ], + "type": "inline_equation", + "content": "O(N^2)" + }, + { + "bbox": [ + 313, + 228, + 553, + 372 + ], + "type": "text", + "content": " with respect to the number of conditions, which becomes especially problematic when handling multiple high-resolution conditions. (2) When performing MMDiT Attention on the unified sequence " + }, + { + "bbox": [ + 313, + 228, + 553, + 372 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 313, + 228, + 553, + 372 + ], + "type": "text", + "content": ", different condition signals interfere with each other during the attention calculation, making it difficult to effectively utilize the pre-trained Condition-LoRA module weights for the denoising process." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "spans": [ + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "text", + "content": "To address these challenges, we introduce a novel Conditional MMDiT Attention mechanism (CMMDiT Attention) as depicted in Fig. 2 (c) to replace the original MMDiT Attention. Instead of feeding the entire unified sequence " + }, + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "text", + "content": " into the MMDiT Attention at once, CMMDiT Attention follows distinct computational mechanisms according to which branch is serving as queries. The core idea is that the branch serving as a query aggregates the information from different scopes of the unified sequence " + }, + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "text", + "content": " depending on its type. Specifically, when the denoising branch " + }, + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "text", + "content": " and the text branch " + }, + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "text", + "content": " serve as queries, their scope of keys and values correspond to the entire unified sequence " + }, + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "text", + "content": ", granting them a global receptive field and the ability to aggregate information from all conditional branches. 
In contrast, when the conditional branches " + }, + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "inline_equation", + "content": "C_i" + }, + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "text", + "content": " serve as queries, their receptive fields do not encompass one another. Their scope of keys and values are restricted to the subsequence " + }, + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "inline_equation", + "content": "S_i" + }, + { + "bbox": [ + 313, + 372, + 553, + 598 + ], + "type": "text", + "content": " as presented in Eq. (3), which prevents feature exchange and avoids information entanglement between different conditions." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 405, + 602, + 553, + 614 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 405, + 602, + 553, + 614 + ], + "spans": [ + { + "bbox": [ + 405, + 602, + 553, + 614 + ], + "type": "interline_equation", + "content": "S _ {i} = [ T; X; C _ {i} ] \\tag {3}", + "image_path": "3661792d048303c132e0cc107e11197e07793e4fd697e499d3481bbdc763b8c7.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 617, + 555, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 555, + 653 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 555, + 653 + ], + "type": "text", + "content": "Furthermore, the CMMDiT Attention reduces computational complexity from " + }, + { + "bbox": [ + 313, + 617, + 555, + 653 + ], + "type": "inline_equation", + "content": "O(N^2)" + }, + { + "bbox": [ + 313, + 617, + 555, + 653 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 313, + 617, + 555, + 653 + ], + "type": "inline_equation", + "content": "O(N)" + }, + { + "bbox": [ + 313, + 617, + 555, + 653 + ], + "type": "text", + "content": " as the number of conditions increases, making it more scalable." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 654, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 553, + 713 + ], + "type": "text", + "content": "Training-free Strategy. The following analyses provide a detailed explanation of why our UniCombine is capable of seamlessly integrating and effectively reusing the pretrained Condition-LoRA module weights to tackle multi-conditional challenges in a training-free manner." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 69, + 295, + 133 + ], + "blocks": [ + { + "bbox": [ + 58, + 69, + 295, + 133 + ], + "lines": [ + { + "bbox": [ + 58, + 69, + 295, + 133 + ], + "spans": [ + { + "bbox": [ + 58, + 69, + 295, + 133 + ], + "type": "image", + "image_path": "8c57a3c424f01738f8a53f6d6f0c8688e8016cc7a73bf6ea21df63ffdafe3eaa.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 138, + 295, + 159 + ], + "lines": [ + { + "bbox": [ + 55, + 138, + 295, + 159 + ], + "spans": [ + { + "bbox": [ + 55, + 138, + 295, + 159 + ], + "type": "text", + "content": "Figure 3. Average " + }, + { + "bbox": [ + 55, + 138, + 295, + 159 + ], + "type": "inline_equation", + "content": "\\mathrm{X} \\rightarrow" + }, + { + "bbox": [ + 55, + 138, + 295, + 159 + ], + "type": "text", + "content": " Subject cross-attention map of the insertion area." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 171, + 296, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 171, + 296, + 220 + ], + "spans": [ + { + "bbox": [ + 55, + 171, + 296, + 220 + ], + "type": "text", + "content": "On the one hand, when the conditional embeddings " + }, + { + "bbox": [ + 55, + 171, + 296, + 220 + ], + "type": "inline_equation", + "content": "C_i" + }, + { + "bbox": [ + 55, + 171, + 296, + 220 + ], + "type": "text", + "content": " serve as queries in CMMDiT, they follow the same attention computational paradigm as in the MMDiT of single-conditional settings, as indicated in Eq. (4)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 66, + 222, + 294, + 249 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 222, + 294, + 249 + ], + "spans": [ + { + "bbox": [ + 66, + 222, + 294, + 249 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\operatorname {C M M D i T} \\left(Q = C _ {i} ^ {q}, K = \\left[ T ^ {k}, X ^ {k}, C _ {i} ^ {k} \\right], V = \\left[ T ^ {v}, X ^ {v}, C _ {i} ^ {v} \\right]\\right) \\\\ = \\operatorname {M M D i T} (Q = C ^ {q}, K = [ T ^ {k}, X ^ {k}, C ^ {k} ], V = [ T ^ {v}, X ^ {v}, C ^ {v} ]) \\tag {4} \\\\ \\end{array}", + "image_path": "21b0e5e13ae17e5a1c0fc46b9ae2bd7c1ae40e8754d83e2afe15c56a0a886ccc.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 255, + 296, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 255, + 296, + 302 + ], + "spans": [ + { + "bbox": [ + 55, + 255, + 296, + 302 + ], + "type": "text", + "content": "This consistent computational paradigm enables the conditional branches to share the same feature extraction capability between the multi-conditional setting and the single-conditional setting." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 302, + 296, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 302, + 296, + 398 + ], + "spans": [ + { + "bbox": [ + 55, + 302, + 296, + 398 + ], + "type": "text", + "content": "On the other hand, when the denoising embedding " + }, + { + "bbox": [ + 55, + 302, + 296, + 398 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 55, + 302, + 296, + 398 + ], + "type": "text", + "content": " and the text prompt embedding " + }, + { + "bbox": [ + 55, + 302, + 296, + 398 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 55, + 302, + 296, + 398 + ], + "type": "text", + "content": " serve as queries in CMMDiT, their attention computational paradigm diverges from the single-conditional settings. As illustrated in Eq. (5), when the denoising embedding " + }, + { + "bbox": [ + 55, + 302, + 296, + 398 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 55, + 302, + 296, + 398 + ], + "type": "text", + "content": " is used as a query for attention computation with multiple conditional embeddings in CMMDiT, the attention score matrix is computed between " + }, + { + "bbox": [ + 55, + 302, + 296, + 398 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 55, + 302, + 296, + 398 + ], + "type": "text", + "content": " and all the conditional embeddings." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 399, + 299, + 446 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 399, + 299, + 446 + ], + "spans": [ + { + "bbox": [ + 56, + 399, + 299, + 446 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathrm {C M M D i T} (Q = X ^ {q}, K / V = [ X ^ {k / v}, T ^ {k / v}, C _ {1} ^ {k / v}, \\dots , C _ {N} ^ {k / v} ]) \\\\ = \\operatorname {s o f t m a x} \\left(\\frac {1}{\\sqrt {d i m}} X ^ {q} \\left[ X ^ {k}, T ^ {k}, C _ {1} ^ {k}, \\dots , C _ {N} ^ {k} \\right] ^ {\\top}\\right) \\left[ X ^ {v}, T ^ {v}, C _ {1} ^ {v}, \\dots , C _ {N} ^ {v} \\right] \\tag {5} \\\\ \\end{array}", + "image_path": "366bfb556eb7348d43c9ba6eb176249717e1ac12f3b91c45a0204d9d3b46b469.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 450, + 296, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 450, + 296, + 510 + ], + "spans": [ + { + "bbox": [ + 55, + 450, + 296, + 510 + ], + "type": "text", + "content": "It allows " + }, + { + "bbox": [ + 55, + 450, + 296, + 510 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 55, + 450, + 296, + 510 + ], + "type": "text", + "content": " to extract and integrate information from each of the conditional embeddings separately and fusion them. This divide-and-conquer computational paradigm enables the text branch and denoising branch to fuse the conditional features effectively." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 510, + 296, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 510, + 296, + 557 + ], + "spans": [ + { + "bbox": [ + 55, + 510, + 296, + 557 + ], + "type": "text", + "content": "By leveraging the computational paradigms mentioned above, our UniCombine is able to perform a training-free multi-conditional controllable generation with the pretrained Condition-LoRA modules." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "text", + "content": "Training-based Strategy. However, due to the lack of training, solely relying on the softmax operation in Eq. (5) to balance the attention score distribution across multiple conditional embeddings may result in an undesirable feature fusion result, making our training-free version unsatisfactory in some cases. To address this issue, we introduce a trainable Denoising-LoRA module within the denoising branch to rectify the distribution of attention scores in Eq. (5). During training, we keep all the Condition-LoRA modules frozen to preserve the conditional extracting capability and train the Denoising-LoRA module solely on the task-specific multi-conditional dataset, as shown in Fig. 2 (c). After training, the denoising embedding " + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 55, + 558, + 296, + 714 + ], + "type": "text", + "content": " learns to" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 318, + 70, + 552, + 228 + ], + "blocks": [ + { + "bbox": [ + 318, + 70, + 552, + 228 + ], + "lines": [ + { + "bbox": [ + 318, + 70, + 552, + 228 + ], + "spans": [ + { + "bbox": [ + 318, + 70, + 552, + 228 + ], + "type": "image", + "image_path": "d04a32c8e874aab106f9fa111d4a7b255e65e731a4acd2452f053c1c30949d06.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 324, + 234, + 542, + 246 + ], + "lines": [ + { + "bbox": [ + 324, + 234, + 542, + 246 + ], + "spans": [ + { + "bbox": [ + 324, + 234, + 542, + 246 + ], + "type": "text", + "content": "Figure 4. SubjectSpatial200K dataset construction pipeline." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 256, + 555, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 256, + 555, + 316 + ], + "spans": [ + { + "bbox": [ + 313, + 256, + 555, + 316 + ], + "type": "text", + "content": "better aggregate the appropriate information during the CM-MDiT Attention operation. As presented in Fig. 3, the average " + }, + { + "bbox": [ + 313, + 256, + 555, + 316 + ], + "type": "inline_equation", + "content": "\\mathrm{X} \\rightarrow" + }, + { + "bbox": [ + 313, + 256, + 555, + 316 + ], + "type": "text", + "content": " Subject attention map within the inpainting area is more concentrated on the subject area in the training-based version." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 324, + 466, + 337 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 324, + 466, + 337 + ], + "spans": [ + { + "bbox": [ + 313, + 324, + 466, + 337 + ], + "type": "text", + "content": "3.3. SubjectSpatial200K dataset" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 342, + 555, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 342, + 555, + 474 + ], + "spans": [ + { + "bbox": [ + 313, + 342, + 555, + 474 + ], + "type": "text", + "content": "Our SubjectSpatial200K dataset aims to address the lack of a publicly available dataset for multi-conditional generative tasks. Existing datasets fail to include both the subject-driven and spatially-aligned annotations. Recently, the Subjects200K [45] dataset provides a publicly accessible dataset for subject-driven generation. Based on it, we introduce the SubjectSpatial200K dataset, which is a unified high-quality dataset designed for training and testing multi-conditional controllable generative models. This dataset includes comprehensive annotations as elaborated below. Besides, the construction pipeline is detailed in Fig. 
4." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 474, + 554, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 474, + 554, + 581 + ], + "spans": [ + { + "bbox": [ + 313, + 474, + 554, + 581 + ], + "type": "text", + "content": "Subject Grounding Annotation. The subject grounding annotation is significantly necessary for many generative tasks like instance-level inpainting [19, 61], instance-level controllable generation [26, 49], and object insertion [4, 43]. By leveraging the open-vocabulary object detection model Mamba-YOLO-World [46] on Subjects200K, we detect bounding boxes for all subjects according to their category descriptions and subsequently derive the corresponding mask regions." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 582, + 554, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 582, + 554, + 642 + ], + "spans": [ + { + "bbox": [ + 313, + 582, + 554, + 642 + ], + "type": "text", + "content": "Spatial Map Annotation. The spatial map annotation further extends the applicable scope of our dataset to spatially-aligned synthesis tasks. Specifically, we employ the Depth-Anything [57] model and the OpenCV [1] library on Subjects200K to derive the Depth and Canny maps." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 652, + 390, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 652, + 390, + 666 + ], + "spans": [ + { + "bbox": [ + 313, + 652, + 390, + 666 + ], + "type": "text", + "content": "4. Experiment" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 672, + 364, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 672, + 364, + 685 + ], + "spans": [ + { + "bbox": [ + 313, + 672, + 364, + 685 + ], + "type": "text", + "content": "4.1. 
Setup" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 689, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 554, + 714 + ], + "type": "text", + "content": "Implementation. We use the FLUX.1-schnell [23] as our base model and the weights provided by OminiControl [45]" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 74, + 70, + 535, + 268 + ], + "blocks": [ + { + "bbox": [ + 74, + 70, + 535, + 268 + ], + "lines": [ + { + "bbox": [ + 74, + 70, + 535, + 268 + ], + "spans": [ + { + "bbox": [ + 74, + 70, + 535, + 268 + ], + "type": "table", + "html": "
TaskMethodGenerative QualityControllabilitySubject ConsistencyText Consistency
FID ↓SSIM ↑F1 ↑MSE ↓CLIP-I ↑DINO ↑CLIP-T ↑
Multi-SpatialUniControl44.170.320.071346.02--30.28
UniControlNet20.960.280.091231.06--32.74
UniCombine (training-free)10.350.540.18519.53--33.70
UniCombine (training-based)6.820.640.24165.90--33.45
Subject-InsertionObjectStitch26.860.37--93.0582.3432.25
AnyDoor26.070.37--94.8886.0432.55
UniCombine (training-free)6.370.76--95.6089.0133.11
UniCombine (training-based)4.550.81--97.1492.9633.08
Subject-DepthControlNet w. IP-Adapter29.930.34-1295.8080.4162.2632.94
Ctrl-X52.370.36-2644.9078.0850.8330.20
UniCombine (training-free)10.030.48-507.4091.1585.7333.41
UniCombine (training-based)6.660.55-196.6594.4790.3133.30
Subject-CannyControlNet w. IP-Adapter30.380.380.09-79.8060.1932.85
Ctrl-X47.890.360.05-79.3554.3130.34
UniCombine (training-free)10.220.490.17-91.8486.8833.21
UniCombine (training-based)6.010.610.24-95.2692.5933.30
", + "image_path": "a34bbcb900321fe501a520855202f376e6664d9faf88390dd30b55aa8d6f1b53.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 273, + 553, + 297 + ], + "lines": [ + { + "bbox": [ + 55, + 273, + 553, + 297 + ], + "spans": [ + { + "bbox": [ + 55, + 273, + 553, + 297 + ], + "type": "text", + "content": "Table 1. Quantitative comparison of our method with existing approaches on Multi-Spatial, Subject-Insertion, Subject-Depth, and Subject-Canny conditional generative tasks. The bold and underlined figures represent the optimal and sub-optimal results, respectively." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 57, + 307, + 294, + 536 + ], + "blocks": [ + { + "bbox": [ + 57, + 307, + 294, + 536 + ], + "lines": [ + { + "bbox": [ + 57, + 307, + 294, + 536 + ], + "spans": [ + { + "bbox": [ + 57, + 307, + 294, + 536 + ], + "type": "image", + "image_path": "1e06b7a6c1ae6b58e94df98cc9be3d1764e9ac9f480e954299aa319606949d55.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 62, + 541, + 288, + 553 + ], + "lines": [ + { + "bbox": [ + 62, + 541, + 288, + 553 + ], + "spans": [ + { + "bbox": [ + 62, + 541, + 288, + 553 + ], + "type": "text", + "content": "Figure 5. Qualitative comparison on Multi-Spatial generation." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 567, + 295, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 567, + 295, + 651 + ], + "spans": [ + { + "bbox": [ + 55, + 567, + 295, + 651 + ], + "type": "text", + "content": "as our pre-trained Condition-LoRA module weights. During the training of our Denoising-LoRA module, we use a rank of 4, consistent with the Condition-LoRA. 
We choose the Adam optimizer with a learning rate of " + }, + { + "bbox": [ + 55, + 567, + 295, + 651 + ], + "type": "inline_equation", + "content": "1e^{-4}" + }, + { + "bbox": [ + 55, + 567, + 295, + 651 + ], + "type": "text", + "content": " and set the weight decay to 0.01. Our models are trained for 30,000 steps on 16 NVIDIA V100 GPUs at a resolution of " + }, + { + "bbox": [ + 55, + 567, + 295, + 651 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 55, + 567, + 295, + 651 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "text", + "content": "**Benchmarks.** We evaluate the performance of our method in both training-free and training-based versions. The training and testing datasets are partitioned from the SubjectSpatial200K dataset based on image quality assessment scores evaluated by ChatGPT-4o, with details provided in Sec. A1." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 315, + 307, + 553, + 536 + ], + "blocks": [ + { + "bbox": [ + 315, + 307, + 553, + 536 + ], + "lines": [ + { + "bbox": [ + 315, + 307, + 553, + 536 + ], + "spans": [ + { + "bbox": [ + 315, + 307, + 553, + 536 + ], + "type": "image", + "image_path": "d60c55bfaeeeedfd5aefc1bba87bd491b67f9a1c54a3f0b04bece1daafa5fa08.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 542, + 553, + 554 + ], + "lines": [ + { + "bbox": [ + 313, + 542, + 553, + 554 + ], + "spans": [ + { + "bbox": [ + 313, + 542, + 553, + 554 + ], + "type": "text", + "content": "Figure 6. Qualitative comparison on Subject-Insertion generation." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 568, + 553, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 568, + 553, + 592 + ], + "spans": [ + { + "bbox": [ + 313, + 568, + 553, + 592 + ], + "type": "text", + "content": "Importantly, the dataset partitioning scheme remains consistent in all experiments." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 594, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 555, + 713 + ], + "type": "text", + "content": "Metrics. To evaluate the subject consistency, we calculate the CLIP-I [36] score and DINO [2] score between the generated images and the ground truth images. To assess the generative quality, we compute the FID [12] and SSIM [50] between the generated image set and the ground truth image set. To measure the controllability, we compute the F1 Score for edge conditions and the MSE score for depth conditions between the extracted maps from generated images and the original conditions. 
Additionally, we adopt the CLIP-T [36] score to estimate the text consistency between" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 70, + 294, + 299 + ], + "blocks": [ + { + "bbox": [ + 58, + 70, + 294, + 299 + ], + "lines": [ + { + "bbox": [ + 58, + 70, + 294, + 299 + ], + "spans": [ + { + "bbox": [ + 58, + 70, + 294, + 299 + ], + "type": "image", + "image_path": "506fe3547ab1943e7faa90d0f349eb3f11dc59ed4a47d7cf36007f74a6ca38f0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 59, + 305, + 290, + 317 + ], + "lines": [ + { + "bbox": [ + 59, + 305, + 290, + 317 + ], + "spans": [ + { + "bbox": [ + 59, + 305, + 290, + 317 + ], + "type": "text", + "content": "Figure 7. Qualitative comparison on Subject-Depth generation." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 327, + 229, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 327, + 229, + 339 + ], + "spans": [ + { + "bbox": [ + 55, + 327, + 229, + 339 + ], + "type": "text", + "content": "the generated images and the text prompts." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 345, + 135, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 345, + 135, + 357 + ], + "spans": [ + { + "bbox": [ + 55, + 345, + 135, + 357 + ], + "type": "text", + "content": "4.2. 
Main Result" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 362, + 295, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 362, + 295, + 398 + ], + "spans": [ + { + "bbox": [ + 55, + 362, + 295, + 398 + ], + "type": "text", + "content": "We conduct extensive and comprehensive comparative experiments on the Multi-Spatial, Subject-Insertion, and Subject-Spatial conditional generative tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 403, + 244, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 403, + 244, + 415 + ], + "spans": [ + { + "bbox": [ + 55, + 403, + 244, + 415 + ], + "type": "text", + "content": "4.2.1. Multi-Spatial Conditional Generation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 418, + 295, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 418, + 295, + 514 + ], + "spans": [ + { + "bbox": [ + 55, + 418, + 295, + 514 + ], + "type": "text", + "content": "The Multi-Spatial conditional generation aims to generate images adhering to the collective layout constraints of diverse spatial conditions. This requires the model to achieve a more comprehensive layout control based on input conditions in a complementary manner. The comparative results in Tab. 1 and Fig. 5 demonstrate that our method outperforms existing multi-spatial conditional generation approaches in generative quality and controllability." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 519, + 261, + 531 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 519, + 261, + 531 + ], + "spans": [ + { + "bbox": [ + 55, + 519, + 261, + 531 + ], + "type": "text", + "content": "4.2.2. 
Subject-Insertion Conditional Generation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 534, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 534, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 534, + 295, + 713 + ], + "type": "text", + "content": "The Subject-Insertion conditional generation requires the model to generate images where the reference subject is inserted into the masked region of the target background. As illustrated in Tab. 1 and Fig. 6, our UniCombine demonstrates superior performance compared to previous methods with three advantages: Firstly, our method ensures that the reference subject is inserted into the background with high consistency and harmonious integration. Secondly, our method excels in open-world object insertion without requiring test-time tuning, unlike conventional customization methods [22, 40]. Finally, our method demonstrates strong semantic comprehension capabilities, enabling it to extract the desired object from a complex subject image with a non-white background, rather than simply pasting the entire subject image into the masked region." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 316, + 69, + 553, + 299 + ], + "blocks": [ + { + "bbox": [ + 316, + 69, + 553, + 299 + ], + "lines": [ + { + "bbox": [ + 316, + 69, + 553, + 299 + ], + "spans": [ + { + "bbox": [ + 316, + 69, + 553, + 299 + ], + "type": "image", + "image_path": "44ebde2dd6cfbcb637ef99847a469c9c5e74ddeb4368792fe3d6809b1e91856e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 318, + 304, + 549, + 316 + ], + "lines": [ + { + "bbox": [ + 318, + 304, + 549, + 316 + ], + "spans": [ + { + "bbox": [ + 318, + 304, + 549, + 316 + ], + "type": "text", + "content": "Figure 8. Qualitative comparison on Subject-Canny generation." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 328, + 511, + 340 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 328, + 511, + 340 + ], + "spans": [ + { + "bbox": [ + 313, + 328, + 511, + 340 + ], + "type": "text", + "content": "4.2.3. Subject-Spatial Conditional Generation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 344, + 555, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 344, + 555, + 619 + ], + "spans": [ + { + "bbox": [ + 313, + 344, + 555, + 619 + ], + "type": "text", + "content": "The Subject-Spatial conditional generation focuses on generating images of the reference subject while ensuring the layout aligns with specified spatial conditions. We compare our method with Ctrl-X [27] and a simple baseline model. Ctrl-X is a recently proposed model based on SDXL [34] that simultaneously controls structure and appearance. The baseline model is constructed by integrating the FLUX ControlNet [53, 54] and FLUX IP-Adapter [55] into the FLUX.1-dev [23] base model. Specifically, we divided the Subject-Spatial generative task into different experimental groups based on the type of spatial conditions, referred to as Subject-Depth and Subject-Canny, respectively. As presented in Fig. 7, Fig. 8, and Tab. 1, the experimental results demonstrate the superior performance of our UniCombine: Firstly, our method exhibits stronger semantic comprehension capability, generating the reference subject in the accurate localization of the spatial conditions without confusing appearance features. Secondly, our method demonstrates greater adaptability, generating the reference subject with reasonable morphological transformations to align with the guidance of spatial conditions and text prompts. Lastly, our method achieves superior subject consistency while maintaining excellent spatial coherence." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 626, + 419, + 637 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 626, + 419, + 637 + ], + "spans": [ + { + "bbox": [ + 313, + 626, + 419, + 637 + ], + "type": "text", + "content": "4.2.4. Textual Guidance" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 641, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 641, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 641, + 555, + 713 + ], + "type": "text", + "content": "As shown in Fig. 1 and Tab. 1, our method not only allows for controllable generation by combining multiple conditions but also enables precise textual guidance simultaneously. By utilizing a unified input sequence " + }, + { + "bbox": [ + 313, + 641, + 555, + 713 + ], + "type": "inline_equation", + "content": "S = [T; X; C_1; \\ldots; C_N]" + }, + { + "bbox": [ + 313, + 641, + 555, + 713 + ], + "type": "text", + "content": " during the denoising process, our UniCombine effectively aligns the descriptive words in " + }, + { + "bbox": [ + 313, + 641, + 555, + 713 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 313, + 641, + 555, + 713 + ], + "type": "text", + "content": " with" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 70, + 302, + 112 + ], + "blocks": [ + { + "bbox": [ + 56, + 70, + 302, + 112 + ], + "lines": [ + { + "bbox": [ + 56, + 70, + 302, + 112 + ], + "spans": [ + { + "bbox": [ + 56, + 70, + 302, + 112 + ], + "type": "table", + "html": "
MethodCLIP-I ↑DINO ↑CLIP-T ↑AttnOps ↓
Ours w/o CMMDiT95.4788.4233.10732.17M
Ours w/ CMMDiT95.6089.0133.11612.63M
", + "image_path": "8b5b9183e06014b6e81f79d2b0e6e78e929ba4f810448e1f8a953e9f0afcf401.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 56, + 157, + 294, + 266 + ], + "blocks": [ + { + "bbox": [ + 55, + 113, + 297, + 147 + ], + "lines": [ + { + "bbox": [ + 55, + 113, + 297, + 147 + ], + "spans": [ + { + "bbox": [ + 55, + 113, + 297, + 147 + ], + "type": "text", + "content": "Table 2. Quantitative ablation of CMMDiT Attention mechanism on training-free Subject-Insertion task. AttnOps is short for the number of attention operations." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 56, + 157, + 294, + 266 + ], + "lines": [ + { + "bbox": [ + 56, + 157, + 294, + 266 + ], + "spans": [ + { + "bbox": [ + 56, + 157, + 294, + 266 + ], + "type": "table", + "html": "
Background\nSubjectTraining-free\nw/o CMMDiTTraining-free\nw/ CMMDiTBackground\nSubjectTraining-free\nw/o CMMDiTTraining-free\nw/ CMMDiT
inconsistentsuccessinconsistentsuccess
failsuccessfailsuccess
", + "image_path": "2a92bbe800381328c2717f661d17a8523810856178e0fb0febb65310f9baa364.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 65, + 300, + 285, + 342 + ], + "blocks": [ + { + "bbox": [ + 55, + 268, + 295, + 291 + ], + "lines": [ + { + "bbox": [ + 55, + 268, + 295, + 291 + ], + "spans": [ + { + "bbox": [ + 55, + 268, + 295, + 291 + ], + "type": "text", + "content": "Figure 9. Qualitative ablation of CMMDiT Attention mechanism on training-free Subject-Insertion task." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 65, + 300, + 285, + 342 + ], + "lines": [ + { + "bbox": [ + 65, + 300, + 285, + 342 + ], + "spans": [ + { + "bbox": [ + 65, + 300, + 285, + 342 + ], + "type": "table", + "html": "
MethodCLIP-I ↑DINO ↑CLIP-T ↑
Ours w/ Text-LoRA96.9792.3233.10
Ours w/ Denoising-LoRA97.1492.9633.08
", + "image_path": "b5701621428ee2941e72b32e1a85040f321539ed97d657923eef29882e89bdfd.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 56, + 375, + 294, + 485 + ], + "blocks": [ + { + "bbox": [ + 56, + 375, + 294, + 485 + ], + "lines": [ + { + "bbox": [ + 56, + 375, + 294, + 485 + ], + "spans": [ + { + "bbox": [ + 56, + 375, + 294, + 485 + ], + "type": "image", + "image_path": "fc8bc88e1ec09551aeba8132d1ccff2d2a1eb574ea62259a59e0357472255443.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 487, + 295, + 510 + ], + "lines": [ + { + "bbox": [ + 55, + 487, + 295, + 510 + ], + "spans": [ + { + "bbox": [ + 55, + 487, + 295, + 510 + ], + "type": "text", + "content": "Figure 10. Qualitative ablation of trainable LoRA on training-based Subject-Insertion task." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 521, + 295, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 521, + 295, + 558 + ], + "spans": [ + { + "bbox": [ + 55, + 521, + 295, + 558 + ], + "type": "text", + "content": "the relevant features in " + }, + { + "bbox": [ + 55, + 521, + 295, + 558 + ], + "type": "inline_equation", + "content": "C_i" + }, + { + "bbox": [ + 55, + 521, + 295, + 558 + ], + "type": "text", + "content": " and the corresponding patches in " + }, + { + "bbox": [ + 55, + 521, + 295, + 558 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 55, + 521, + 295, + 558 + ], + "type": "text", + "content": ", thereby achieving a remarkable text-guided multi-conditional controllable generation." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 564, + 149, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 564, + 149, + 578 + ], + "spans": [ + { + "bbox": [ + 55, + 564, + 149, + 578 + ], + "type": "text", + "content": "4.3. Ablation Study" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 582, + 295, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 295, + 617 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 295, + 617 + ], + "type": "text", + "content": "We exhibit the ablation study results conducted on the Subject-Insertion task in this section, while more results on the other tasks are provided in Sec. A2." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 617, + 295, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 295, + 712 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 295, + 712 + ], + "type": "text", + "content": "Effect of Conditional MMDiT Attention. To evaluate the effectiveness of our proposed Conditional MMDiT Attention mechanism, we replace the CMMDiT Attention with the original MMDiT Attention and test its training-free performance to avoid the influence of training data. As shown in Tab. 2 and Fig. 9, our framework attains superior performance with fewer attention operations when employing the CMMDiT Attention mechanism." + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 329, + 70, + 538, + 112 + ], + "blocks": [ + { + "bbox": [ + 55, + 343, + 295, + 365 + ], + "lines": [ + { + "bbox": [ + 55, + 343, + 295, + 365 + ], + "spans": [ + { + "bbox": [ + 55, + 343, + 295, + 365 + ], + "type": "text", + "content": "Table 3. Quantitative ablation of trainable LoRA on training-based Subject-Insertion task." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 329, + 70, + 538, + 112 + ], + "lines": [ + { + "bbox": [ + 329, + 70, + 538, + 112 + ], + "spans": [ + { + "bbox": [ + 329, + 70, + 538, + 112 + ], + "type": "table", + "html": "
MethodCLIP-I ↑DINO ↑CLIP-T ↑
Ours w/ DSB only96.8592.3833.07
Ours w/ DSB and SSB97.1492.9633.08
", + "image_path": "3618030c54b9a80305073bef94f2394a26e7580cbe554afe3dc981094168e421.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 315, + 158, + 553, + 266 + ], + "blocks": [ + { + "bbox": [ + 313, + 113, + 553, + 145 + ], + "lines": [ + { + "bbox": [ + 313, + 113, + 553, + 145 + ], + "spans": [ + { + "bbox": [ + 313, + 113, + 553, + 145 + ], + "type": "text", + "content": "Table 4. Quantitative ablation of training strategy on training-based Subject-Insertion task. DSB: Dual-Stream Blocks. SSB: Single-Stream Blocks." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 315, + 158, + 553, + 266 + ], + "lines": [ + { + "bbox": [ + 315, + 158, + 553, + 266 + ], + "spans": [ + { + "bbox": [ + 315, + 158, + 553, + 266 + ], + "type": "table", + "html": "
Background\nSubjectTraining-based\nw/ DSB onlyTraining-based\nw/ DSB + SSBBackground\nSubjectTraining-based\nw/ DSB onlyTraining-based\nw/ DSB + SSB
inconsistentsuccessinconsistentsuccessinconsistentsuccess
inconsistentsuccessfailsuccessinconsistentinconsistent
", + "image_path": "e87e6f8b3e137a3f2fcae475f15efcbe2b56a7113615b229b8cb075304866a5d.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 319, + 314, + 548, + 398 + ], + "blocks": [ + { + "bbox": [ + 313, + 268, + 553, + 300 + ], + "lines": [ + { + "bbox": [ + 313, + 268, + 553, + 300 + ], + "spans": [ + { + "bbox": [ + 313, + 268, + 553, + 300 + ], + "type": "text", + "content": "Figure 11. Qualitative ablation of training strategy on training-based Subject-Insertion task. DSB: Dual-Stream Blocks. SSB: Single-Stream Blocks." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 319, + 314, + 548, + 398 + ], + "lines": [ + { + "bbox": [ + 319, + 314, + 548, + 398 + ], + "spans": [ + { + "bbox": [ + 319, + 314, + 548, + 398 + ], + "type": "table", + "html": "
ModelGPU Memory ↓Add Params ↓
FLUX (bf16, base model)32933M-
CN, 1 cond35235M744M
IP, 1 cond35325M918M
CN + IP, 2 cond36753M1662M
Ours (training-free), 2 cond33323M29M
Ours (training-based), 2 cond33349M44M
", + "image_path": "a4f5c4fc9d8ce66afbdd123ab7fca8f063e5af00acb1254be7e07f83f23cd544.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 399, + 553, + 422 + ], + "lines": [ + { + "bbox": [ + 313, + 399, + 553, + 422 + ], + "spans": [ + { + "bbox": [ + 313, + 399, + 553, + 422 + ], + "type": "text", + "content": "Table 5. Comparison of inference GPU memory cost and additionally introduced parameters. CN: ControlNet. IP: IP-Adapter." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 436, + 555, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 436, + 555, + 532 + ], + "spans": [ + { + "bbox": [ + 313, + 436, + 555, + 532 + ], + "type": "text", + "content": "Different Options for Trainable LoRA. To evaluate whether the trainable LoRA module can be applied to the text branch instead of the denoising branch, we load a Text-LoRA in the text branch, with a configuration identical to that of the Denoising-LoRA. The Tab. 3 and Fig. 10 indicate that applying the trainable LoRA module to the denoising branch better modulates the feature aggregation operation across multiple conditional branches." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 533, + 555, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 533, + 555, + 652 + ], + "spans": [ + { + "bbox": [ + 313, + 533, + 555, + 652 + ], + "type": "text", + "content": "Training Strategy. As the parameter scale of the base model increases, the FLUX adaptations of ControlNet [53, 54] and IP-adapter [55] provided by the HuggingFace [16] community inject conditional features only into the dual-stream MMDiT blocks, rather than the entire network, to save memory. 
In contrast, since our Denoising-LoRA module introduces only a small number of parameters, we incorporate it into both the dual-stream and single-stream blocks to achieve better performance. The results in Tab. 4 and Fig. 11 confirm the validity of our choice." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 653, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 653, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 653, + 553, + 713 + ], + "type": "text", + "content": "Computational Cost. The overheads of our approach in terms of inference GPU memory cost and additionally introduced parameters are minimal. The comparison results against the FLUX ControlNet [53, 54] and FLUX IP-Adapter [55] are shown in Tab. 5." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 131 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 131 + ], + "type": "text", + "content": "More Conditional Branches. Our model places no restrictions on the number of supported conditions. The results shown in Fig. 12 demonstrate our model's strong scalability. As the number of conditional branches increases, the level of control becomes finer." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 58, + 144, + 293, + 273 + ], + "blocks": [ + { + "bbox": [ + 58, + 144, + 293, + 273 + ], + "lines": [ + { + "bbox": [ + 58, + 144, + 293, + 273 + ], + "spans": [ + { + "bbox": [ + 58, + 144, + 293, + 273 + ], + "type": "image", + "image_path": "a621a99e80843ff5996990493bd8940b3ce3197ae924350ffd10503c3b7c6b1c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 274, + 295, + 294 + ], + "lines": [ + { + "bbox": [ + 55, + 274, + 295, + 294 + ], + "spans": [ + { + "bbox": [ + 55, + 274, + 295, + 294 + ], + "type": "text", + "content": "Figure 12. From left to right are training-free multi-conditional combination tasks under: " + }, + { + "bbox": [ + 55, + 274, + 295, + 294 + ], + "type": "inline_equation", + "content": "1/2/3/4" + }, + { + "bbox": [ + 55, + 274, + 295, + 294 + ], + "type": "text", + "content": " conditions." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 314, + 296, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 314, + 296, + 387 + ], + "spans": [ + { + "bbox": [ + 55, + 314, + 296, + 387 + ], + "type": "text", + "content": "More Application Scenarios. Our UniCombine can be easily extended to new scenarios, such as reference-based image stylization. After training a new Condition-LoRA on StyleBooth [10] dataset, our UniCombine is able to integrate the style of the reference image with other conditions successfully, as demonstrated in Fig. 13." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 58, + 399, + 293, + 503 + ], + "blocks": [ + { + "bbox": [ + 58, + 399, + 293, + 503 + ], + "lines": [ + { + "bbox": [ + 58, + 399, + 293, + 503 + ], + "spans": [ + { + "bbox": [ + 58, + 399, + 293, + 503 + ], + "type": "image", + "image_path": "697b9a0d3b2d71d9ac73a42626a46546416d486f912cfdb2949db0c5a79882ad.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 72, + 506, + 277, + 517 + ], + "lines": [ + { + "bbox": [ + 72, + 506, + 277, + 517 + ], + "spans": [ + { + "bbox": [ + 72, + 506, + 277, + 517 + ], + "type": "text", + "content": "Figure 13. Training-free Spatial-Style combination task." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 548, + 128, + 560 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 548, + 128, + 560 + ], + "spans": [ + { + "bbox": [ + 55, + 548, + 128, + 560 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 570, + 296, + 713 + ], + "type": "text", + "content": "We present UniCombine, a DiT-based multi-conditional controllable generative framework capable of handling any combination of conditions, including but not limited to text prompts, spatial maps, and subject images. Extensive experiments on Subject-Insertion, Subject-Spatial, and Multi-Spatial conditional generative tasks demonstrate the state-of-the-art performance of our UniCombine in both training-free and training-based versions. Additionally, we propose the SubjectSpatial200K dataset to address the lack of a publicly available dataset for training and testing multi-conditional generative models. 
We believe our work can advance the development of the controllable generation field." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 315, + 72, + 373, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 72, + 373, + 83 + ], + "spans": [ + { + "bbox": [ + 315, + 72, + 373, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 315, + 91, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 319, + 91, + 555, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 91, + 555, + 112 + ], + "spans": [ + { + "bbox": [ + 319, + 91, + 555, + 112 + ], + "type": "text", + "content": "[1] G. Bradski. The OpenCV Library. Dr. Dobb's Journal of Software Tools, 2000. 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 319, + 114, + 554, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 114, + 554, + 169 + ], + "spans": [ + { + "bbox": [ + 319, + 114, + 554, + 169 + ], + "type": "text", + "content": "[2] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9650-9660, 2021. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 319, + 171, + 553, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 171, + 553, + 203 + ], + "spans": [ + { + "bbox": [ + 319, + 171, + 553, + 203 + ], + "type": "text", + "content": "[3] Jiaxuan Chen, Bo Zhang, Qingdong He, Jinlong Peng, and Li Niu. Mureobjectstitch: Multi-reference image composition. arXiv preprint arXiv:2411.07462, 2024. 
2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 319, + 205, + 553, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 205, + 553, + 258 + ], + "spans": [ + { + "bbox": [ + 319, + 205, + 553, + 258 + ], + "type": "text", + "content": "[4] Xi Chen, Lianghua Huang, Yu Liu, Yujun Shen, Deli Zhao, and Hengshuang Zhao. Anydoor: Zero-shot object-level image customization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6593-6602, 2024. 2, 5" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 319, + 261, + 553, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 261, + 553, + 316 + ], + "spans": [ + { + "bbox": [ + 319, + 261, + 553, + 316 + ], + "type": "text", + "content": "[5] Zheng Chong, Xiao Dong, Haoxiang Li, Shiyue Zhang, Wenqing Zhang, Xujie Zhang, Hanqing Zhao, Dongmei Jiang, and Xiaodan Liang. Catvton: Concatenation is all you need for virtual try-on with diffusion models. arXiv preprint arXiv:2407.15886, 2024. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 319, + 318, + 553, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 318, + 553, + 373 + ], + "spans": [ + { + "bbox": [ + 319, + 318, + 553, + 373 + ], + "type": "text", + "content": "[6] Guillaume Couairon, Marlene Careil, Matthieu Cord, Stephane Lathuiliere, and Jakob Verbeek. Zero-shot spatial layout conditioning for text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2174-2183, 2023. 
3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 319, + 374, + 553, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 374, + 553, + 430 + ], + "spans": [ + { + "bbox": [ + 319, + 374, + 553, + 430 + ], + "type": "text", + "content": "[7] Patrick Esser, Sumith Kulal, Andreas Blattmann, Rahim Entezari, Jonas Müller, Harry Saini, Yam Levi, Dominik Lorenz, Axel Sauer, Frederic Boesel, et al. Scaling rectified flow transformers for high-resolution image synthesis, 2024. URL https://arxiv.org/abs/2403.03206, 2.2, 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 319, + 431, + 553, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 431, + 553, + 485 + ], + "spans": [ + { + "bbox": [ + 319, + 431, + 553, + 485 + ], + "type": "text", + "content": "[8] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 319, + 487, + 553, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 487, + 553, + 531 + ], + "spans": [ + { + "bbox": [ + 319, + 487, + 553, + 531 + ], + "type": "text", + "content": "[9] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks. Communications of the ACM, 63(11):139-144, 2020. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 533, + 553, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 533, + 553, + 575 + ], + "spans": [ + { + "bbox": [ + 315, + 533, + 553, + 575 + ], + "type": "text", + "content": "[10] Zhen Han, Chaojie Mao, Zeyinzi Jiang, Yulin Pan, and Jingfeng Zhang. Stylebooth: Image style editing with multimodal instruction. 
arXiv preprint arXiv:2404.12154, 2024.9" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 578, + 553, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 578, + 553, + 621 + ], + "spans": [ + { + "bbox": [ + 315, + 578, + 553, + 621 + ], + "type": "text", + "content": "[11] Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 624, + 553, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 624, + 553, + 677 + ], + "spans": [ + { + "bbox": [ + 315, + 624, + 553, + 677 + ], + "type": "text", + "content": "[12] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 680, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 680, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 315, + 680, + 553, + 713 + ], + "type": "text", + "content": "[13] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 
2" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 127 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 127 + ], + "type": "text", + "content": "[14] Minghui Hu, Jianbin Zheng, Daqing Liu, Chuanxia Zheng, Chaoyue Wang, Dacheng Tao, and Tat-Jen Cham. Cocktail: Mixing multi-modality control for text-conditional image generation. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 129, + 294, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 129, + 294, + 194 + ], + "spans": [ + { + "bbox": [ + 56, + 129, + 294, + 194 + ], + "type": "text", + "content": "[15] Teng Hu, Ran Yi, Haokun Zhu, Liang Liu, Jinlong Peng, Yabiao Wang, Chengjie Wang, and Lizhuang Ma. Stroke-based neural painting and stylization with dynamically predicted painting region. In Proceedings of the 31st ACM International Conference on Multimedia, pages 7470-7480, 2023. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 197, + 294, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 197, + 294, + 228 + ], + "spans": [ + { + "bbox": [ + 56, + 197, + 294, + 228 + ], + "type": "text", + "content": "[16] HuggingFace. Diffusers: State-of-the-art diffusion models. 
https://github.com/huggingface/diffusers, 2023.8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 231, + 294, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 231, + 294, + 285 + ], + "spans": [ + { + "bbox": [ + 56, + 231, + 294, + 285 + ], + "type": "text", + "content": "[17] Boyuan Jiang, Xiaobin Hu, Donghao Luo, Qingdong He, Chengming Xu, Jinlong Peng, Jiangning Zhang, Chengjie Wang, Yunsheng Wu, and Yanwei Fu. Fitdit: Advancing the authentic garment details for high-fidelity virtual try-on. arXiv preprint arXiv:2411.10499, 2024. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 286, + 294, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 286, + 294, + 351 + ], + "spans": [ + { + "bbox": [ + 56, + 286, + 294, + 351 + ], + "type": "text", + "content": "[18] Ying Jin, Jinlong Peng, Qingdong He, Teng Hu, Hao Chen, Jiafu Wu, Wenbing Zhu, Mingmin Chi, Jun Liu, Yabiao Wang, et al. Dualanodiff: Dual-interrelated diffusion model for few-shot anomaly image generation. Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2025. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 354, + 294, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 354, + 294, + 397 + ], + "spans": [ + { + "bbox": [ + 56, + 354, + 294, + 397 + ], + "type": "text", + "content": "[19] Xuan Ju, Xian Liu, Xintao Wang, Yuxuan Bian, Ying Shan, and Qiang Xu. Brushnet: A plug-and-play image inpainting model with decomposed dual-branch diffusion. arXiv preprint arXiv:2403.06976, 2024. 2, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 399, + 294, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 399, + 294, + 441 + ], + "spans": [ + { + "bbox": [ + 56, + 399, + 294, + 441 + ], + "type": "text", + "content": "[20] Chanran Kim, Jeongin Lee, Shichang Joung, Bongmo Kim, and Yeul-Min Baek. 
Instantfamily: Masked attention for zero-shot multi-id image generation. arXiv preprint arXiv:2404.19427, 2024. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 444, + 294, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 444, + 294, + 509 + ], + "spans": [ + { + "bbox": [ + 56, + 444, + 294, + 509 + ], + "type": "text", + "content": "[21] Lingjie Kong, Kai Wu, Xiaobin Hu, Wenhui Han, Jinlong Peng, Chengming Xu, Donghao Luo, Jiangning Zhang, Chengjie Wang, and Yanwei Fu. Anymaker: Zero-shot general object customization via decoupled dual-level id injection. Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2025. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 511, + 294, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 511, + 294, + 565 + ], + "spans": [ + { + "bbox": [ + 56, + 511, + 294, + 565 + ], + "type": "text", + "content": "[22] Nupur Kumari, Bingliang Zhang, Richard Zhang, Eli Shechtman, and Jun-Yan Zhu. Multi-concept customization of text-to-image diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1931-1941, 2023. 2, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 567, + 294, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 567, + 294, + 588 + ], + "spans": [ + { + "bbox": [ + 56, + 567, + 294, + 588 + ], + "type": "text", + "content": "[23] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2023. 2, 3, 4, 5, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 590, + 294, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 590, + 294, + 634 + ], + "spans": [ + { + "bbox": [ + 56, + 590, + 294, + 634 + ], + "type": "text", + "content": "[24] Dongxu Li, Junnan Li, and Steven Hoi. 
Blip-diffusion: Pretrained subject representation for controllable text-to-image generation and editing. Advances in Neural Information Processing Systems, 36:30146-30166, 2023. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 635, + 294, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 635, + 294, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 635, + 294, + 689 + ], + "type": "text", + "content": "[25] Pengzhi Li, Qiang Nie, Ying Chen, Xi Jiang, Kai Wu, Yuhuan Lin, Yong Liu, Jinlong Peng, Chengjie Wang, and Feng Zheng. Tuning-free image customization with image and text guidance. In European Conference on Computer Vision, pages 233-250. Springer, 2024. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 692, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 692, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 692, + 294, + 713 + ], + "type": "text", + "content": "[26] Yuheng Li, Haotian Liu, Qingyang Wu, Fangzhou Mu, Jianwei Yang, Jianfeng Gao, Chunyuan Li, and Yong Jae Lee." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 106 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 106 + ], + "type": "text", + "content": "Gligen: Open-set grounded text-to-image generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22511-22521, 2023. 
3, 5" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 108, + 553, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 108, + 553, + 161 + ], + "spans": [ + { + "bbox": [ + 316, + 108, + 553, + 161 + ], + "type": "text", + "content": "[27] Kuan Heng Lin, Sicheng Mo, Ben Klingher, Fangzhou Mu, and Bolei Zhou. Ctrl-x: Controlling structure and appearance for text-to-image generation without guidance. Advances in Neural Information Processing Systems, 37: 128911-128939, 2025. 2, 3, 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 163, + 553, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 163, + 553, + 196 + ], + "spans": [ + { + "bbox": [ + 316, + 163, + 553, + 196 + ], + "type": "text", + "content": "[28] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. arXiv preprint arXiv:2210.02747, 2022. 2, 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 198, + 553, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 198, + 553, + 230 + ], + "spans": [ + { + "bbox": [ + 316, + 198, + 553, + 230 + ], + "type": "text", + "content": "[29] Xingchao Liu, Chengyue Gong, and Qiang Liu. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. 2, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 232, + 555, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 232, + 555, + 285 + ], + "spans": [ + { + "bbox": [ + 316, + 232, + 555, + 285 + ], + "type": "text", + "content": "[30] Ron Mokady, Amir Hertz, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Null-text inversion for editing real images using guided diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6038–6047, 2023. 
2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 288, + 553, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 288, + 553, + 342 + ], + "spans": [ + { + "bbox": [ + 316, + 288, + 553, + 342 + ], + "type": "text", + "content": "[31] Chong Mou, Xintao Wang, Liangbin Xie, Yanze Wu, Jian Zhang, Zhongang Qi, and Ying Shan. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 4296-4304, 2024. 2, 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 344, + 553, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 344, + 553, + 387 + ], + "spans": [ + { + "bbox": [ + 316, + 344, + 553, + 387 + ], + "type": "text", + "content": "[32] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 389, + 553, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 389, + 553, + 433 + ], + "spans": [ + { + "bbox": [ + 316, + 389, + 553, + 433 + ], + "type": "text", + "content": "[33] Jinlong Peng, Zekun Luo, Liang Liu, and Boshen Zhang. Frih: fine-grained region-aware image harmonization. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 4478-4486, 2024. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 434, + 553, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 434, + 553, + 488 + ], + "spans": [ + { + "bbox": [ + 316, + 434, + 553, + 488 + ], + "type": "text", + "content": "[34] Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach. Sdxl: Improving latent diffusion models for high-resolution image synthesis. 
arXiv preprint arXiv:2307.01952, 2023. 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 491, + 553, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 491, + 553, + 544 + ], + "spans": [ + { + "bbox": [ + 316, + 491, + 553, + 544 + ], + "type": "text", + "content": "[35] Can Qin, Shu Zhang, Ning Yu, Yihao Feng, Xinyi Yang, Yingbo Zhou, Huan Wang, Juan Carlos Niebles, Caiming Xiong, Silvio Savarese, et al. Unicontrol: A unified diffusion model for controllable visual generation in the wild. arXiv preprint arXiv:2305.11147, 2023. 2, 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 546, + 553, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 546, + 553, + 611 + ], + "spans": [ + { + "bbox": [ + 316, + 546, + 553, + 611 + ], + "type": "text", + "content": "[36] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 613, + 555, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 613, + 555, + 668 + ], + "spans": [ + { + "bbox": [ + 316, + 613, + 555, + 668 + ], + "type": "text", + "content": "[37] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 
2, 4" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 670, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 670, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 670, + 553, + 713 + ], + "type": "text", + "content": "[38] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference," + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 733, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 733, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 733, + 311, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 77, + 72, + 296, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 72, + 296, + 95 + ], + "spans": [ + { + "bbox": [ + 77, + 72, + 296, + 95 + ], + "type": "text", + "content": "Munich, Germany, October 5-9, 2015, proceedings, part III 18, pages 234-241. Springer, 2015. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 96, + 296, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 96, + 296, + 140 + ], + "spans": [ + { + "bbox": [ + 56, + 96, + 296, + 140 + ], + "type": "text", + "content": "[39] Litu Rout, Yujia Chen, Nataniel Ruiz, Constantine Caramanis, Sanjay Shakkottai, and Wen-Sheng Chu. Semantic image inversion and editing using rectified stochastic differential equations. arXiv preprint arXiv:2410.10792, 2024. 
2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 141, + 296, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 141, + 296, + 206 + ], + "spans": [ + { + "bbox": [ + 56, + 141, + 296, + 206 + ], + "type": "text", + "content": "[40] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22500-22510, 2023. 2, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 208, + 295, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 208, + 295, + 262 + ], + "spans": [ + { + "bbox": [ + 56, + 208, + 295, + 262 + ], + "type": "text", + "content": "[41] Kihyuk Sohn, Nataniel Ruiz, Kimin Lee, Daniel Castro Chin, Irina Blok, Huiwen Chang, Jarred Barber, Lu Jiang, Glenn Entis, Yuanzhen Li, et al. Styledrop: Text-to-image generation in any style. arXiv preprint arXiv:2306.00983, 2023. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 263, + 295, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 263, + 295, + 297 + ], + "spans": [ + { + "bbox": [ + 56, + 263, + 295, + 297 + ], + "type": "text", + "content": "[42] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502, 2020. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 298, + 295, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 298, + 295, + 342 + ], + "spans": [ + { + "bbox": [ + 56, + 298, + 295, + 342 + ], + "type": "text", + "content": "[43] Yizhi Song, Zhifei Zhang, Zhe Lin, Scott Cohen, Brian Price, Jianming Zhang, Soo Ye Kim, and Daniel Aliaga. Objectstitch: Generative object compositing. arXiv preprint arXiv:2212.00932, 2022. 
2, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 343, + 295, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 343, + 295, + 386 + ], + "spans": [ + { + "bbox": [ + 56, + 343, + 295, + 386 + ], + "type": "text", + "content": "[44] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 388, + 295, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 388, + 295, + 432 + ], + "spans": [ + { + "bbox": [ + 56, + 388, + 295, + 432 + ], + "type": "text", + "content": "[45] Zhenxiong Tan, Songhua Liu, Xingyi Yang, Qiaochu Xue, and Xinchao Wang. *Omnicontrol: Minimal and universal control for diffusion transformer.* arXiv preprint arXiv:2411.15098, 3, 2024. 2, 3, 4, 5, 12" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 434, + 295, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 434, + 295, + 488 + ], + "spans": [ + { + "bbox": [ + 56, + 434, + 295, + 488 + ], + "type": "text", + "content": "[46] Haoxuan Wang, Qingdong He, Jinlong Peng, Hao Yang, Mingmin Chi, and Yabiao Wang. Mamba-yolo-world: Marrying yolo-world with mamba for open-vocabulary detection. IEEE International Conference on Acoustics, Speech, and Signal Processing, 2025. 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 490, + 295, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 490, + 295, + 533 + ], + "spans": [ + { + "bbox": [ + 56, + 490, + 295, + 533 + ], + "type": "text", + "content": "[47] Qixun Wang, Xu Bai, Haofan Wang, Zekui Qin, Anthony Chen, Huaxia Li, Xu Tang, and Yao Hu. Instantid: Zero-shot identity-preserving generation in seconds. arXiv preprint arXiv:2401.07519, 2024. 
2, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 535, + 295, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 535, + 295, + 610 + ], + "spans": [ + { + "bbox": [ + 56, + 535, + 295, + 610 + ], + "type": "text", + "content": "[48] Su Wang, Chitwan Sahara, Ceslee Montgomery, Jordi Pont-Tuset, Shai Noy, Stefano Pellegrini, Yasumasa Onoe, Sarah Laszlo, David J Fleet, Radu Soricut, et al. Imagen editor and editbench: Advancing and evaluating text-guided image inpainting. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 18359-18369, 2023. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 612, + 295, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 612, + 295, + 645 + ], + "spans": [ + { + "bbox": [ + 56, + 612, + 295, + 645 + ], + "type": "text", + "content": "[49] Xudong Wang, Trevor Darrell, Sai Saketh Rambhatla, Rohit Girdhar, and Ishan Misra. Instancediffusion: Instance-level control for image generation, 2024. 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 647, + 295, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 647, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 647, + 295, + 689 + ], + "type": "text", + "content": "[50] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 691, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 691, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 691, + 295, + 713 + ], + "type": "text", + "content": "[51] Daniel Winter, Asaf Shul, Matan Cohen, Dana Berman, Yael Pritch, Alex Rav-Acha, and Yedid Hoshen. 
Objectmate: A" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 533 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "text", + "content": "recurrence prior for object insertion and subject-driven generation. arXiv preprint arXiv:2412.08645, 2024. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 96, + 553, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 96, + 553, + 140 + ], + "spans": [ + { + "bbox": [ + 316, + 96, + 553, + 140 + ], + "type": "text", + "content": "[52] Peng Xing, Haofan Wang, Yanpeng Sun, Qixun Wang, Xu Bai, Hao Ai, Renyuan Huang, and Zechao Li. Csgo: Content-style composition in text-to-image generation. arXiv preprint arXiv:2408.16766, 2024. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 141, + 553, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 141, + 553, + 174 + ], + "spans": [ + { + "bbox": [ + 316, + 141, + 553, + 174 + ], + "type": "text", + "content": "[53] XLabs-AI. Flux-controlnet-canny-diffusers. https://huggingface.co/XLabs-AI/flux-controlnet-canny-diffusers,2024.7,8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 175, + 553, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 175, + 553, + 207 + ], + "spans": [ + { + "bbox": [ + 316, + 175, + 553, + 207 + ], + "type": "text", + "content": "[54] XLabs-AI. Flux-controlnet-depth-diffusers. 
https://huggingface.co/XLabs-AI/flux-controlnet-depth-diffusers,2024.7,8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 209, + 552, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 209, + 552, + 230 + ], + "spans": [ + { + "bbox": [ + 316, + 209, + 552, + 230 + ], + "type": "text", + "content": "[55] XLabs-AI. Flux-ip-adapter. https://huggingface.co/XLabs-AI/flux-ip-adapter, 2024.7,8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 232, + 553, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 232, + 553, + 297 + ], + "spans": [ + { + "bbox": [ + 316, + 232, + 553, + 297 + ], + "type": "text", + "content": "[56] Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, and Fang Wen. Paint by example: Exemplar-based image editing with diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18381-18391, 2023. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 298, + 553, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 298, + 553, + 331 + ], + "spans": [ + { + "bbox": [ + 316, + 298, + 553, + 331 + ], + "type": "text", + "content": "[57] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In CVPR, 2024. 5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 333, + 553, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 333, + 553, + 376 + ], + "spans": [ + { + "bbox": [ + 316, + 333, + 553, + 376 + ], + "type": "text", + "content": "[58] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721, 2023. 
2, 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 377, + 553, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 377, + 553, + 421 + ], + "spans": [ + { + "bbox": [ + 316, + 377, + 553, + 421 + ], + "type": "text", + "content": "[59] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023. 2, 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 422, + 553, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 422, + 553, + 476 + ], + "spans": [ + { + "bbox": [ + 316, + 422, + 553, + 476 + ], + "type": "text", + "content": "[60] Shihao Zhao, Dongdong Chen, Yen-Chun Chen, Jianmin Bao, Shaozhe Hao, Lu Yuan, and Kwan-Yee K Wong. Uni-controlnet: All-in-one control to text-to-image diffusion models. Advances in Neural Information Processing Systems, 36, 2024. 2, 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 478, + 553, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 478, + 553, + 533 + ], + "spans": [ + { + "bbox": [ + 316, + 478, + 553, + 533 + ], + "type": "text", + "content": "[61] Junhao Zhuang, Yanhong Zeng, Wenran Liu, Chun Yuan, and Kai Chen. A task is worth one word: Learning with task prompts for high-quality versatile image inpainting. In European Conference on Computer Vision, pages 195-211. Springer, 2025. 
2, 5" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 138, + 68, + 473, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 68, + 473, + 102 + ], + "spans": [ + { + "bbox": [ + 138, + 68, + 473, + 102 + ], + "type": "text", + "content": "UniCombine: Unified Multi-Conditional Combination with Diffusion Transformer" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "spans": [ + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "type": "text", + "content": "Supplementary Material" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 141, + 225, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 141, + 225, + 155 + ], + "spans": [ + { + "bbox": [ + 55, + 141, + 225, + 155 + ], + "type": "text", + "content": "A1. Dataset Partitioning Scheme" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 161, + 296, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 161, + 296, + 220 + ], + "spans": [ + { + "bbox": [ + 55, + 161, + 296, + 220 + ], + "type": "text", + "content": "In our proposed SubjectSpatial200K dataset, we utilize the ChatGPT-4o assessment scores provided by Subjects200K [45] on Subject Consistency, Composition Structure, and Image Quality to guide the dataset partitioning in our experiments." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 221, + 295, + 293 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 55, + 221, + 295, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 221, + 295, + 245 + ], + "spans": [ + { + "bbox": [ + 55, + 221, + 295, + 245 + ], + "type": "text", + "content": "- Subject Consistency: Ensuring the identity of the subject image is consistent with that of the ground truth image." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 246, + 294, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 246, + 294, + 269 + ], + "spans": [ + { + "bbox": [ + 56, + 246, + 294, + 269 + ], + "type": "text", + "content": "- Composition Structure: Verifying a reasonable composition of the subject and ground truth images." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 270, + 294, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 270, + 294, + 293 + ], + "spans": [ + { + "bbox": [ + 56, + 270, + 294, + 293 + ], + "type": "text", + "content": "- Image Quality: Confirming each image pair maintains high resolution and visual fidelity." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 293, + 295, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 293, + 295, + 317 + ], + "spans": [ + { + "bbox": [ + 55, + 293, + 295, + 317 + ], + "type": "text", + "content": "We partition the dataset into 139,403 training samples and 5,827 testing samples through Algorithm 1." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 57, + 320, + 294, + 466 + ], + "blocks": [ + { + "bbox": [ + 57, + 320, + 294, + 466 + ], + "lines": [ + { + "bbox": [ + 57, + 320, + 294, + 466 + ], + "spans": [ + { + "bbox": [ + 57, + 320, + 294, + 466 + ], + "type": "table", + "html": "
Algorithm 1: Dataset Partitioning Scheme
Input: example
Output: train or test
cs← example["Composite Structure"]
iq← example["Image Quality"]
sc← example["Subject Consistency"]
scores← [cs, iq, sc]
if all(s==5 for s in scores) then
return train;
else if cs≥3 and iq==5 and sc==5 then
return test;
", + "image_path": "081e7579449455ce9a0a6c20f089187297d01d022d3027a0bfa4afb2c5a31aa9.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 483, + 276, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 483, + 276, + 495 + ], + "spans": [ + { + "bbox": [ + 55, + 483, + 276, + 495 + ], + "type": "text", + "content": "A2. More Ablation on CMMDiT Attention" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 502, + 296, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 502, + 296, + 574 + ], + "spans": [ + { + "bbox": [ + 55, + 502, + 296, + 574 + ], + "type": "text", + "content": "More quantitative and qualitative ablation results on the other multi-conditional generative tasks are provided here. The comprehensive ablation results in Tab. A1, Tab. A2, Tab. A3, Fig. A1, Fig. A2, and Fig. A3 demonstrate that the UniCombine performs better with our proposed CMMDiT Attention." + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 61, + 582, + 288, + 628 + ], + "blocks": [ + { + "bbox": [ + 61, + 582, + 288, + 628 + ], + "lines": [ + { + "bbox": [ + 61, + 582, + 288, + 628 + ], + "spans": [ + { + "bbox": [ + 61, + 582, + 288, + 628 + ], + "type": "table", + "html": "
MethodCLIP-I ↑DINO ↑CLIP-T ↑F1 ↑
Ours w/o CMMDiT91.5186.3133.200.16
Ours w/ CMMDiT91.8486.8833.210.17
", + "image_path": "7780b4ae3fc30805750b62e7bb1b1c2e1322a555efcac56b2f8f7632150cf53d.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 55, + 629, + 294, + 651 + ], + "lines": [ + { + "bbox": [ + 55, + 629, + 294, + 651 + ], + "spans": [ + { + "bbox": [ + 55, + 629, + 294, + 651 + ], + "type": "text", + "content": "Table A1. Quantitative ablation of CMMDiT Attention mechanism on training-free Subject-Canny task" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 669, + 208, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 669, + 208, + 682 + ], + "spans": [ + { + "bbox": [ + 55, + 669, + 208, + 682 + ], + "type": "text", + "content": "A3. More Qualitative Results" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "content": "More qualitative results are presented in Fig. A4 and Fig. A5." + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 316, + 144, + 553, + 255 + ], + "blocks": [ + { + "bbox": [ + 316, + 144, + 553, + 255 + ], + "lines": [ + { + "bbox": [ + 316, + 144, + 553, + 255 + ], + "spans": [ + { + "bbox": [ + 316, + 144, + 553, + 255 + ], + "type": "image", + "image_path": "c8c9030826d874847297d6aad99732ff7992fa1efed8eaf2d42146029239dcfe.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 314, + 255, + 553, + 277 + ], + "lines": [ + { + "bbox": [ + 314, + 255, + 553, + 277 + ], + "spans": [ + { + "bbox": [ + 314, + 255, + 553, + 277 + ], + "type": "text", + "content": "Figure A1. 
Qualitative ablation of CMMDiT Attention mechanism on training-free Subject-Canny task" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "table", + "bbox": [ + 316, + 287, + 551, + 334 + ], + "blocks": [ + { + "bbox": [ + 316, + 287, + 551, + 334 + ], + "lines": [ + { + "bbox": [ + 316, + 287, + 551, + 334 + ], + "spans": [ + { + "bbox": [ + 316, + 287, + 551, + 334 + ], + "type": "table", + "html": "
MethodCLIP-I ↑DINO ↑CLIP-T ↑MSE ↓
Ours w/o CMMDiT90.8385.3833.38547.63
Ours w/ CMMDiT91.1585.7333.41507.40
", + "image_path": "719ea07bb1cd1e89b41905beec203585fbe544e5a1ee5e9ab3f601d287ad2795.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "table_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 316, + 363, + 553, + 472 + ], + "blocks": [ + { + "bbox": [ + 314, + 335, + 553, + 357 + ], + "lines": [ + { + "bbox": [ + 314, + 335, + 553, + 357 + ], + "spans": [ + { + "bbox": [ + 314, + 335, + 553, + 357 + ], + "type": "text", + "content": "Table A2. Quantitative ablation of CMMDiT Attention mechanism on training-free Subject-Depth task" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 316, + 363, + 553, + 472 + ], + "lines": [ + { + "bbox": [ + 316, + 363, + 553, + 472 + ], + "spans": [ + { + "bbox": [ + 316, + 363, + 553, + 472 + ], + "type": "image", + "image_path": "eec53f0d14c8df15f39c097595e64b40514c9c785b2e319a4f3ff0334abfd869.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 474, + 553, + 496 + ], + "lines": [ + { + "bbox": [ + 313, + 474, + 553, + 496 + ], + "spans": [ + { + "bbox": [ + 313, + 474, + 553, + 496 + ], + "type": "text", + "content": "Figure A2. Qualitative ablation of CMMDiT Attention mechanism on training-free Subject-Depth task" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "table", + "bbox": [ + 342, + 506, + 525, + 553 + ], + "blocks": [ + { + "bbox": [ + 342, + 506, + 525, + 553 + ], + "lines": [ + { + "bbox": [ + 342, + 506, + 525, + 553 + ], + "spans": [ + { + "bbox": [ + 342, + 506, + 525, + 553 + ], + "type": "table", + "html": "
MethodCLIP-T ↑F1 ↑MSE ↓
Ours w/o CMMDiT33.700.17524.04
Ours w/ CMMDiT33.700.18519.53
", + "image_path": "825bdf58c053f99cc780532dc82a60487da430bb38579c85ad77eb49436315d5.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "table_body" + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 554, + 553, + 576 + ], + "lines": [ + { + "bbox": [ + 313, + 554, + 553, + 576 + ], + "spans": [ + { + "bbox": [ + 313, + 554, + 553, + 576 + ], + "type": "text", + "content": "Table A3. Quantitative ablation of CMMDiT Attention mechanism on training-free Multi-Spatial task" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 316, + 583, + 553, + 693 + ], + "blocks": [ + { + "bbox": [ + 316, + 583, + 553, + 693 + ], + "lines": [ + { + "bbox": [ + 316, + 583, + 553, + 693 + ], + "spans": [ + { + "bbox": [ + 316, + 583, + 553, + 693 + ], + "type": "image", + "image_path": "4009d7abc995632fbc8e9c0c98b070e0d250aff3285f29dd8f899b9c93e1e20e.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 694, + 553, + 715 + ], + "lines": [ + { + "bbox": [ + 313, + 694, + 553, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 694, + 553, + 715 + ], + "type": "text", + "content": "Figure A3. 
Qualitative ablation of CMMDiT Attention mechanism on training-free Multi-Spatial task" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 77, + 291, + 680 + ], + "blocks": [ + { + "bbox": [ + 57, + 77, + 291, + 680 + ], + "lines": [ + { + "bbox": [ + 57, + 77, + 291, + 680 + ], + "spans": [ + { + "bbox": [ + 57, + 77, + 291, + 680 + ], + "type": "image", + "image_path": "adfac4a9eebfdf05d1126a0a351b1aaafa113e3c17e6ad1d0d3ccee8c001ec91.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 159, + 691, + 451, + 703 + ], + "lines": [ + { + "bbox": [ + 159, + 691, + 451, + 703 + ], + "spans": [ + { + "bbox": [ + 159, + 691, + 451, + 703 + ], + "type": "text", + "content": "Figure A4. More qualitative results on Multi-Spatial and Subject-Insertion tasks." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 319, + 77, + 553, + 680 + ], + "blocks": [ + { + "bbox": [ + 319, + 77, + 553, + 680 + ], + "lines": [ + { + "bbox": [ + 319, + 77, + 553, + 680 + ], + "spans": [ + { + "bbox": [ + 319, + 77, + 553, + 680 + ], + "type": "image", + "image_path": "721139a8e4ed891aeab8a9eecb8d33d046e4e4574b78dabb5cd359d3ae06e6b6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 77, + 291, + 681 + ], + "blocks": [ + { + "bbox": [ + 55, + 77, + 291, + 681 + ], + "lines": [ + { + "bbox": [ + 55, + 77, + 291, + 681 + ], + "spans": [ + { + "bbox": [ + 55, + 77, + 291, + 681 + ], + "type": "image", + "image_path": "179cb411321ca8ecf6e101c92a53a0ef8cbc9728a7f08f18d3c37f263493e513.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 160, + 691, + 448, + 703 + ], + "lines": [ + { + "bbox": [ + 160, + 691, + 448, + 703 + ], + "spans": [ + { + "bbox": [ + 160, + 691, + 448, + 703 + ], + "type": "text", + "content": "Figure A5. More qualitative results on Subject-Depth and Subject-Canny tasks." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 319, + 77, + 553, + 681 + ], + "blocks": [ + { + "bbox": [ + 319, + 77, + 553, + 681 + ], + "lines": [ + { + "bbox": [ + 319, + 77, + 553, + 681 + ], + "spans": [ + { + "bbox": [ + 319, + 77, + 553, + 681 + ], + "type": "image", + "image_path": "6cc95ee7d4dc5bf05bbb25255f5081767689dccf861e6597d6fd8d83a62792f9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09501/7c196e4e-1362-4974-a470-65c83d863927_content_list.json b/data/2025/2503_09xxx/2503.09501/7c196e4e-1362-4974-a470-65c83d863927_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..9d74bc60b10a87f4315148a9baecb66eca334464 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/7c196e4e-1362-4974-a470-65c83d863927_content_list.json @@ -0,0 +1,6351 @@ +[ + { + "type": "text", + "text": "ReMA: Learning to Meta-think for LLMs with Multi-agent Reinforcement Learning", + "text_level": 1, + "bbox": [ + 214, + 122, + 782, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ziyu Wan $^{1,2*}$ , Yunxiang Li $^{3*}$ , Xiaoyu Wen $^{1,2}$ , Yan Song $^{4}$ , Hanjing Wang $^{1}$ , Linyi Yang $^{4}$ , Mark Schmidt $^{3}$ , Jun Wang $^{4}$ , Weinan Zhang $^{1}$ , Shuyue Hu $^{2\\ddagger}$ , Ying Wen $^{1\\ddagger}$", + "bbox": [ + 200, + 223, + 795, + 256 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": 
"text", + "list_items": [ + "1 Shanghai Jiao Tong University", + "$^{2}$ Shanghai Artificial Intelligence Laboratory", + "3 University of British Columbia", + "4 University College London" + ], + "bbox": [ + 351, + 268, + 643, + 328 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 363, + 537, + 378 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent research on Reasoning of Large Language Models (LLMs) has sought to further enhance their performance by integrating meta-thinking—enabling models to monitor, evaluate, and control their reasoning processes for more adaptive and effective problem-solving. However, current single-agent work lacks a specialized design for acquiring meta-thinking, resulting in low efficacy. To address this challenge, we introduce Reinforced Meta-thinking Agents (ReMA), a novel framework that leverages Multi-Agent Reinforcement Learning (MARL) to elicit meta-thinking behaviors, encouraging LLMs to think about thinking. ReMA decouples the reasoning process into two hierarchical agents: a high-level meta-thinking agent responsible for generating strategic oversight and plans, and a low-level reasoning agent for detailed executions. Through iterative reinforcement learning with aligned objectives, these agents explore and learn collaboration, leading to improved generalization and robustness. Empirical results from single-turn experiments demonstrate that ReMA outperforms single-agent RL baselines on complex reasoning tasks, including competitive-level mathematical benchmarks and LLM-as-a-Judge benchmarks. Additionally, we further extend ReMA to multi-turn interaction settings, leveraging turn-level ratio and parameter sharing to improve efficiency. Comprehensive ablation studies further illustrate the evolving dynamics of each distinct agent, providing valuable insights into how the meta-thinking reasoning process enhances the reasoning capabilities of LLMs. 
Our code can be found in https://github.com/ziyuwan/ReMA-public", + "bbox": [ + 228, + 393, + 766, + 684 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 709, + 313, + 724 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large language models (LLMs) have demonstrated remarkable capabilities in knowledge understanding and complex reasoning tasks [Chowdhery et al., 2023, Achiam et al., 2023, Anil et al., 2023, Dubey et al., 2024]. The paradigm in developing LLM-based reasoning models is shifting from scaling training-time computation towards scaling test-time computation [Snell et al., 2024]. Recent advancements, such as OpenAI-o1 [OpenAI, 2024], Deepseek R1 [DeepSeek-AI et al., 2025], and Gemini 2.0 Flash Thinking [DeepMind, 2025], have demonstrated that allowing LLMs to think before generating answers can significantly enhance performance and lead to the emergence of human-like reasoning patterns. These patterns like \"Wait, hold on.\" or \"Let's break this down.\"", + "bbox": [ + 169, + 739, + 826, + 852 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.09501v3 [cs.AI] 27 May 2025", + "bbox": [ + 22, + 263, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution.", + "bbox": [ + 189, + 859, + 315, + 873 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^{\\dagger}$ Work done during internship at Shanghai Artificial Intelligence Laboratory", + "bbox": [ + 192, + 873, + 640, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Corresponding Author", + "bbox": [ + 192, + 887, + 334, + 901 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Preprint. Under review.", + "bbox": [ + 171, + 922, + 313, + 936 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Question: $T = 9.5$ . 
If $\\log_2 x^T - \\log_4 x = \\log_8 x^k$ is an identity for all $x > 0$ , compute the value of $k$ .", + "bbox": [ + 305, + 93, + 691, + 106 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/fd5b40d644792636991a20db79d8c4202f072f2e023bbd6106c186898b906286.jpg", + "image_caption": [ + "Figure 1: Left: A construction-based method that fine-tunes LLMs using rejection sampling, searching among combinations of pre-defined templates. Middle: R1-like method learns to mix meta-thinking and detailed reasoning steps during training. Right: Our method ReMA separates the meta-thinking and reasoning steps in a multi-agent system and updated by reinforcement learning." + ], + "image_footnote": [], + "bbox": [ + 178, + 108, + 823, + 296 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "indicate that LLMs can develop a form of meta-thinking abilities that can generalize well to out-of-distribution (OOD) tasks [Xiang et al., 2025]. Meta-thinking, also known as metacognitive skills [Flavell, 1979], is an ability traditionally considered uniquely human [Didolkar et al., 2024].", + "bbox": [ + 169, + 380, + 823, + 422 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To cultivate meta-thinking patterns from LLMs themselves, recent construction-based supervised approaches leverage supervised finetuning on structured reasoning trajectories. Specifically, these methods sampling reasoning trajectories from predefined meta-thinking templates and then use supervised finetuning (SFT) or direct preference optimization (DPO) [Rafailov et al., 2023] to teach LLMs imitate these patterns [Qi et al., 2024, Yue et al., Xi et al., 2024, Yang et al., 2025, Muenighoff et al., 2025, Ye et al., 2025c]. However, such methods lack sufficient flexibility for LLMs to explore suitable meta-thinking patterns. Thus, they often fail to generalize to out-of-distribution (OOD) problems, leading to unstable performance on unseen data [Kirk et al., Chu et al., 2025]. 
Besides construction-based methods, R1-like single-agent reinforcement learning (SARL) has also been adopted for meta-thinking in reasoning [DeepSeek-AI et al., 2025, Xie et al., 2025]. However, these SARL attempts typically rely on strong foundational models for easier exploration or extensive task-specific fine-tuning for stable training [Xu et al., 2025, Gandhi et al., 2025]. Furthermore, SARL needs to learn meta-thinking and reasoning within a single forward pass, seeking to capture complex reasoning structures purely in an autoregressive manner [Xie et al., 2025]. This can potentially lead to issues such as inefficient exploration as well as reduced readability and early convergence to local optima [DeepSeek-AI et al., 2025, Xiang et al., 2025].", + "bbox": [ + 169, + 429, + 826, + 650 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address these limitations, we introduce Reinforced Meta-thinking Agents (ReMA), a novel framework that leverages multi-agent reinforcement learning (MARL) to encourage LLMs to think about thinking. Our approach employs a multi-agent system (MAS) composed of a high-level meta-thinking agent, responsible for strategic oversight and instruction generation, and a low-level reasoning agent tasked with detailed executing processes based on provided guidance. We compare the inference process among the construction-based method, R1-like method, and ReMA in Fig. 1. Since MAS distributes the exploration space of SARL into multiple agents, it enables each agent to explore more structurally and efficiently during training. Then we apply reinforcement learning on each agent with aligned reward functions. In this way, ReMA effectively balances the trade-off between generalization capability and exploration efficiency. 
As a result, they can learn to play the best of their role (either to meta-think or to follow instructions), at the present of the other agent.", + "bbox": [ + 169, + 656, + 826, + 808 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To our knowledge, we are the first to formally define and optimize a multi-agent meta-thinking reasoning process (MAMRP) through multi-agent reinforcement learning. Our extensive experiments span both math reasoning and LLM-as-a-Judge tasks, where ReMA consistently achieves the highest average performance across three backbone pretrained models. We further extend ReMA to multi-turn interaction settings on math reasoning tasks, implementing turn-level ratio to optimize trajectory returns and stabilize training. Through comprehensive ablation studies, we illustrate the evolving dynamics between agents, revealing unexpected interaction patterns such as role reversals", + "bbox": [ + 169, + 814, + 823, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "under different reward settings. These findings provide valuable insights into how meta-thinking processes enhance the reasoning capabilities of LLMs.", + "bbox": [ + 169, + 90, + 823, + 122 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Preliminaries", + "text_level": 1, + "bbox": [ + 171, + 140, + 320, + 157 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we outline the formulation of the vanilla reasoning process (Sec. 2.1) and the representative training methods (Sec. 
2.2) along with the notation used throughout the paper.", + "bbox": [ + 169, + 172, + 823, + 202 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Vanilla Reasoning Process (VRP)", + "text_level": 1, + "bbox": [ + 171, + 218, + 444, + 234 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The probability of generating a response $\\mathbf{y}$ equals the product of its stepwise probabilities. Given a model $\\pi_{\\theta}$ and a prompt $\\mathbf{x} = (x_1, \\ldots, x_N)$ , the vanilla reasoning process (VRP) autoregressively produces a response $\\mathbf{y} = (y_1, \\ldots, y_L)$ with", + "bbox": [ + 169, + 244, + 823, + 289 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\pi_ {\\theta} (\\mathbf {y} | \\mathbf {x}) = \\prod_ {l = 1} ^ {L} \\pi_ {\\theta} (y _ {l} | x _ {1}, x _ {2}, \\dots x _ {N}, y _ {1}, \\dots , y _ {l - 1}) = \\prod_ {l = 1} ^ {L} \\pi_ {\\theta} (\\mathbf {y} _ {l} | \\mathbf {x}, \\mathbf {y} _ {< l})\n$$\n", + "text_format": "latex", + "bbox": [ + 263, + 296, + 730, + 339 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The response usually contains intermediate reasoning steps before arriving at the final answer, this process is also known as chain-of-thought (CoT) [Wei et al., 2022], which can be represented as:", + "bbox": [ + 169, + 345, + 823, + 377 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} \\xrightarrow {\\text {r e a s o n i n g s t e p s}} \\mathbf {y} \\sim \\mathbf {a}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 385, + 823, + 406 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathbf{a}$ is the extracted final answer, which is included in the answer $\\mathbf{y}$ .", + "bbox": [ + 171, + 412, + 645, + 429 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 Training VRP via Reinforcement Learning", + "text_level": 1, + "bbox": [ + 171, + 445, + 511, + 462 + ], + "page_idx": 2 + }, + { + 
"type": "text", + "text": "RL frames VRP decoding process as a deterministic, token-level Markov Decision process (MDP) [Wang et al., 2024a]. Its objective is", + "bbox": [ + 169, + 470, + 823, + 501 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {J} (\\theta) = \\mathbb {E} _ {(\\mathbf {x}, \\mathbf {y} ^ {*}) \\sim \\mathcal {D}, \\mathbf {y} \\sim \\pi_ {\\theta}} \\left[ R (\\mathbf {y}, \\mathbf {y} ^ {*}) \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 508, + 620, + 527 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $R(\\cdot, \\cdot)$ represents a reward function comparing generated answer $\\mathbf{y}$ with the golden answer $\\mathbf{y}^*$ for any question $\\mathbf{x}$ sampled from dataset $\\mathcal{D}$ .", + "bbox": [ + 169, + 534, + 823, + 564 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To compute the gradient $\\nabla_{\\theta}\\mathcal{J}(\\theta)$ , computationally efficient algorithms GRPO [Shao et al., 2024] and REINFORCE++ [Hu, 2025] are widely adopted. 
Take GRPO as an example, given a question-answer pair $\\mathbf{x},\\mathbf{y}^*$ and a group of $G$ generated responses $\\mathbf{y}_i$ , denote $\\mathbf{y}_{i,j}$ as the $j$ -th token of the $i$ -th response, it optimizes the following token-level objective:", + "bbox": [ + 169, + 566, + 823, + 621 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {J} (\\boldsymbol {\\theta}) = \\mathbb {E} _ {(\\mathbf {x}, \\mathbf {y} ^ {*}) \\sim \\mathcal {D}, \\{\\mathbf {y} _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\boldsymbol {\\theta} _ {\\mathrm {o l d}}} (\\cdot | \\mathbf {x})} \\\\ \\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| \\mathbf {y} _ {i} |} \\sum_ {j = 1} ^ {| \\mathbf {y} _ {i} |} \\left(\\min \\left(r _ {i, j} (\\theta) \\hat {A} _ {i, j}, \\operatorname {c l i p} \\left(r _ {i, j} (\\theta), 1 - \\epsilon , 1 + \\epsilon\\right) \\hat {A} _ {i, j}\\right) - \\beta D _ {\\mathrm {K L}} \\left(\\pi_ {\\theta} \\| \\pi_ {\\text {r e f}}\\right)\\right) \\right], \\tag {2} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 627, + 823, + 688 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where the token-level ratio $r_{i,j}(\\theta)$ and the group-normalized advantage $\\hat{A}_{i,j}$ are defined as:", + "bbox": [ + 174, + 696, + 776, + 715 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nr _ {i, j} (\\theta) = \\frac {\\pi_ {\\theta} \\left(\\mathbf {y} _ {i , j} \\mid \\mathbf {x} , \\mathbf {y} _ {i , < j}\\right)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} \\left(\\mathbf {y} _ {i , j} \\mid \\mathbf {x} , \\mathbf {y} _ {i , < j}\\right)}, \\hat {A} _ {i, j} = \\frac {R _ {i} - \\operatorname {m e a n} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}{\\operatorname {s t d} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}.\n$$\n", + "text_format": "latex", + "bbox": [ + 285, + 722, + 709, + 760 + ], + "page_idx": 2 + }, + { + "type": 
"text", + "text": "However, RL on base LLMs that haven't been well-aligned may suffer from issues like poor readability and language mixing, preventing researchers from verifying, understanding, and further developing their LLMs. And huge searching space makes efficient learning of meta-thinking daunting.", + "bbox": [ + 169, + 773, + 823, + 818 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 171, + 837, + 272, + 853 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we present Reinforced Meta-thinking Agents (ReMA), a RL method integrating meta-thinking into the reasoning process of LLM under multi-agent settings (Sec. 3.1), then describe the learning process enabled by MARL of single- and multi-turn LLM setting (Secs. 3.2.1 and 3.2.2).", + "bbox": [ + 169, + 869, + 825, + 912 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Deploying Meta-Thinking Reasoning Process for LLMs", + "text_level": 1, + "bbox": [ + 171, + 90, + 599, + 106 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Beyond VRP (Sec. 2.1), recent studies [Muennighoff et al., 2025, Ye et al., 2025c] have shown that integrating meta-thinking behaviors in reasoning process can largely improve the accuracy of the final answers. By integrating Meta-thinking, ReMA decomposes problem solving into two sequential phases: a meta-thinking phase that plans, monitors, or revises strategy, followed by a reasoning phase that produces the detailed solution. We analyse Meta-thinking Reasoning Process along two orthogonal axes—single- vs. multi-agent and single- vs. multi-turn.", + "bbox": [ + 169, + 116, + 823, + 200 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In a single-agent setting, such a process calls LLM once and generates meta-thinking and the following reasoning autoregressively. 
We formulate the meta-thinking reasoning process (MRP) below:", + "bbox": [ + 169, + 205, + 823, + 234 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {y} \\sim \\pi_ {\\theta} (\\mathbf {y} \\mid \\mathbf {x}, \\mathbf {m}) \\cdot \\pi_ {\\theta} (\\mathbf {m} \\mid \\mathbf {x}), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 242, + 823, + 258 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{m}$ and $\\mathbf{y}$ are the output of meta-thinking and reasoning respectively. We present the procedure as shown below:", + "bbox": [ + 169, + 263, + 823, + 290 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} \\xrightarrow {\\text {m e t a - t h i n k i n g}} \\mathbf {m} \\xrightarrow {\\text {r e a s o n i n g s t e p s}} \\mathbf {y} \\sim \\mathbf {a}. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 375, + 291, + 823, + 310 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Exploring MRP reasoning through a single-agent approach is often inefficient, as it requires the language model to simultaneously master both meta-thinking and detailed problem-solving within one call. Prior research has demonstrated that activating different model capabilities through specialized agents significantly improves MRP exploration efficiency. To leverage this insight, we decouple meta-thinking and reasoning into two separate LLM agents: a high-level agent dedicated to generating meta-thinking, and a low-level agent focused on executing reasoning steps.", + "bbox": [ + 169, + 313, + 823, + 397 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "During a conversation, the high-level and low-level agents (i.e., $\\pi_h$ and $\\pi_l$ ) act in an interleaving manner. The high-level agent generates and summarizes meta-thoughts from the prompt and interaction history, while the low-level agent executes detailed problem-solving under those instructions. 
We formulate the multi-agent meta-thinking reasoning process (MAMRP) as follows:", + "bbox": [ + 169, + 402, + 823, + 458 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {y} \\sim \\pi_ {l} (\\mathbf {y} \\mid \\mathbf {x}, \\mathbf {m}) \\pi_ {h} (\\mathbf {m} \\mid \\mathbf {x}). \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 400, + 465, + 823, + 481 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "While the single-turn MAMRP offers a straightforward approach, it lacks the ability to perform immediate and fine-grained cognitive switching during the reasoning process, which limits its effectiveness on complex, long-horizon planning tasks. Therefore, we extend Eq. (5) and formulate the multi-turn MAMRP as follows:", + "bbox": [ + 169, + 487, + 823, + 542 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {y} _ {T} \\sim \\prod_ {t = 1} ^ {T} \\pi_ {l} \\left(\\mathbf {y} _ {t} \\mid \\mathbf {x}, \\{\\mathbf {m}, \\mathbf {y} \\} _ {< t}, \\mathbf {m} _ {t}\\right) \\pi_ {h} \\left(\\mathbf {m} _ {t} \\mid \\mathbf {x}, \\{\\mathbf {m}, \\mathbf {y} \\} _ {< t}\\right) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 549, + 823, + 589 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $T$ is the number of turns. 
Similarly, we present the process with a directed graph:", + "bbox": [ + 171, + 595, + 750, + 611 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} \\xrightarrow [ \\pi_ {h} ]{\\text {m e t a - t h i n k i n g}} \\mathbf {m} _ {1} \\xrightarrow [ \\pi_ {l} ]{\\text {r e a s o n i n g}} \\mathbf {y} _ {1} \\xrightarrow [ \\pi_ {h} ]{\\text {m e t a - t h i n k i n g}} \\mathbf {m} _ {2} \\xrightarrow [ \\pi_ {l} ]{\\text {r e a s o n i n g}} \\mathbf {y} _ {2} \\xrightarrow [ \\pi_ {h} ]{\\text {m e t a - t h i n k i n g}} \\dots \\xrightarrow [ \\pi_ {l} ]{\\text {r e a s o n i n g}} \\mathbf {y} _ {T} \\sim \\mathbf {a}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 619, + 823, + 646 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As a complex reasoning system, MAMRP provides various optimization opportunities in scaling inference-time computation. We leave further discussion of these aspects in Appendix C.1.", + "bbox": [ + 169, + 652, + 823, + 680 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Training MAMRP: A Multi-Agent RL Method", + "text_level": 1, + "bbox": [ + 171, + 696, + 539, + 710 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Multi-agent RL, unlike single-agent RL in a deterministic MDP, must contend with stochastic, nonstationary dynamics and rewards, making optimization more challenging. We start by considering an easier case, the optimization of single-turn MAMRP.", + "bbox": [ + 169, + 720, + 823, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.1 Optimizing Single-turn MAMRP", + "text_level": 1, + "bbox": [ + 171, + 777, + 455, + 792 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To train the system from Sec. 3.1, we embed it as a Markov Game between the two agents. Suppose the two LLM agents are parameterized by $\\theta_h$ and $\\theta_l$ , respectively. 
Define a joint hierarchical policy over sequential decisions $\\mathbf{m}$ and $\\mathbf{y}$ :", + "bbox": [ + 169, + 801, + 823, + 844 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {y} \\sim \\pi_ {\\left(\\theta_ {h}, \\theta_ {l}\\right)} (\\mathbf {y} \\mid \\mathbf {x}) := \\pi_ {\\theta_ {l}} (\\mathbf {y} \\mid \\mathbf {x}, \\mathbf {m}) \\cdot \\pi_ {\\theta_ {h}} (\\mathbf {m} \\mid \\mathbf {x}), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 851, + 823, + 868 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Let $R(\\mathbf{y}, \\mathbf{y}^*)$ denote the final reward serves as the objective function $\\mathcal{J}(\\theta_h, \\theta_l)$ for the joint policy:", + "bbox": [ + 169, + 875, + 818, + 890 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {J} \\left(\\theta_ {h}, \\theta_ {l}\\right) = \\mathbb {E} _ {\\mathbf {x}, \\mathbf {y} ^ {*}} \\mathbb {E} _ {\\mathbf {y} \\sim \\pi \\left(\\theta_ {h}, \\theta_ {l}\\right)} R (\\mathbf {y}, \\mathbf {y} ^ {*}). 
\\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 897, + 823, + 915 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/f246502d1a54bf77abbf1b84a3d339ae985d445d9515b42b122262754119fa92.jpg", + "image_caption": [ + "RL for VRP & MRP" + ], + "image_footnote": [], + "bbox": [ + 176, + 102, + 344, + 268 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/7f3231499e748935cfc4e1cfa6d56049cf9d1cab539e1879a81522875ddbfa66.jpg", + "image_caption": [ + "ReMA with Separate Parameters" + ], + "image_footnote": [], + "bbox": [ + 344, + 103, + 602, + 268 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/c1b6f6aaf161ccb2d707eb51858e4b49250221c289f22f7d7fd725b003912a00.jpg", + "image_caption": [ + "ReMA with Shared Parameters", + "Figure 2: Comparison of training pipelines. Left: RL training of VRP and MRP, where a single LM agent is updated either with mixed (VRP) or explicit (MRP) meta-thinking. Middle: ReMA with separate parameters for the high-level (meta-thinking) and low-level (reasoning) agents; training alternates between freezing one agent and updating the other. Right: ReMA with shared parameters and multi-turn interactions: both agents share the same parameters and are distinguished by their system prompts. Training employs a turn-level ratio for stable multi-turn reinforcement learning and efficient updates, ensuring each turn's contribution is controlled to prevent instability." + ], + "image_footnote": [], + "bbox": [ + 602, + 103, + 821, + 268 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "During optimization procedure, the high-level policy $\\pi_{\\theta_h}$ and low-level policy $\\pi_{\\theta_l}$ aim to maximize their respective rewards independently. 
The optimization goals for agents are:", + "bbox": [ + 169, + 393, + 823, + 422 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\theta_ {h} ^ {*} = \\arg \\max _ {\\theta_ {h}} \\mathbb {E} _ {(\\mathbf {x}, \\mathbf {y} ^ {*}) \\sim \\mathcal {D}, \\mathbf {m} \\sim \\pi_ {\\theta_ {h}}, \\mathbf {y} \\sim \\pi_ {\\theta_ {l} ^ {*}}} \\left[ R _ {h} (\\mathbf {m}, \\mathbf {y}, \\mathbf {y} ^ {*}) \\right], \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 429, + 823, + 452 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\theta_ {l} ^ {*} \\left(\\theta_ {h}\\right) = \\arg \\max _ {\\theta_ {l}} \\mathbb {E} _ {\\left(\\mathbf {x}, \\mathbf {y} ^ {*}\\right) \\sim \\mathcal {D}, \\mathbf {m} \\sim \\pi_ {\\theta_ {h}}, \\mathbf {y} \\sim \\pi_ {\\theta_ {l}}} \\left[ R _ {l} \\left(\\mathbf {m}, \\mathbf {y}, \\mathbf {y} ^ {*}\\right) \\right], \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 297, + 454, + 823, + 478 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $R_{h}$ and $R_{l}$ are policies' individual reward functions, including $R$ and regularization according to tasks and models, e.g., different format rewards (refer to Appendix C.2 for details). The detailed algorithm is in the Algorithm 1. We illustrate the MAMRP inference procedure and the proposed training method in Fig. 2. We also provide an analysis of different loss functions in Appendix C.5.", + "bbox": [ + 169, + 486, + 823, + 544 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2.2 Scaling up to Multi-turn MAMRP", + "text_level": 1, + "bbox": [ + 171, + 559, + 465, + 574 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To scale up to multi-turn MAMRP, we can still adopt the iterative training strategy in Sec. 3.2.1. 
However, we make some changes to improve the efficiency of rollout and training.", + "bbox": [ + 169, + 583, + 823, + 612 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "First, we implement a parameter-sharing strategy where both high-level and low-level agents utilize identical model weights $\\theta$ , distinguished only by role-specific system prompts $S_{h}$ and $S_{l}$ . Formally, we define $\\pi_h = \\pi_\\theta (\\cdot |S_h,\\cdot)$ and $\\pi_l = \\pi_\\theta (\\cdot |S_l,\\cdot)$ , sharing the same underlying parameters rather than maintaining separate model instances. This approach eliminates the need for frequent model swapping on GPU during rollout, avoiding inefficient wait times, while enabling larger batch sizes during training to simultaneously optimize policies for both meta-thinking and reasoning roles.", + "bbox": [ + 169, + 618, + 823, + 702 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Second, we propose a multi-turn GRPO with turn-level ratio to address the challenges of multi-turn MAMRP. The trajectory-level averaged objective with turn-level ratio of $\\pi_{l}$ is defined as (The objective of $\\pi_h$ is the similar but with different system prompt):", + "bbox": [ + 169, + 705, + 823, + 747 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {J} (\\boldsymbol {\\theta}) = \\mathbb {E} _ {(\\mathbf {x}, \\mathbf {y} ^ {*}) \\sim \\mathcal {D}, \\{(\\mathbf {m} _ {i}, \\mathbf {y} _ {i}) \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {\\mathrm {o l d}}} (\\cdot | \\mathbf {x})\n$$\n", + "text_format": "latex", + "bbox": [ + 171, + 753, + 434, + 772 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left. 
\\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{T _ {i}} \\sum_ {t = 1} ^ {T _ {i}} \\frac {1}{| \\mathbf {y} _ {i , t} |} \\sum_ {j = 1} ^ {| \\mathbf {y} _ {i, t} |} \\left(\\min \\left(r _ {i, t} (\\theta) \\hat {A} _ {i, t, j}, \\operatorname {c l i p} \\left(r _ {i, t} (\\theta), 1 - \\epsilon , 1 + \\epsilon\\right) \\hat {A} _ {i, t, j}\\right) - \\beta D _ {\\mathrm {K L}} \\left(\\pi_ {\\theta} \\| \\pi_ {\\text {r e f}}\\right)\\right) \\right] \\right. \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 205, + 775, + 823, + 825 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{y}_{i,t,j}$ is the $j$ -th token at turn $t$ of the reasoning agent of the $i$ -th trajectory. And the turn-level ratio for clipping is defined as:", + "bbox": [ + 171, + 825, + 823, + 854 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nr _ {i, t} (\\theta) = \\frac {1}{| \\mathbf {y} _ {i , t} |} \\sum_ {j = 1} ^ {| \\mathbf {y} _ {i, t} |} r _ {i, t, j} (\\theta) = \\frac {1}{| \\mathbf {y} _ {i , t} |} \\sum_ {j = 1} ^ {| \\mathbf {y} _ {i, t} |} \\frac {\\pi_ {\\theta} \\left(\\mathbf {y} _ {i , t , j} \\mid \\mathbf {x} , \\left\\{\\mathbf {m} _ {i , ,} , \\mathbf {y} _ {i ,} \\right\\} _ {< t} , \\mathbf {m} _ {i , t} , \\mathbf {y} _ {i , t , < j}\\right)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} \\left(\\mathbf {y} _ {i , t , j} \\mid \\mathbf {x} , \\left\\{\\mathbf {m} _ {i , ,}, \\mathbf {y} _ {i ,} \\right\\} _ {< t} , \\mathbf {m} _ {i , t} , \\mathbf {y} _ {i , t , < j}\\right)}. \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 864, + 823, + 907 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The introduction of the turn-level ratio serves two key purposes. First, using a token-level ratio (Eq. 
(2)) in the objective introduces bias for multi-turn training, as it averages over all tokens in a trajectory. This means that tokens within longer turns (those containing more tokens) can disproportionately influence the overall loss, and averaging at the token level may encourage excessively long single-turn responses. Second, clipping each token independently risks instability during training.", + "bbox": [ + 169, + 90, + 823, + 161 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In contrast, the turn-level ratio aligns more closely with the underlying MDP formulation by treating all tokens within a turn as a single action and applying clipping at the turn level. Intuitively, this approach stabilizes training by preventing the LLM from making unstable updates that could result in extreme outputs, such as overly long repetitions or incoherent text. We conduct experimental verification in subsequent empirical results (Sec. 4.3).", + "bbox": [ + 169, + 167, + 826, + 238 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 171, + 258, + 313, + 275 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To evaluate the effectiveness and efficiency of ReMA, we conduct experiments on challenging benchmarks for two types of tasks: mathematical reasoning and LLM-as-a-Judge with three different LLMs. Then, we investigate the models' performance in both single- & multi-turn settings. Finally, we provide ablation studies and qualitative analyses of our method.", + "bbox": [ + 169, + 291, + 823, + 347 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Experiment Settings", + "text_level": 1, + "bbox": [ + 171, + 364, + 356, + 380 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We first analyze the single-turn case of ReMA, i.e., $T = 1$ . 
The high-level agent generates a complete meta-thinking trace in one shot, and the low-level agent follows the instructions and outputs the final results. Single-turn ReMA reduces stochasticity and training cost while our experiments show that it still provides meaningful performance gains.", + "bbox": [ + 169, + 391, + 823, + 448 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Benchmarks We conduct experiments on two types of tasks: mathematical reasoning and LLM-as-a-Judge. For mathematical reasoning experiments, we train models on 7.5k training samples in MATH [Hendrycks et al., 2021] and use MATH500 [Lightman et al., 2023] as the in-distribution test dataset. Additionally, we test the optimized models on out-of-distribution datasets: GSM8K [Cobbe et al., 2021], AIME24 $^{4}$ , AMC23 $^{5}$ , GaoKao2023En [Zhang et al., 2023], Minerva Math [Lewkowycz et al., 2022], and Olympiad Bench [He et al., 2024].", + "bbox": [ + 169, + 464, + 823, + 547 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For LLM-as-a-Judge benchmarks, we train models on RewardBench [Lambert et al., 2024]. Specifically, we convert the original data into a pair-ranking format and split it into a training set of 5k items and a test set of 970 items, denoted as RewardBench970. The models are also tested on JudgeBench [Tan et al., 2024] to assess out-of-distribution performance. We refer to Appendix D.1.2 for detailed comparisons and results.", + "bbox": [ + 169, + 553, + 823, + 623 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines, Models, Training Settings We compare pass@1 performance across the following methods: (1) VRP (CoT, step-by-step prompting, Sec. 3.1); (2) $\\mathbf{VRP}_{\\mathbf{RL}}$ (RL under VRP); (3) $\\mathbf{MRP}_{\\mathbf{RL}}$ (RL under MRP with high-level task analysis, Eq. (4)), and (4) ReMA (ours, RL under MAMRP, Eq. 
(7)).", + "bbox": [ + 169, + 640, + 823, + 696 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We train and test Llama-3-8B-Instruct, Llama-3.1-8B-Instruct [Dubey et al., 2024], and Qwen2.5-7B-Instruct [Team, 2024] on mathematical reasoning benchmarks. For LLM-as-a-judge benchmarks, we train and test Llama-3.1-8B-Instruct and Qwen2.5-7B-Instruct. We use instruct-tuned LLMs to prompt them to perform VRP, MRP, and MAMRP directly during training. Unless specified, we use two separate copies of the same model for high- and low-level agents in ReMA. We use the base reward setting in Appendix C.2 by default. And for the underlying RL algorithm, we use REINFORCE++ [Hu, 2025]. We refer to Appendix D for detailed training settings.", + "bbox": [ + 169, + 700, + 823, + 800 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 Results of Single-turn ReMA", + "text_level": 1, + "bbox": [ + 171, + 816, + 415, + 832 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Question 1. Does single-turn ReMA outperform baselines on both in-distribution and out-of-distribution test sets?", + "bbox": [ + 169, + 844, + 823, + 872 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "4https://huggingface.co/datasets/AI-MO/aimo-validation-aime", + "bbox": [ + 189, + 883, + 712, + 897 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "5https://huggingface.co/datasets/AI-MO/aimo-validation-amc", + "bbox": [ + 192, + 898, + 704, + 910 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1: Performance on in-distribution test sets and out-of-distribution test sets. We also report the improvement/degradation w.r.t. basic CoT performance (VRP). On average, ReMA outperforms all baselines. 
Particularly on out-of-distribution datasets, ReMA achieves the highest performance on most of the benchmarks.", + "bbox": [ + 169, + 99, + 823, + 154 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/032ea026c484b25fb0540e17ead1ad673917359410de718a424734cd30d558a2.jpg", + "table_caption": [ + "(a) Performance on math benchmarks" + ], + "table_footnote": [], + "table_body": "
ModelBenchmarkVRP(CoT)\\( \\mathbf{V R P_{R L}} \\)\\( \\mathbf{M R P_{R L}} \\)ReMA(Ours)
Llama3-8B-InstructMATH50030.8033.40 (+2.60)32.80 (+2.00)33.80 (+3.00)
GSM8K67.4881.80 (+14.32)79.68 (+12.20)79.38 (+11.90)
AIME240.000.00 (+0.00)3.33 (+3.33)0.00 (+0.00)
AMC232.5010.00 (+7.50)12.50 (+10.00)22.50 (+20.00)
Gaokao2023en22.3427.53 (+5.19)23.38 (+1.04)28.57 (+6.23)
Minerva Math8.8216.54 (+7.72)18.01 (+9.19)13.97 (+5.15)
Olympiad Bench8.448.89 (+0.45)9.33 (+0.89)8.89 (+0.45)
Average20.0525.45 (+5.40)25.58 (+5.53)26.73 (+6.68)
Llama3.1-8B-InstructMATH50050.8050.20 (-0.60)48.60 (-2.20)53.20 (+2.40)
GSM8K86.0584.53 (-1.52)85.37 (-0.68)87.26 (+1.21)
AIME2410.003.33 (-6.67)6.67 (-3.33)13.33 (+3.33)
AMC2327.5012.50 (-15.00)30.00 (+2.50)20.00 (-7.50)
Gaokao2023en38.9636.10 (-2.86)37.14 (-1.82)37.14 (-1.82)
Minerva Math22.7926.84 (+4.05)25.37 (+2.58)28.31 (+5.52)
Olympiad Bench15.1119.70 (+4.59)15.70 (+0.59)19.56 (+4.45)
Average35.8933.32 (-2.57)35.55 (-0.34)36.97 (+1.08)
Qwen2.5-7B-InstructMATH50075.0077.20 (+2.20)76.40 (+1.40)74.40 (-0.60)
GSM8K92.0491.36 (-0.68)91.81 (-0.23)90.60 (-1.44)
AIME246.676.67 (+0.00)10.00 (+3.33)20.00 (+13.33)
AMC2347.5050.00 (+2.50)52.50 (+5.00)57.50 (+10.00)
Gaokao2023en56.6254.81 (-1.81)55.06 (-1.56)57.92 (+1.30)
Minerva Math35.6634.93 (-0.73)32.35 (-3.31)34.93 (-0.73)
Olympiad Bench38.2238.37 (+0.15)37.78 (-0.44)36.30 (-1.92)
Average50.2450.48 (+0.24)50.84 (+0.60)53.09 (+2.85)
", + "bbox": [ + 205, + 172, + 792, + 539 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/71aca539c79024c7c54c8b500119e08584e1cf23534f6bae0662ad5dc7fbc4aa.jpg", + "table_caption": [ + "(b) Performance on LLM-as-a-Judge benchmarks" + ], + "table_footnote": [], + "table_body": "
ModelBenchmarkVRP(CoT)\\( \\mathbf{V R P_{R L}} \\)\\( \\mathbf{M R P_{R L}} \\)ReMA(Ours)
Llama3.1-8B-InstructRewardBench97069.4882.89 (+13.41)81.13 (+11.65)83.71 (+14.23)
JudgeBench51.2951.94 (+0.65)52.90 (+1.61)52.90 (+1.61)
Average60.3967.41 (+7.02)67.02 (+6.63)68.31 (+7.92)
Qwen2.5-7B-InstructRewardBench97078.5685.36 (+6.80)86.49 (+7.93)83.51 (+4.95)
JudgeBench58.3956.94 (-1.45)58.39 (+0.00)56.94 (-1.45)
Average68.4771.15 (+2.68)72.44 (+3.97)70.22 (+1.75)
", + "bbox": [ + 202, + 561, + 794, + 698 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 1 compares the greedy decoding performance of ReMA against various RL baselines across mathematical benchmarks (Table 1a) and LLM-as-a-Judge benchmarks (Table 1b). Results across different LLMs indicate that, on average, ReMA outperforms all baselines, achieving a maximum improvement of $6.68\\%$ on mathematical benchmarks and $8.49\\%$ on LLM-as-a-Judge benchmarks.", + "bbox": [ + 169, + 738, + 823, + 794 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Notably, ReMA achieves the highest performance on most benchmarks, particularly on out-of-distribution datasets, with a maximum improvement of $20\\%$ on AMC23 for Llama3-8B-Instruct, $13.33\\%$ on AIME24 for Qwen2.5-7B-Instruct, $14.23\\%$ on RewardBench970 for Llama3.1-8B-Instruct. These results demonstrate the superior out-of-distribution generalization ability conferred by the meta-thinking mechanism in ReMA. However, we observe that the accuracy gains from RL training on instruction-tuned LMs are smaller than from base models (Sec. 4.2.1). This may be due to the higher initial performance and the relatively fixed output distribution of instruction-tuned models, which limits the improvement and peak performance in RL.", + "bbox": [ + 169, + 800, + 823, + 911 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/37ed86a4d76064b8e7dc589771be62484945d0a0fbaf36c2bdeedac73830355d.jpg", + "image_caption": [ + "Figure 3: An RL experiment with 3 training schemes. While RL from SFT excels on easier problems, RL under Meta-thinking shows superior generalization to harder problems like AIME24." 
+ ], + "image_footnote": [], + "bbox": [ + 186, + 82, + 808, + 258 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7cef31214b1e22f8feceb774c7c2fda0d8822a64668c93c676a445aa329e03c2.jpg", + "image_caption": [ + "Figure 4: Average problem difficulty by action type during training. Left: 1B LM collapses to the EMPTY action. Right: 8B LM adapts to a more complex meta-thinking strategy for harder problems." + ], + "image_footnote": [], + "bbox": [ + 173, + 301, + 485, + 417 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8067de4a34648160295745db52239f10161f855ef4f748f78627686ff344515e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 301, + 818, + 419 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2.1 Meta-thoughts boost low-level generalization", + "text_level": 1, + "bbox": [ + 171, + 468, + 537, + 484 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Question 2. Can Reasoning benefit from Meta-thinking?", + "text_level": 1, + "bbox": [ + 171, + 493, + 545, + 508 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Here we provide a tiny but motivating example of how ReMA gives better learning dynamics. We use Qwen2.5-Math-7B [Yang et al., 2024] as the starting base model, MATH (level 3-5, about $5.5\\mathrm{K}$ number of instances) as the training dataset, and we compare three reinforcement learning training schemes, in particular: (1) RL from Base: train the base model directly on MATH with binary outcome reward; (2) RL from SFT: SFT the base model with GPT-4o's CoT answers; then RL on train dataset with binary outcome reward; (3) RL under Meta-thinking: SFT the base model with GPT-4o's meta-thinking plans; then RL on train dataset with binary outcome reward.", + "bbox": [ + 169, + 513, + 823, + 612 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The models are evaluated on 3 benchmarks (Fig. 3). 
SFT brings the best initial accuracy on in-distribution and easier sets, but fails to improve on harder ones. RL from Base yields limited gains. In contrast, RL under Meta-thinking achieves the best learning dynamics and generalizes better to challenging problems (AIME24). See Appendix F.1 for case studies.", + "bbox": [ + 169, + 618, + 823, + 674 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2.2 Diverse meta-thinking characteristics of LLMs", + "text_level": 1, + "bbox": [ + 171, + 691, + 550, + 708 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Question 3. How well can LLMs learn to meta-think?", + "text_level": 1, + "bbox": [ + 171, + 717, + 529, + 732 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To further analyze meta-thinking behaviors, we train models with structured JSON-format actions inspired by Yue et al.. The meta-thinking agent generate two entries in one LM call, first selects from three actions: DECOMPOSE (breaking into subproblems), REWRITE (simplifying the problem), or EMPTY (direct solving), then generates the corresponding text. We compare Llama-3.1-8B-Instruct and Llama-3.2-1B-Instruct to study scale effects (two 1B models vs two 8B models) on meta-thinking agent's training. We use vLLM guided JSON decoding [Dong et al., 2024] for valid formatting and base reward (reasoning agent's solution accuracy with format constraints).", + "bbox": [ + 169, + 738, + 823, + 835 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We observe that smaller LMs produce simpler outputs, likely due to limited capacity to maintain valid JSON formatting while exploring diverse reasoning strategies. As Fig. 4 shows, smaller LMs like Llama-3.2-1B-Instruct quickly converge to the simplest EMPTY action to avoid formatting penalties, while larger LMs like Llama-3.1-8B-Instruct can adapt meta-thinking strategies based on problem difficulty. 
See Appendix F.3 for detailed case studies.", + "bbox": [ + 169, + 842, + 823, + 912 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7be77f52a452cae631a3999a5c1a37f14186bd400761aa6b2200170fc2818d62.jpg", + "image_caption": [ + "Figure 5: Training results of multi-turn Figure 6: Ablations of multi-turn ReMA on a tiny subReMA on MATH-Level3-5-8K under different set of MATH, we only show here the training curves of different training and rollout configurations." + ], + "image_footnote": [], + "bbox": [ + 176, + 78, + 467, + 210 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/0a12a930e142fe2cdce1144a9d1bcf93a65808b7f66fd19e01bc8a6c088ffc04.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 482, + 78, + 821, + 210 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.3 Extending ReMA to Multi-turn MAMRP", + "text_level": 1, + "bbox": [ + 169, + 287, + 501, + 304 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We further extend ReMA to multi-turn MAMRP settings, enabling multiple rounds of interaction between the meta-thinking agent and the reasoning agent as defined in Eq. (7).", + "bbox": [ + 169, + 321, + 823, + 351 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Unlike the inherent VRP capabilities of most LLMs, multi-turn ReMA requires initial bootstrapping. Thus, we constructed a supervised fine-tuning dataset (about 0.8K samples) from LIMO [Ye et al., 2025c] using GPT-4o to establish the starting point for multi-turn interaction capabilities. Then we finetune Qwen2.5-7B before RL training.", + "bbox": [ + 169, + 356, + 823, + 412 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "As described in Sec.3.2.2, we deploy the proposed GRPO with turn-level ratio clipping and trajectory-level averaging loss during training. 
And we remove the KL-divergence term to allow more flexible exploration. By default, the agents share the same parameters and are simultaneously updated using their trajectories. We refer to details in Appendix D.2.", + "bbox": [ + 169, + 417, + 823, + 474 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.3.1 Results and Ablations", + "text_level": 1, + "bbox": [ + 171, + 507, + 377, + 522 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Question 4. Can ReMA be scaled to multi-turn settings?", + "text_level": 1, + "bbox": [ + 169, + 537, + 545, + 554 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "There are two key points revealed by our multi-turn ReMA experiments, as shown in Fig. 5. On one hand, the algorithm can demonstrate effective convergence on the training set, with accuracy steadily increasing from approximately $55\\%$ to $70\\%$ during training. It also achieves an average performance gain of about $5\\%$ across all seven test benchmarks, indicating stable improvements on out-of-distribution data. (Experiment with the rollout config of turn30_token512, see Appendix D.2.2 and Fig. 8 for more details.)", + "bbox": [ + 169, + 559, + 823, + 643 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "On the other hand, we observe that the performance of multi-turn ReMA is highly sensitive to hyperparameters such as the maximum response length per turn and the maximum number of turns. For certain configurations, the model either collapses into producing massive repetitions within a single turn or generates empty responses after only a few turns. Similar phenomena have been reported in concurrent works such as RAGEN [Wang et al., 2025], where these issues are attributed to the lack of fine-grained, reasoning-aware guidance. As a result, multi-turn RL becomes susceptible to long-horizon credit assignment challenges and state drift, often leading to reduced exploration diversity—a phenomenon referred to as the \"Echo Trap\". 
To address this challenge, it is essential to comprehensively explore the training recipe w.r.t. model, data, and algorithm.", + "bbox": [ + 169, + 648, + 825, + 773 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Question 5. How does parameter sharing and turn-level ratio affect multi-turn ReMA?", + "text_level": 1, + "bbox": [ + 169, + 779, + 746, + 795 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "As shown in Fig. 6, we compare different configurations on a smaller dataset consisting of 133 samples—19 from each of the 7 MATH problem types—to evaluate sample efficiency and convergence speed. First, all configurations eventually achieve nearly $100\\%$ accuracy on the training dataset. Notably, the trajectory-level loss with turn-level ratio (Turn-Ratio, Eq. (13)) demonstrates substantially better sample efficiency than its token-level variants (Eq. (2)), reaching higher training rewards with fewer steps. We also present the training curve of separate weight setting, the empirical results show that shared parameters with simultaneous updates converge noticeably faster.", + "bbox": [ + 169, + 800, + 825, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 174, + 90, + 299, + 104 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this paper, we introduced ReMA, a novel framework that leverages multi-agent reinforcement learning to elicit meta-thinking in large language models. By explicitly separating meta-thinking and reasoning processes into distinct agents, our approach enhances both exploration during training and the interpretability of model outputs. We tailored RL algorithms and reward functions to ensure reliable performance. 
Through comprehensive experiments on mathematical reasoning and LLM-as-a-Judge benchmarks, ReMA consistently achieved superior results, particularly on out-of-distribution datasets. We further extend ReMA to multi-turn settings, enabling the framework to handle more complex reasoning scenarios that require more communication between agents. Our ablations demonstrate how effective coordination between agents evolves, highlighting the promise of reinforcement learning and structured agents' collaboration for advancing the capabilities of language models in complex reasoning tasks.", + "bbox": [ + 174, + 119, + 823, + 272 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 491, + 935, + 506, + 946 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 89, + 269, + 106 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.", + "Elif Akata, Lion Schulz, Julian Coda-Forno, Seong Joon Oh, Matthias Bethge, and Eric Schulz. Playing repeated games with large language models. arXiv preprint arXiv:2305.16867, 2023.", + "Cem Anil, Guodong Zhang, Yuhuai Wu, and Roger B. Grosse. Learning to give checkable answers with prover-verifier games. CoRR, abs/2108.12099, 2021. URL https://arxiv.org/abs/2108.12099.", + "Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: A family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 1, 2023.", + "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 
Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020.", + "Jiaqi Chen, Yuxian Jiang, Jiachen Lu, and Li Zhang. S-agents: Self-organizing agents in open-ended environments. arXiv preprint arXiv:2402.04578, 2024a.", + "Qiguang Chen, Libo Qin, Jiaqi WANG, Jingxuan Zhou, and Wanxiang Che. Unlocking the capabilities of thought: A reasoning boundary framework to quantify and optimize chain-of-thought. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024b. URL https://openreview.net/forum?id=pC44UMwy2v.", + "Shuhao Chen, Weisen Jiang, Baijiong Lin, James T Kwok, and Yu Zhang. Routersc: Query-based router by dual contrastive learning for assembling large language models. arXiv preprint arXiv:2409.19886, 2024c.", + "Weize Chen, Yusheng Su, Jingwei Zuo, Cheng Yang, Chenfei Yuan, Chi-Min Chan, Heyang Yu, Yaxi Lu, Yi-Hsin Hung, Chen Qian, et al. Agentverse: Facilitating multi-agent collaboration and exploring emergent behaviors. In The Twelfth International Conference on Learning Representations, 2023.", + "Yongchao Chen, Jacob Arkin, Charles Dawson, Yang Zhang, Nicholas Roy, and Chuchu Fan. Autotamp: Autoregressive task and motion planning with llms as translators and checkers. In 2024 IEEE International conference on robotics and automation (ICRA), pages 6695-6702. IEEE, 2024d.", + "Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. Palm: Scaling language modeling with pathways. Journal of Machine Learning Research, 24(240): 1-113, 2023.", + "Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training. 
arXiv preprint arXiv:2501.17161, 2025.", + "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.", + "Google DeepMind. Gemini flash thinking, 2025. URL https://deepmind.google/technologies/gemini/flash-thinking/. Accessed: 2025-01-29.", + "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao," + ], + "bbox": [ + 171, + 112, + 825, + 912 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun, T. 
Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W. L. Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. X. Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Yishi Piao, Yisong Wang, Yixuan Tan, Yiyang Ma, Yiyuan Liu, Yongqiang Guo, Yuan Ou, Yuduan Wang, Yue Gong, Yuheng Zou, Yujia He, Yunfan Xiong, Yuxiang Luo, Yuxiang You, Yuxuan Liu, Yuyang Zhou, Y. X. Zhu, Yanhong Xu, Yanping Huang, Yaohui Li, Yi Zheng, Yuchen Zhu, Yunxian Ma, Ying Tang, Yukun Zha, Yuting Yan, Z. Z. Ren, Zehui Ren, Zhangli Sha, Zhe Fu, Zhean Xu, Zhenda Xie, Zhengyan Zhang, Zhewen Hao, Zhicheng Ma, Zhigang Yan, Zhiyu Wu, Zihui Gu, Zijia Zhu, Zijun Liu, ZiLin Li, Ziwei Xie, Ziyang Song, Zizheng Pan, Zhen Huang, Zhipeng Xu, Zhongyu Zhang and Zhen Zhang. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. 2025. URL https://arxiv.org/abs/2501.12948.", + "Aniket Didolkar, Anirudh Goyal, Nan Rosemary Ke, Siyuan Guo, Michal Valko, Timothy Lillicrap, Danilo Rezende, Yoshua Bengio, Michael Mozer, and Sanjeev Arora. Metacognitive capabilities of llms: An exploration in mathematical problem solving. arXiv preprint arXiv:2405.12205, 2024.", + "Dujian Ding, Ankur Mallick, Chi Wang, Robert Sim, Subhabrata Mukherjee, Victor Ruhle, Laks VS Lakshmanan, and Ahmed Hassan Awadallah. Hybrid llm: Cost-efficient and quality-aware query routing. arXiv preprint arXiv:2404.14618, 2024.", + "Kefan Dong and Tengyu Ma. Stp: Self-play llm theorem provers with iterative conjecturing and proving, 2025. 
URL https://arxiv.org/abs/2502.00212.", + "Yixin Dong, Charlie F Ruan, Yaxing Cai, Ruihang Lai, Ziyi Xu, Yilong Zhao, and Tianqi Chen. Xgrammar: Flexible and efficient structured generation engine for large language models. arXiv preprint arXiv:2411.15100, 2024.", + "Yilun Du, Shuang Li, Antonio Torralba, Joshua B Tenenbaum, and Igor Mordatch. Improving factuality and reasoning in language models through multiagent debate. In *Forty-first International Conference on Machine Learning*, 2023.", + "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024.", + "Andrew Estornell, Jean-Francois Ton, Yuanshun Yao, and Yang Liu. Acc-debate: An actor-critic approach to multi-agent debate, 2024. URL https://arxiv.org/abs/2411.00053.", + "John H Flavell. Metacognition and cognitive monitoring: A new area of cognitive-developmental inquiry. American psychologist, 34(10):906, 1979.", + "Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D. Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars. 2025. URL https://arxiv.org/abs/2503.01307.", + "Peizhong Gao, Ao Xie, Shaoguang Mao, Wenshan Wu, Yan Xia, Haipeng Mi, and Furu Wei. Meta reasoning for large language models. arXiv preprint arXiv:2406.11698, 2024." + ], + "bbox": [ + 171, + 90, + 823, + 912 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Alex Graves. Sequence transduction with recurrent neural networks. arXiv preprint arXiv:1211.3711, 2012.", + "Fatemeh Haji, Mazal Bethany, Maryam Tabar, Jason Chiang, Anthony Rios, and Peyman Najafirad. Improving llm reasoning with multi-agent tree-of-thought validator agent, 2024. 
URL https://arxiv.org/abs/2409.11527.", + "Rui Hao, Linmei Hu, Weijian Qi, Qingliu Wu, Yirui Zhang, and Liqiang Nie. Chatlm network: More brains, more intelligence. AI Open, 2025.", + "Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, et al. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008, 2024.", + "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874, 2021.", + "Sirui Hong, Xiawu Zheng, Jonathan Chen, Yuheng Cheng, Jinlin Wang, Ceyao Zhang, Zili Wang, Steven Ka Shing Yau, Zijuan Lin, Liyang Zhou, et al. Metagpt: Meta programming for multiagent collaborative framework. arXiv preprint arXiv:2308.00352, 3(4):6, 2023.", + "Jian Hu. Reinforce++: A simple and efficient approach for aligning large language models. arXiv preprint arXiv:2501.03262, 2025.", + "Jian Hu, Xibin Wu, Zilin Zhu, Xianyu, Weixun Wang, Dehao Zhang, and Yu Cao. Openrlhf: An easy-to-use, scalable and high-performance rlhf framework. arXiv preprint arXiv:2405.11143, 2024a.", + "Qitian Jason Hu, Jacob Bieker, Xiuyu Li, Nan Jiang, Benjamin Keigwin, Gaurav Ranganath, Kurt Keutzer, and Shriyash Kaustubh Upadhyay. Routerbench: A benchmark for multi-llm routing system. arXiv preprint arXiv:2403.12031, 2024b.", + "Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024.", + "Fangkai Jiao, Geyang Guo, Xingxing Zhang, Nancy F Chen, Shafiq Joty, and Furu Wei. Preference optimization for reasoning with pseudo feedback. 
arXiv preprint arXiv:2411.16345, 2024.", + "Bowen Jin, Hansi Zeng, Zhenrui Yue, Jinsung Yoon, Sercan Arik, Dong Wang, Hamed Zamani, and Jiawei Han. Search-r1: Training llms to reason and leverage search engines with reinforcement learning, 2025.", + "Jan Hendrik Kirchner, Yining Chen, Harri Edwards, Jan Leike, Nat McAleese, and Yuri Burda. Prover-verifier games improve legibility of llm outputs. arXiv preprint arXiv:2407.13692, 2024.", + "Robert Kirk, Ishita Mediratta, Christoforos Nalmpantis, Jelena Luketina, Eric Hambro, Edward Grefenstette, and Roberta Raileanu. Understanding the effects of rlhf on llm generalisation and diversity. In The Twelfth International Conference on Learning Representations.", + "Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, et al. Training language models to self-correct via reinforcement learning. arXiv preprint arXiv:2409.12917, 2024.", + "Nathan Lambert, Valentina Pyatkin, Jacob Morrison, LJ Miranda, Bill Yuchen Lin, Khyathi Chandu, Nouha Dziri, Sachin Kumar, Tom Zick, Yejin Choi, et al. Rewardbench: Evaluating reward models for language modeling. arXiv preprint arXiv:2403.13787, 2024.", + "Pat Langley, Kirstin Cummings, and Daniel Shapiro. Hierarchical skills and cognitive architectures. In Proceedings of the annual meeting of the cognitive science society, volume 26, 2004." + ], + "bbox": [ + 171, + 90, + 825, + 912 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Aitor Lewkowycz, Anders Andreassen, David Dohan, Ethan Dyer, Henryk Michalewski, Vinay Ramasesh, Ambrose Slone, Cem Anil, Imanol Schlag, Theo Gutman-Solo, et al. Solving quantitative reasoning problems with language models. 
Advances in Neural Information Processing Systems, 35:3843-3857, 2022.", + "Ming Li, Jiuhai Chen, Lichang Chen, and Tianyi Zhou. Can llms speak for diverse people? tuning llms via debate to generate controllable controversial statements. arXiv preprint arXiv:2402.10614, 2024.", + "Tian Liang, Zhiwei He, Wenxiang Jiao, Xing Wang, Yan Wang, Rui Wang, Yujiu Yang, Shuming Shi, and Zhaopeng Tu. Encouraging divergent thinking in large language models through multiagent debate. arXiv preprint arXiv:2305.19118, 2023.", + "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. arXiv preprint arXiv:2305.20050, 2023.", + "Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024.", + "Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective. CoRR, abs/2503.20783, 2025. doi: 10.48550/ARXIV.2503.20783. URL https://doi.org/10.48550/arXiv.2503.20783.", + "Chengdong Ma, Ziran Yang, Minquan Gao, Hai Ci, Jun Gao, Xuehai Pan, and Yaodong Yang. Red teaming game: A game-theoretic framework for red teaming language models. arXiv preprint arXiv:2310.00322, 2023.", + "Hao Ma, Tianyi Hu, Zhiqiang Pu, Boyin Liu, Xiaolin Ai, Yanyan Liang, and Min Chen. Coevolving with the other you: Fine-tuning LLM with sequential cooperative multi-agent reinforcement learning. CoRR, abs/2410.06101, 2024. doi: 10.48550/ARXIV.2410.06101. URL https://doi.org/10.48550/arXiv.2410.06101.", + "Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegrefe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, et al. Self-refine: Iterative refinement with self-feedback. 
Advances in Neural Information Processing Systems, 36:46534-46594, 2023.", + "Dakota Mahan, Duy Van Phung, Rafael Rafailov, Chase Blagden, Nathan Lile, Louis Castricato, Jan-Philipp Franken, Chelsea Finn, and Alon Albalak. Generative reward models. arXiv preprint arXiv:2410.12832, 2024.", + "Sumeet Ramesh Motwani, Chandler Smith, Rocktim Jyoti Das, Markian Rybchuk, Philip H. S. Torr, Ivan Laptev, Fabio Pizzati, Ronald Clark, and Christian Schroeder de Witt. Malt: Improving reasoning with multi-agent llm training, 2024. URL https://arxiv.org/abs/2412.01928.", + "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025.", + "OpenAI. Openai o1 system card, 2024. URL https://openai.com/ol/.", + "Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in neural information processing systems, 35: 27730-27744, 2022.", + "Chanwoo Park, Seungju Han, Xingzhi Guo, Asuman Ozdaglar, Kaiqing Zhang, and Joo-Kyung Kim. Maporl: Multi-agent post-co-training for collaborative large language models with reinforcement learning. 2025. URL https://arxiv.org/abs/2502.18439." + ], + "bbox": [ + 173, + 90, + 825, + 912 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ethan Perez, Saffron Huang, Francis Song, Trevor Cai, Roman Ring, John Aslanides, Amelia Glaese, Nat McAleese, and Geoffrey Irving. Red teaming language models with language models. arXiv preprint arXiv:2202.03286, 2022.", + "Israel Puerta-Merino, Carlos Núñez-Molina, Pablo Mesejo, and Juan Fernández-Olivares. 
A roadmap to guide the integration of llms in hierarchical planning. arXiv preprint arXiv:2501.08068, 2025.", + "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual reasoning makes smaller llms stronger problem-solvers. arXiv preprint arXiv:2408.06195, 2024.", + "Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, and Pengfei Liu. O1 replication journey: A strategic progress report - part 1, 2024. URL https://arxiv.org/abs/2410.18982.", + "Lv Qingsong, Yangning Li, Zihua Lan, Zishan Xu, Jiwei Tang, Yinghui Li, Wenhao Jiang, Hai-Tao Zheng, and Philip S. Yu. Raise: Reinforenced adaptive instruction selection for large language models, 2025.", + "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023.", + "Krishan Rana, Jesse Haviland, Sourav Garg, Jad Abou-Chakra, Ian Reid, and Niko Suenderhauf. Sayplan: Grounding large language models using 3d scene graphs for scalable robot task planning. arXiv preprint arXiv:2307.06135, 2023.", + "Swarnadeep Saha, Xian Li, Marjan Ghazvininejad, Jason Weston, and Tianlu Wang. Learning to plan & reason for evaluation with thinking-llm-as-a-judge. arXiv preprint arXiv:2501.18099, 2025a.", + "Swarnadeep Saha, Xian Li, Marjan Ghazvininejad, Jason Weston, and Tianlu Wang. Learning to plan & reason for evaluation with thinking-llm-as-a-judge, 2025b. URL https://arxiv.org/abs/2501.18099.", + "John Schulman, Sergey Levine, Pieter Abbeel, Michael I. Jordan, and Philipp Moritz. Trust region policy optimization. In Francis R. Bach and David M. Blei, editors, Proceedings of the 32nd International Conference on Machine Learning, ICML 2015, Lille, France, 6-11 July 2015, volume 37 of JMLR Workshop and Conference Proceedings, pages 1889-1897. 
JMLR.org, 2015. URL http://proceedings.mlr.press/v37/schulman15.html.", + "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. CoRR, abs/1707.06347, 2017. URL http://arxiv.org/abs/1707.06347.", + "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024.", + "Maohao Shen, Guangtao Zeng, Zhenting Qi, Zhang-Wei Hong, Zhenfang Chen, Wei Lu, Gregory Wornell, Subhro Das, David Cox, and Chuang Gan. Satori: Reinforcement learning with chain-of-action-thought enhances llm reasoning via autoregressive search, 2025. URL https:// arxiv.org/abs/2502.02508.", + "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv: 2409.19256, 2024.", + "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024.", + "Chan Hee Song, Jiaman Wu, Clayton Washington, Brian M Sadler, Wei-Lun Chao, and Yu Su. Llm-planner: Few-shot grounded planning for embodied agents with large language models. In Proceedings of the IEEE/CVF international conference on computer vision, pages 2998-3009, 2023." + ], + "bbox": [ + 171, + 90, + 825, + 910 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Dimitris Stripelis, Zijian Hu, Jipeng Zhang, Zhaozhuo Xu, Alay Dilipbhai Shah, Han Jin, Yuhang Yao, Salman Avestimehr, and Chaoyang He. Tensoropera router: A multi-model router for efficient llm inference. 
arXiv preprint arXiv:2408.12320, 2024.", + "Vighnesh Subramaniam, Yilun Du, Joshua B. Tenenbaum, Antonio Torralba, Shuang Li, and Igor Mordatch. Multiagent finetuning: Self improvement with diverse reasoning chains, 2025. URL https://arxiv.org/abs/2501.05707.", + "Chuanneng Sun, Songjun Huang, and Dario Pompili. Retrieval-augmented hierarchical in-context reinforcement learning and hindsight modular reflections for task planning with llms. arXiv preprint arXiv:2408.06520, 2024.", + "Richard Sutton. The bitter lesson. Incomplete Ideas (blog), 13(1):38, 2019.", + "Sijun Tan, Siyuan Zhuang, Kyle Montgomery, William Y Tang, Alejandro Cuadron, Chenguang Wang, Raluca Ada Popa, and Ion Stoica. Judgebench: A benchmark for evaluating llm-based judges. arXiv preprint arXiv:2410.12784, 2024.", + "Xiangru Tang, Anni Zou, Zhuosheng Zhang, Ziming Li, Yilun Zhao, Xingyao Zhang, Arman Cohan, and Mark Gerstein. Medagents: Large language models as collaborators for zero-shot medical reasoning. arXiv preprint arXiv:2311.10537, 2023.", + "Qwen Team. Qwen2.5: A party of foundation models, September 2024. URL https://qwenlm.github.io/blog/qwen2.5/.", + "Jun Wang, Meng Fang, Ziyu Wan, Muning Wen, Jiachen Zhu, Anjie Liu, Ziqin Gong, Yan Song, Lei Chen, Lionel M Ni, et al. Openr: An open source framework for advanced reasoning with large language models. arXiv preprint arXiv:2410.09671, 2024a.", + "Tianlu Wang, Ilia Kulikov, Olga Golovneva, Ping Yu, Weizhe Yuan, Jane Dwivedi-Yu, Richard Yuanzhe Pang, Maryam Fazel-Zarandi, Jason Weston, and Xian Li. Self-taught evaluators. arXiv preprint arXiv:2408.02666, 2024b.", + "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. arXiv preprint arXiv:2203.11171, 2022.", + "Yuqing Wang and Yun Zhao. Metacognitive prompting improves understanding in large language models. 
arXiv preprint arXiv:2308.05342, 2023.", + "Zhenhailong Wang, Shaoguang Mao, Wenshan Wu, Tao Ge, Furu Wei, and Heng Ji. Unleashing the emergent cognitive synergy in large language models: A task-solving agent through multi-personal self-collaboration. arXiv preprint arXiv:2307.05300, 2023.", + "Zihan Wang, Kangrui Wang, Qineng Wang, Pingyue Zhang, Linjie Li, Zhengyuan Yang, Kefan Yu, Minh Nhat Nguyen, Licheng Liu, Eli Gottlieb, Monica Lam, Yiping Lu, Kyunghyun Cho, Jiajun Wu, Li Fei-Fei, Lijuan Wang, Yejin Choi, and Manling Li. Ragen: Understanding self-evolution in llm agents via multi-turn reinforcement learning, 2025.", + "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022.", + "Sean Welleck, Ximing Lu, Peter West, Faeze Brahman, Tianxiao Shen, Daniel Khashabi, and Yejin Choi. Generating sequences by learning to self-correct. arXiv preprint arXiv:2211.00053, 2022.", + "Muning Wen, Ziyu Wan, Weinan Zhang, Jun Wang, and Ying Wen. Reinforcing language agents via policy optimization with action decomposition. CoRR, abs/2405.15821, 2024. doi: 10.48550/ ARXIV.2405.15821. URL https://doi.org/10.48550/arXiv.2405.15821.", + "Zhiheng Xi, Dingwen Yang, Jixuan Huang, Jiafu Tang, Guanyu Li, Yiwen Ding, Wei He, Boyang Hong, Shihan Do, Wenyu Zhan, et al. Enhancing llm reasoning via critique models with test-time and training-time supervision. arXiv preprint arXiv:2411.16579, 2024." + ], + "bbox": [ + 171, + 90, + 825, + 912 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Violet Xiang, Charlie Snell, Kanishk Gandhi, Alon Albalak, Anikait Singh, Chase Blagden, Duy Phung, Rafael Rafailov, Nathan Lile, Dakota Mahan, et al. 
Towards system 2 reasoning in llms: Learning how to think with meta chain-of-though. arXiv preprint arXiv:2501.04682, 2025.", + "Yihang Xiao, Jinyi Liu, Yan Zheng, Xiaohan Xie, Jianye Hao, Mingzhi Li, Ruitao Wang, Fei Ni, Yuxiao Li, Jintian Luo, et al. Cellagent: An llm-driven multi-agent framework for automated single-cell data analysis. BioRxiv, pages 2024-05, 2024.", + "Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768, 2025.", + "Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, et al. Towards large reasoning models: A survey of reinforced reasoning with large language models. arXiv preprint arXiv:2501.09686, 2025.", + "Prateek Yadav, Tu Vu, Jonathan Lai, Alexandra Chronopoulou, Manaal Faruqui, Mohit Bansal, and Tsendsuren Munkhdalai. What matters for model merging at scale? arXiv preprint arXiv:2410.03617, 2024.", + "Xue Yan, Yan Song, Xinyu Cui, Filippos Christianos, Haifeng Zhang, David Henry Mguni, and Jun Wang. Ask more, know better: Reinforce-learned prompt questions for decision making with large language models. arXiv preprint arXiv:2310.18127, 2023.", + "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, Keming Lu, Mingfeng Xue, Runji Lin, Tianyu Liu, Xingzhang Ren, and Zhenru Zhang. Qwen2.5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024.", + "Ling Yang, Zhaochen Yu, Bin Cui, and Mengdi Wang. Reasonflux: Hierarchical llm reasoning via scaling thought templates. arXiv preprint arXiv:2502.06772, 2025.", + "Guanghao Ye, Khiem Duc Pham, Xinzhi Zhang, Sivakanth Gopi, Baolin Peng, Beibin Li, Janardhan Kulkarni, and Huseyin A Inan. 
On the emergence of thinking in llms i: Searching for the right intuition. arXiv preprint arXiv:2502.06773, 2025a.", + "Peijun Ye, Tao Wang, and Fei-Yue Wang. A survey of cognitive architectures in the past 20 years. IEEE transactions on cybernetics, 48(12):3280-3290, 2018.", + "Yaowen Ye, Cassidy Laidlaw, and Jacob Steinhardt. Iterative label refinement matters more than preference optimization under weak supervision. arXiv preprint arXiv:2501.07886, 2025b.", + "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning. arXiv preprint arXiv:2502.03387, 2025c.", + "Le Yu, Bowen Yu, Haiyang Yu, Fei Huang, and Yongbin Li. Language models are super mario: Absorbing abilities from homologous models as a free lunch. In *Forty-first International Conference on Machine Learning*, 2024.", + "Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Guangming Sheng, Yuxuan Tong, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Jinhua Zhu, Jiaze Chen, Jiangjie Chen, Chengyi Wang, Hongli Yu, Weinan Dai, Yuxuan Song, Xiangpeng Wei, Hao Zhou, Jingjing Liu, Wei-Ying Ma, Ya-Qin Zhang, Lin Yan, Mu Qiao, Yonghui Wu, and Mingxuan Wang. Dapo: An open-source llm reinforcement learning system at scale, 2025.", + "Murong Yue, Wenlin Yao, Haitao Mi, Dian Yu, Ziyu Yao, and Dong Yu. Dots: Learning to reason dynamically in llms via optimal reasoning trajectories search. In The Thirteenth International Conference on Learning Representations.", + "Murong Yue, Wenlin Yao, Haitao Mi, Dian Yu, Ziyu Yao, and Dong Yu. DOTS: learning to reason dynamically in llms via optimal reasoning trajectories search. CoRR, abs/2410.03864, 2024. doi: 10.48550/ARXIV.2410.03864. URL https://doi.org/10.48550/arXiv.2410.03864." 
+ ], + "bbox": [ + 173, + 90, + 825, + 910 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yanwei Yue, Guibin Zhang, Boyang Liu, Guancheng Wan, Kun Wang, Dawei Cheng, and Yiyan Qi. Masrouter: Learning to route llms for multi-agent systems. arXiv preprint arXiv:2502.11133, 2025a.", + "Yu Yue, Yufeng Yuan, Qiying Yu, Xiaochen Zuo, Ruofei Zhu, Wenyuan Xu, Jiaze Chen, Chengyi Wang, TianTian Fan, Zhengyin Du, Xiangpeng Wei, Xiangyu Yu, Gaohong Liu, Juncai Liu, Lingjun Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Ru Zhang, Xin Liu, Mingxuan Wang, Yonghui Wu, and Lin Yan. Vapo: Efficient and reliable reinforcement learning for advanced reasoning tasks, 2025b.", + "Weihao Zeng, Yuzhen Huang, Qian Liu, Wei Liu, Keqing He, Zejun Ma, and Junxian He. Simplerl-zoo: Investigating and taming zero reinforcement learning for open base models in the wild, 2025.", + "Di Zhang, Jianbo Wu, Jingdi Lei, Tong Che, Jiatong Li, Tong Xie, Xiaoshui Huang, Shufei Zhang, Marco Pavone, Yuqiang Li, Wanli Ouyang, and Dongzhan Zhou. Llama-berry: Pairwise optimization for o1-like olympiad-level mathematical reasoning, 2024a. URL https://arxiv.org/abs/2410.02884.", + "Hangfan Zhang, Zhiyao Cui, Xinrun Wang, Qiaosheng Zhang, Zhen Wang, Dinghao Wu, and Shuyue Hu. If multi-agent debate is the answer, what is the question? arXiv preprint arXiv:2502.08788, 2025a.", + "Jiayi Zhang, Jinyu Xiang, Zhaoyang Yu, Fengwei Teng, Xionghui Chen, Jiaqi Chen, Mingchen Zhuge, Xin Cheng, Sirui Hong, Jinlin Wang, et al. Aflow: Automating agentic workflow generation. arXiv preprint arXiv:2410.10762, 2024b.", + "Xiaotian Zhang, Chunyang Li, Yi Zong, Zhengyu Ying, Liang He, and Xipeng Qiu. Evaluating the performance of large language models on gaokao benchmark. 
arXiv preprint arXiv:2305.12474, 2023.", + "Yiqun Zhang, Peng Ye, Xiaocui Yang, Shi Feng, Shufei Zhang, Lei Bai, Wanli Ouyang, and Shuyue Hu. Nature-inspired population-based evolution of large language models. arXiv preprint arXiv:2503.01155, 2025b.", + "Rosie Zhao, Alexandru Meterez, Sham Kakade, Cengiz Pehlevan, Samy Jelassi, and Eran Malach. Echo chamber: Rl post-training amplifies behaviors learned in pretraining, 2025.", + "Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. Marco-ol: Towards open reasoning models for open-ended solutions, 2024. URL https://arxiv.org/abs/2411.14405.", + "Yifei Zhou, Andrea Zanette, Jiayi Pan, Sergey Levine, and Aviral Kumar. Archer: Training language model agents via hierarchical multi-turn rl, 2024.", + "Mingchen Zhuge, Haozhe Liu, Francesco Faccio, Dylan R Ashley, Róbert Csordás, Anand Gopalakrishnan, Abdullah Hamdi, Hasan Abed Al Kader Hammoud, Vincent Herrmann, Kazuki Irie, et al. Mindstorms in natural language-based societies of mind. arXiv preprint arXiv:2305.17066, 2023." 
+ ], + "bbox": [ + 171, + 90, + 825, + 710 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Appendix Table of Contents", + "text_level": 1, + "bbox": [ + 171, + 89, + 410, + 107 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "- A Related work 19", + "bbox": [ + 217, + 114, + 825, + 128 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "-A.1 Single LLM Reasoning 19", + "-A.2MultipleLLMReasoning 20", + "-A.3 Hierarchical Reasoning 20", + "-A.4RL in LLM 21" + ], + "bbox": [ + 245, + 132, + 823, + 193 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "B Limitation and Future Work 21", + "bbox": [ + 215, + 196, + 823, + 210 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "C Supplementary Materials for Method in Section 3 21", + "bbox": [ + 215, + 215, + 823, + 229 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- C.1 Inference-time Scaling For ReMA 21", + "- C.2 Detailed reward design 22", + "- C.3 Pseudocode of ReMA 23", + "- C.4 Brief convergence analysis 23", + "- C.5 Learning to reason from the perspective of Leader Follower Game 24" + ], + "bbox": [ + 245, + 233, + 823, + 309 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "D Training Details 26", + "bbox": [ + 215, + 313, + 823, + 327 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "- D.1 Single-turn ReMA 26", + "bbox": [ + 245, + 330, + 823, + 345 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "\\* D.1.1 Supervised fine-tuning data collection 27", + "\\* D.1.2 Dataset Curation of RewardBench970 27", + "\\*D.1.3 Training on MATH 28", + "\\* D.1.4 Training on Reward Bench 28" + ], + "bbox": [ + 272, + 347, + 823, + 404 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "- D.2 Multi-turn ReMA 28", + "bbox": [ 
+ 245, + 406, + 823, + 419 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "\\* D.2.1 SFT data collection of multi-turn MAMRP 29", + "\\* D.2.2 Training on MATH 29" + ], + "bbox": [ + 272, + 421, + 823, + 450 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "E Other Experiments 29", + "bbox": [ + 215, + 453, + 823, + 467 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "-E.1 Reward functions shape cross-agent behaviors 29", + "- E.2 Detailed Training Curves on Different Datasets of Multi-turn ReMA 30" + ], + "bbox": [ + 245, + 470, + 823, + 501 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "F Qualitative results 30", + "bbox": [ + 215, + 505, + 823, + 518 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- F.1 High-level policy finds better plans 30", + "- F.2 Case study for Experiments of Different Reward Functions in Appendix E.1 .30", + "- F.3 Case study for Adaptive Meta-thinking in Single-Turn ReMA in Section 4.2.2 30" + ], + "bbox": [ + 245, + 522, + 823, + 566 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "G Prompts 31", + "bbox": [ + 215, + 571, + 823, + 585 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A Related work", + "text_level": 1, + "bbox": [ + 171, + 604, + 321, + 619 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Drawing from the bitter lesson [Sutton, 2019], two methods that appear to scale effectively are searching and learning, aligning with current trends in large language models [Xu et al., 2025]. At present, researchers are leveraging these methods to maximize the capabilities of individual transformers, while other efforts are exploring architectures that involve multiple interacting entities. 
In this paper, we examine this divergence within the context of LLM reasoning, a capability that allows large language models to solve problems through logical reasoning, step-by-step analysis, and inference [Wang et al., 2024a].", + "bbox": [ + 169, + 633, + 826, + 733 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A.1 Single LLM Reasoning", + "text_level": 1, + "bbox": [ + 171, + 747, + 377, + 763 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Main research works in reasoning involving a single LLM utilize search-based and post-training methods. The fundamental elements of searching methods are text generation and evaluation. Generation schemes include In-Context Learning [Brown et al., 2020], Beam Search [Graves, 2012], and various tree-based searching [Snell et al., 2024]; Evaluation approaches often use outcome accuracy, self-consistency [Wang et al., 2022], or process reward signal [Lightman et al., 2023] as the criteria to select high-quality responses from the generated texts. Post-training method is another research line in opposition to pre-training. Popular training pipelines often involve specific data construction followed by Supervised Fine-tuning [Qin et al., 2024, Ouyang et al., 2022, Hui et al., 2024, Liu et al., 2024], or reinforcement learning to interactively explore learning patterns [Wang et al., 2024a, Zhang et al., 2024a, DeepSeek-AI et al., 2025, Xu et al., 2025].", + "bbox": [ + 169, + 772, + 826, + 912 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A.2 Multiple LLM Reasoning", + "text_level": 1, + "bbox": [ + 174, + 92, + 393, + 104 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Integrating multiple entities can potentially surpass the intelligence of the individual model [Chen et al., 2023]. 
With the rapid emergence of large language models showing a varying level of abilities, some studies have explored facilitating discussions among multiple off-the-shelf LLMs [Zhang et al., 2025a, Chen et al., 2024a, Wang et al., 2023, Du et al., 2023, Zhuge et al., 2023, Tang et al., 2023, Hao et al., 2025, Akata et al., 2023, Hong et al., 2023, Zhang et al., 2024b], taking the form of free discussion [Du et al., 2023, Liang et al., 2023] or structured role assignments [Hong et al., 2023, Zhang et al., 2024b]. Some have applied routing mechanisms to assign tasks to the most suitable expert models [Hu et al., 2024b, Stripelis et al., 2024, Ding et al., 2024, Yue et al., 2025a, Chen et al., 2024c] or merging mechanisms to develop more versatile models [Yadav et al., 2024, Yu et al., 2024, Zhang et al., 2025b]. Beyond aggregating static knowledge from multiple agents, multi-agent LLM training can also enhance reasoning capabilities. For example, multi-agent debates can generate diverse synthetic data, which can subsequently be used for supervised fine-tuning [Estornell et al., 2024, Li et al., 2024, Motwani et al., 2024, Dong and Ma, 2025, Perez et al., 2022, Ye et al., 2025a, Subramaniam et al., 2025]. Reinforcement learning (RL) methods have also been adopted to improve LLM reasoning in areas such as alignment [Perez et al., 2022, Ma et al., 2023] and legibility [Kirchner et al., 2024]. Motwani et al. [2024] utilize a three-agent system for generation and fine-tune the models using Direct Preference Optimization (DPO). Reinforcement Learning with Generative Reward Models (GenRM) [Mahan et al., 2024, Ye et al., 2025b, Jiao et al., 2024, Wang et al., 2024b] represents another common approach of multi-agent training, where the reward signal is derived from the token probabilities of another LLM, coupled with the reasoning process. 
While our work aligns with these efforts, it diverges by using an additional tunable LLM to provide metacognitive instructions, guiding the low-level LLM during learning, rather than relying on a static GenRM. The most closely related works to ours are MAPoRL [Park et al., 2025] and COPYR [Ma et al., 2024]. MAPoRL is a multi-agent debating framework that uses multi-agent reinforcement learning (MARL) with a learned verifier to fine-tune each LLM agent. COPYR duplicates an LLM into two agents, training them simultaneously in the roles of pioneer and observer using RL. Shen et al. [2025] trained with a novel Chain-of-Action-Thought (COAT) framework that embeds meta-action tokens for self-reflection and exploration into an autoregressive search process. However, unlike our approach, which explicitly separates metacognition from plan execution, these methods do not decompose the reasoning process but instead focus on improving direct chain-of-thought generation. Furthermore, our experiments are conducted on a larger scale and include more challenging problems.", + "bbox": [ + 174, + 119, + 823, + 561 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "A.3 Hierarchical Reasoning", + "text_level": 1, + "bbox": [ + 174, + 592, + 379, + 604 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Partitioning reasoning into hierarchical processes has been explored in prior research to make biological sense [Ye et al., 2018, Langley et al., 2004]. In the context of language models, a hierarchical structure has been used to facilitate diverse reasoning patterns, including planning [Puerta-Merino et al., 2025, Sun et al., 2024, Song et al., 2023, Rana et al., 2023, Chen et al., 2024d, Yan et al., 2023, Xiao et al., 2024], validation [Haji et al., 2024, Xi et al., 2024] and self-refinement [Madaan et al., 2023, Kumar et al., 2024, Welleck et al., 2022]. 
For instance, EvalPlanner [Saha et al., 2025b] is a framework that conducts reasoning through plan generation and execution. DOTS [Yue et al., 2024] extends decomposition by integrating a tree-based searching method with Analysis, Solution, and Verification layers. Marco-o1 [Zhao et al., 2024] focuses on open-ended problem-solving and abstract thinking, dynamically adjusting reasoning granularity and incorporating reflection mechanisms to enhance reasoning performance. Beyond these approaches, metacognition [Flavell, 1979] has been identified as another critical component of reasoning, referring to the intuitive understanding of one's own cognitive and reasoning processes [Gao et al., 2024, Wang and Zhao, 2023]. Wang and Zhao [2023] proposed a metacognitive prompting strategy to improve large language model (LLM) capabilities. Didolkar et al. [2024] further developed a prompt-guided method that enables models to label math problems with the required skills and subsequently use these labels to solve new problems. Gao et al. [2024] introduce meta-reasoner which use contextual multi-arm bandit to learn a high-level \"advisor\" over low-level reasoning process. Xiang et al. [2025] provides a Meta-CoT framework to think about its own thinking. They use construction-based methods as well as reinforcement learning to develop meta-cognitive skills. Qingsong et al. [2025] introduces a RL framework for dynamic instruction selection during fine-tuning. 
In our work, we also value reflect-", + "bbox": [ + 174, + 619, + 823, + 910 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 490, + 936, + 506, + 946 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "ing on reasoning processes, and we enhance metacognitive abilities through two-agent interaction and reinforcement learning at both end.", + "bbox": [ + 169, + 90, + 823, + 122 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "A.4 RL in LLM", + "text_level": 1, + "bbox": [ + 171, + 137, + 302, + 152 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Recent advancements in applying RL to LLMs have enhanced their reasoning and decision-making capabilities. Liu et al. [2025] examines token-level optimization biases by introducing Dr. GRPO to stabilize policy gradients. VAPO [Yue et al., 2025b] enhances PPO with value-aware perturbations and adaptive reward shaping to improve robustness in sparse-reward reasoning tasks. DAPO [Yu et al., 2025] provides a scalable, modular RL framework that integrates distributed rollout collection and dynamic replay buffers for reproducible training at scale. SimpleRL-Zoo [Zeng et al., 2025] conducts zero-shot RL experiments across open-base LLMs to uncover emergent cognitive behaviors under minimal reward signals. Echo Chamber [Zhao et al., 2025] investigates how RL fine-tuning algorithms can amplify pretrained model biases and proposes regularization to mitigate over-amplification. Wen et al. [2024] decomposes high-level language actions into token-level operations to achieve finer-grained credit assignment. Some works push RL training for single-turn to multi-turn. Search-R1 [Jin et al., 2025] trains LLMs to orchestrate multi-turn search strategies with RL-optimized decision policies to improve question-answering accuracy. 
ArCHer [Zhou et al., 2024] employs a hierarchical, multi-turn RL architecture with manager and worker policies to efficiently handle long-horizon dialogue tasks. RAGEN [Wang et al., 2025] introduces trajectory filtering and critic modules within a multi-turn RL framework to stabilize learning and reduce shallow policy behaviors.", + "bbox": [ + 169, + 162, + 826, + 398 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "B Limitation and Future Work", + "text_level": 1, + "bbox": [ + 171, + 419, + 452, + 435 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In this work, we only test ReMA on math and LLM-as-a-Judge benchmarks. Though the results show the effectiveness of ReMA, adopting ReMA to tasks where naturally needs multi-turn interaction between several interleaved agents has great potential. Moreover, a comprehensive understanding of the learning dynamics of multi-turn RL and multi-turn MARL for LLMs is needed. Finally, there's still sufficient space to further improve the procedure of multi-turn multi-agent rollout through modern LLM speed up techniques, e.g. prefetch-decode disaggregation and asynchronous rollout.", + "bbox": [ + 169, + 450, + 823, + 547 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "C Supplementary Materials for Method in Section 3", + "text_level": 1, + "bbox": [ + 171, + 568, + 625, + 585 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "C.1 Inference-time Scaling of ReMA", + "text_level": 1, + "bbox": [ + 171, + 599, + 442, + 616 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In this section, we discuss how to enhance the inference-time computation of our hierarchical system, specifically focusing on the interaction between the high-level and low-level agents. 
The total number of model samples required for inference is determined by the product of the sampling budget allocated to each agent.", + "bbox": [ + 169, + 626, + 823, + 681 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "For instance, in a simple single-turn setting, if the high-level agent samples $k_{1}$ responses and each of these responses leads to $k_{2}$ samples from the low-level agent, the total number of model calls required is:", + "bbox": [ + 169, + 688, + 823, + 729 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\n\\text {T o t a l s a m p l e s} = k _ {1} \\times k _ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 411, + 731, + 583, + 746 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Given a fixed computational budget, an important question arises: how should the sampling budget be distributed between the high-level and low-level agents to maximize performance? Allocating more samples to the high-level agent may increase diversity in reasoning strategies while allocating more to the low-level agent may yield more refined solutions for a given metacognitive plan.", + "bbox": [ + 169, + 752, + 823, + 821 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Another crucial consideration is how to perform reranking on the final outputs. Two potential strategies include:", + "bbox": [ + 169, + 828, + 823, + 856 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- Hierarchical reranking: First, for each high-level response, rank and aggregate the low-level responses under it. 
Then, rank the aggregated results across different high-level responses.", + "bbox": [ + 215, + 869, + 823, + 912 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- Flat reranking: Directly rank all sampled responses together, regardless of the hierarchy of high-level reasoning steps.", + "bbox": [ + 215, + 90, + 823, + 119 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Balancing sampling allocation and designing an effective reranking strategy are key challenges in efficiently scaling our multi-agent reasoning system. In the next section, we explore empirical results comparing different allocation strategies and ranking methods.", + "bbox": [ + 169, + 131, + 825, + 174 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "C.2 Detailed reward design", + "text_level": 1, + "bbox": [ + 171, + 189, + 377, + 205 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "As described in Sec. 3.2, we update both high-level and low-level agents by assigning rewards based on the low-level policy output. Below, we outline several potential reward designs:", + "bbox": [ + 169, + 214, + 823, + 244 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Correctness reward: For tasks with explicit ground truth, we assign rewards based on the correctness of the low-level agent's output.", + "- Format reward: For tasks that require a specific output format, we enforce adherence to the prescribed structure by providing a format reward.", + "- To encourage the high-level agent to generate informative and unambiguous meta-thinking, and to stabilize the low-level outputs, we reward the high-level agent when the low-level agent produces consistent responses. 
Specifically, the consistency reward is defined as" + ], + "bbox": [ + 215, + 253, + 823, + 361 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\nR _ {h} = \\frac {\\text {m a x o c c u r r e n c e o f a n a n s w e r}}{\\text {t o t a l n u m b e r o f r e s p o n s e s}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 366, + 651, + 398 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "To examine multi-agent metacognition-integrated reasoning with different reward designs, we experiment with different reward function designs to encourage effective collaboration and structured reasoning. Below, we introduce and justify several reward schemes.", + "bbox": [ + 169, + 406, + 823, + 450 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "1. Correctness and Format-Aware Reward (Base Setting) In our primary reward setting, the system's overall correctness is used as the primary reward signal, supplemented by format-based rewards for both the high-level and low-level agents. Using mathematical problem-solving as an example:", + "bbox": [ + 169, + 462, + 825, + 518 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "- Low-level agent $(\\pi_{\\theta_l})$ : Receives a reward of $+1.0$ for a correct answer. If the answer is incorrect, the agent is further penalized based on format compliance. Specifically:", + "bbox": [ + 215, + 530, + 823, + 560 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- If the output contains the designated answer-indicating format (e.g., boxed in Latex), it receives $-0.5$ .", + "- Otherwise, it receives $-1.0$ , as a missing format often suggests an incomplete or unstructured response." + ], + "bbox": [ + 243, + 561, + 823, + 619 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "- High-level agent $(\\pi_{\\theta_h})$ : Receives the average correctness of the low-level agent's sampled responses as its reward. 
Additionally, to prevent the high-level agent from directly generating explicit answers instead of guiding reasoning, a strong penalty of $-1.0$ is applied if it includes an explicit answer format (e.g., boxed).", + "bbox": [ + 215, + 625, + 825, + 681 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "2. Consistency-Based Reward Instead of using correctness as the high-level reward signal, this approach rewards the high-level agent for promoting consistent responses from the low-level agent, regardless of actual correctness. The consistency reward is defined as the proportion of the most frequently occurring answer among all sampled responses:", + "bbox": [ + 169, + 694, + 825, + 752 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\nR _ {h} = \\frac {\\text {m a x o c c u r r e n c e o f a n a n s w e r}}{\\text {t o t a l n u m b e r o f r e s p o n s e s}} \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 375, + 767, + 823, + 799 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "If the majority of responses do not contain a definitive answer, the reward is set to zero. We also add the format penalty to the high-level agent if its output contains the designated answer-indicating format. This incentivizes the high-level agent to guide the low-level agent toward more stable, detailed, reproducible outputs rather than erratic reasoning paths.", + "bbox": [ + 169, + 806, + 823, + 864 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "These different reward formulations allow us to investigate various dimensions of metacognitive reasoning: correctness, consistency, etc. We empirically compare their effects on learned metacognitive reasoning patterns in Sec. 
E.1.", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/315cafbb1d2055dded17eaabf88749be03cfea772911a86fa7b194d2357e1c64.jpg", + "image_caption": [ + "Figure 7: Our method can be viewed as a combination of practical TRPO and block coordinate ascent, with the high and low-level models treated as distinct components within a larger neural network. Note that the figure does not represent the exact gradient back-propagation flow but rather highlights the key idea that we separate the high- and low-level models. This separation allows for the independent computation of gradients and the independent training of each model." + ], + "image_footnote": [], + "bbox": [ + 200, + 95, + 763, + 306 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "C.3 Pseudocode of ReMA", + "text_level": 1, + "bbox": [ + 171, + 422, + 366, + 435 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "The pseudocode is shown in Algorithm 1.", + "bbox": [ + 171, + 450, + 447, + 465 + ], + "page_idx": 22 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Single turn MAMRP" + ], + "code_body": "Require: High-level policy $\\pi_h$ , Low-level policy $\\pi_l$ , Dataset $\\mathcal{D}$ , Optimizers for $\\pi_h$ and $\\pi_l$ . 
$\\varepsilon_{\\mathrm{min}}, \\varepsilon_{\\mathrm{max}}$ to filter training dataset \n1: Initialize $\\pi_h$ and $\\pi_l$ \n2: while not converged do \n3: build training dataset $\\mathcal{D}_l$ with $\\pi_h, \\pi_l, \\varepsilon_{\\mathrm{min}}, \\varepsilon_{\\mathrm{max}}$ \n4: for Sample $(\\mathbf{x}, \\mathbf{m}, \\mathbf{y}^*) \\sim \\mathcal{D}_l$ do \n5: Generate $\\mathbf{y} \\sim \\pi_l(\\mathbf{x}, \\mathbf{m})$ \n6: Compute low-level reward $R_l(\\mathbf{y}, \\mathbf{y}^*)$ \n7: Update $\\pi_l$ using $\\nabla_{\\theta_l} \\mathbb{E}[R_l]$ \n8: end for \n9: build training dataset $\\mathcal{D}_h$ with $\\pi_h, \\pi_l, \\varepsilon_{\\mathrm{min}}, \\varepsilon_{\\mathrm{max}}$ \n10: for Sample $(\\mathbf{x}, \\mathbf{y}^*) \\sim \\mathcal{D}_h$ do \n11: Generate $\\mathbf{m} \\sim \\pi_h(\\mathbf{x})$ and $\\mathbf{y} \\sim \\pi_l(\\mathbf{x}, \\mathbf{m})$ \n12: Compute high-level reward $R_h(\\mathbf{m}, \\mathbf{y}, \\mathbf{y}^*)$ \n13: Update $\\pi_h$ using $\\nabla_{\\theta_h} \\mathbb{E}[R_h]$ \n14: end for \n15: end while", + "bbox": [ + 173, + 507, + 823, + 743 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "C.4 Brief convergence analysis", + "text_level": 1, + "bbox": [ + 171, + 792, + 401, + 806 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "We reuse the notations from Sec. 3.2, where $\\mathbf{x}$ is task prompt, $\\mathbf{y}$ is generated answer, $\\mathbf{y}^*$ is groundtruth, $\\mathbf{m}$ is metacognition on task solving, $\\pi_{\\theta_h}$ and $\\pi_{\\theta_l}$ are high- and low-level agents with parameters $\\theta_h$ and $\\theta_l$ . We consider the joint hierarchical policy defined in Eq. (8) and update the objective as in Eq. (9).", + "bbox": [ + 169, + 821, + 823, + 878 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "To leverage existing RL and optimization convergence analysis methods, we treat the two models as components of a larger model, as illustrated in Fig. 7. 
When updating one model, we treat the other", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "model as part of a stationary environment. The gradients with respect to $\\theta_h$ and $\\theta_l$ are:", + "bbox": [ + 171, + 90, + 740, + 106 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {\\theta_ {h}} J (\\theta_ {h}, \\theta_ {l}) = \\mathbb {E} _ {\\mathbf {x}, \\mathbf {y} ^ {*}} \\sum_ {\\mathbf {m} \\sim \\pi_ {h} (\\mathbf {m} | \\mathbf {x}; \\theta_ {h})} \\nabla_ {\\theta_ {h}} \\pi_ {h} (\\mathbf {m} | \\mathbf {x}; \\theta_ {h}) \\left[ \\mathbb {E} _ {\\mathbf {y} \\sim \\pi_ {l} (\\mathbf {y} | \\mathbf {x}, \\mathbf {m})} R (\\mathbf {y}, \\mathbf {y} ^ {*}) \\right],\n$$\n", + "text_format": "latex", + "bbox": [ + 225, + 112, + 767, + 146 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {\\theta_ {l}} J (\\theta_ {h}, \\theta_ {l}) = \\mathbb {E} _ {\\mathbf {x}, \\mathbf {y} ^ {*}} \\sum_ {\\mathbf {y} \\sim \\pi (\\theta_ {h}, \\theta_ {l})} \\nabla_ {\\theta_ {l}} \\pi_ {l} (\\mathbf {y} \\mid \\mathbf {x}, \\mathbf {m}; \\theta_ {h}); \\theta_ {l}) R (\\mathbf {y}, \\mathbf {y} ^ {*}).\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 148, + 671, + 183 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "We can compute the gradients with log trick and estimate $\\mathbb{E}_{\\mathbf{y}\\sim \\pi_l(\\mathbf{y}|\\mathbf{x},\\mathbf{m})}R(\\mathbf{y},\\mathbf{y}^*)$ with Monte Carlo method.", + "bbox": [ + 169, + 188, + 823, + 215 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Equipped with the objective function and gradient computation, we update the models iteratively. 
Without loss of generality, we analyze the case where the high-level policy is updated first:", + "bbox": [ + 169, + 223, + 823, + 252 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\n\\theta_ {h} ^ {(t + 1)} = \\arg \\max _ {\\theta_ {h}} J (\\theta_ {h}, \\theta_ {l} ^ {(t)}),\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 256, + 593, + 282 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\n\\theta_ {l} ^ {(t + 1)} = \\arg \\max _ {\\theta_ {l}} J \\left(\\theta_ {h} ^ {(t + 1)}, \\theta_ {l}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 392, + 285, + 604, + 310 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Regarding the different regularizations $R_{h}$ and $R_{l}$ in Eqs. (10) and (11) for the different policies, instead of directly integrating them into the loss function, we treat them as constraints, as done in Trust Region Policy Optimization (TRPO) [Schulman et al., 2015]. Note that when one policy is fixed, the other policy operates in a stationary decision process.", + "bbox": [ + 169, + 316, + 823, + 372 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Based on the defined objective and update method, we apply TRPO and block coordinate ascent. First, recall that when updating a single policy, TRPO guarantees monotonic improvement by optimizing a lower bound. Specifically, let $\\pi_{\\mathrm{old}}$ and $\\pi$ represent the old and current policies, respectively. 
We define a surrogate objective as:", + "bbox": [ + 169, + 378, + 823, + 435 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\pi_ {\\mathrm {o l d}}} (\\pi) = \\mathbb {E} _ {s \\sim \\pi_ {\\mathrm {o l d}}, a \\sim \\pi_ {\\mathrm {o l d}}} \\left[ \\frac {\\pi (a | s)}{\\pi_ {\\mathrm {o l d}} (a | s)} A ^ {\\pi_ {\\mathrm {o l d}}} (s, a) \\right],\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 440, + 661, + 474 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "As shown by Schulman et al. [2015], the true objective of $\\pi$ is lower-bounded by:", + "bbox": [ + 171, + 477, + 707, + 492 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\nJ (\\pi) \\geq L _ {\\pi_ {\\mathrm {o l d}}} (\\pi) - C \\cdot \\max _ {s} \\mathrm {K L} [ \\pi_ {\\mathrm {o l d}} (\\cdot | s), \\pi (\\cdot | s) ],\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 496, + 671, + 518 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "for some constant $C$ . By optimizing the right-hand side of the above inequality, we are guaranteed to improve the performance of $\\pi$ . Therefore, for policies $\\pi^t$ and $\\pi^{t + 1}$ obtained from iterations $t$ and $t + 1$ using the TRPO method, we have:", + "bbox": [ + 169, + 523, + 823, + 565 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\nJ (\\pi^ {t + 1}) \\geq J (\\pi^ {t}).\n$$\n", + "text_format": "latex", + "bbox": [ + 434, + 569, + 558, + 588 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Now, returning to our updating method, we treat the high- and low-level policies as two blocks of a single agent. The iterative update process can thus be viewed as a cyclic block coordinate ascent, where the two policies are updated in a fixed order. 
By updating each block using the TRPO method, and improving the surrogate objective within the KL constraint, each block update does not decrease $J$ :", + "bbox": [ + 169, + 598, + 823, + 666 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\nJ \\left(\\theta_ {h} ^ {t + 1}, \\theta_ {l} ^ {t}\\right) \\geq J \\left(\\theta_ {h} ^ {t}, \\theta_ {l} ^ {t}\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 398, + 672, + 566, + 689 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\nJ \\left(\\theta_ {h} ^ {t + 1}, \\theta_ {l} ^ {t + 1}\\right) \\geq J \\left(\\theta_ {h} ^ {t + 1}, \\theta_ {l} ^ {t}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 691, + 596, + 710 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Thus $J(\\theta_h^{t + 1},\\theta_l^{t + 1})\\geq J(\\theta_h^t,\\theta_l^t)$ . This repeated coordinate maximization converges to a fixed point, where no single coordinate update can further improve $J(\\theta_h,\\theta_l)$ .", + "bbox": [ + 169, + 722, + 823, + 753 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Given the theoretical monotonic improvement with TRPO and block coordinate ascent, we adopt a practical version of TRPO in our experiments, specifically Proximal Policy Optimization (PPO) [Schulman et al., 2017] or GRPO [Shao et al., 2024].", + "bbox": [ + 169, + 758, + 823, + 801 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "C.5 Learning to reason from the perspective of Leader Follower Game", + "text_level": 1, + "bbox": [ + 169, + 816, + 678, + 830 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Besides the loss function in the main part, we also propose to frame the problem as a leader-follower game. By analyzing the equilibria of the leader-follower game, we demonstrate that our framework inherently identifies the optimal sub-tasks aligned with the capabilities of the low-level model. 
This ensures that the high-level decisions are guided by the low-level model's strengths, leading to more efficient and targeted task decomposition.", + "bbox": [ + 169, + 842, + 825, + 912 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "C.5.1 Leader-follower game", + "text_level": 1, + "bbox": [ + 171, + 90, + 382, + 107 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "The leader-follower game, also known as the Stackelberg game, models interaction between two agents with parametrized strategies $\\pmb{\\theta} = (\\pmb{\\theta}_1, \\pmb{\\theta}_2)$ and differentiable objective functions $(\\mathcal{L}_1, \\mathcal{L}_2): \\mathbb{R}^d \\to \\mathbb{R}$ . In this framework, the leader announces its strategy first, and the follower observes this decision to respond optimally. This sequential structure enables the leader to anticipate the follower's reaction and adjust its strategy accordingly. A Stackelberg equilibrium occurs when neither agent can unilaterally improve its objective. 
Denoting $\\pmb{\\theta}_1$ as the leader's strategy and $\\pmb{\\theta}_2$ as the follower's, the loss functions $\\mathcal{L}_1$ and $\\mathcal{L}_2$ are optimized with the following bi-level structure:", + "bbox": [ + 169, + 114, + 823, + 213 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\theta} _ {1} ^ {*} = \\operatorname {a r g m i n} _ {\\boldsymbol {\\theta} _ {1}} \\mathcal {L} _ {1} (\\boldsymbol {\\theta}, \\boldsymbol {\\theta} _ {2} ^ {*} (\\boldsymbol {\\theta} _ {1})), \\quad \\boldsymbol {w} _ {2} ^ {*} (\\boldsymbol {\\theta} _ {1}) = \\operatorname {a r g m i n} _ {\\boldsymbol {\\theta} _ {2}} \\mathcal {L} _ {2} (\\boldsymbol {\\theta} _ {1}, \\boldsymbol {\\theta} _ {2}).\n$$\n", + "text_format": "latex", + "bbox": [ + 281, + 218, + 712, + 237 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Anil et al. [2021] apply the leader-follower game to ensure checkable answers in a prover-verifier game (PVG). The objective is a verifier that is both complete (accepts all correct proofs from a verifier) and sound (rejects all incorrect proofs from a verifier). They analyze different scenarios where the verifier acts as the leader, the prover as the follower, and both announce strategies simultaneously, forming a Nash equilibrium. The study concludes that in verifier-led SVG, a Stackelberg equilibrium is both necessary and sufficient for achieving a sound and complete verifier, whereas in other configurations, a Stackelberg equilibrium is not necessary or sufficient for this outcome.", + "bbox": [ + 169, + 246, + 823, + 344 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "C.5.2 Efficacy of LLM", + "text_level": 1, + "bbox": [ + 171, + 358, + 344, + 372 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Because the high-level policy possesses strong generalization capabilities, it is impractical for it to exhaustively explore every potential sub-task for each question. 
Instead, it naturally focuses on tasks within a feasible range of difficulty, leveraging only a limited set of coarse planning actions. Rather than pinpointing perfectly tailored sub-tasks, the policy searches for general tasks of particular computational complexity, i.e., difficulty, that it can handle reliably. Motivated by this perspective, we incorporate the concept of a reasoning boundary for large language models (LLMs) [Chen et al., 2024b]. Intuitively, the reasoning boundary circumscribes the maximum difficulty of problems a model can solve at a desired accuracy level. Formally, for a model $\\theta$ , a task $t$ , and a predefined threshold $A$ , the reasoning boundary of $\\theta$ represents the maximum problem difficulty $d$ that satisfies:", + "bbox": [ + 169, + 381, + 823, + 518 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {B} _ {A c c = A} (t | \\theta) = \\sup _ {d} \\{d | A c c (t | d, \\theta) = A \\}.\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 523, + 635, + 547 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "where $d$ denotes the problem difficulty. By quantifying the difficulty level a model can reliably handle, the reasoning boundary provides a systematic way to align the high-level policy's focus with the model's actual capabilities, gauge the efficacy of the low-level policy, and determine the optimal strategy for solving the question.", + "bbox": [ + 169, + 554, + 823, + 609 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "C.5.3 Leader-follower Game for LLM Reasoning", + "text_level": 1, + "bbox": [ + 169, + 625, + 529, + 640 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Our goal is to find the high-level policy that searches for the sub-task sequence based on the efficacy of the low-level policy to solve the question. 
We design the loss functions as follows:", + "bbox": [ + 169, + 648, + 823, + 676 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {h} = \\mathbb {E} _ {(x, y) \\sim p _ {D}, t _ {1: K}} \\left[ - \\log \\pi_ {l} \\left(y _ {K} \\mid x, t _ {1: K}, y _ {1: K - 1}\\right) \\right],\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 683, + 669, + 699 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {l} = \\mathbb {E} _ {x \\sim p _ {D}, t _ {1: k} \\sim \\pi_ {h}, \\hat {y} _ {k} \\sim \\pi_ {l}} \\left[ - r \\left(y _ {k}, \\hat {y} _ {k} \\mid x, t _ {1: k}, y _ {1: k - 1}\\right) \\right],\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 702, + 683, + 717 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "where $r(y_k, \\hat{y}_k \\mid x, t_{1:k}, y_{1:k-1})$ represents the step reward for the correctness of $\\hat{y}_k$ derived from the question $x$ , the sub-task sequence $t_{1:k}$ from the high policy and prior intermediate answer $y_{1:k-1}$ . The loss functions can be interpreted as follows: the high-level policy is incentivized to find subtasks that lead to the correct answer based on the capabilities of the low-level policy, while the low-level policy is incentivized to enhance its instruction-following ability.", + "bbox": [ + 169, + 722, + 823, + 792 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "How to minimize the loss functions and whether such minimization leads to the desired results remain questions. To explore this, we consider a simplified case of our method, where the high-level policy plans the complete sub-task sequence at the beginning and the low-level executes the instruction in a single interaction. The corresponding parameterized policies are defined as $\\pi_h((t_1,\\ldots ,t_K)\\mid x)$ and $\\pi_l((\\hat{y}_1,\\dots ,\\hat{y}_K)\\mid x,(t_1,\\dots ,t_K))$ . 
The corresponding loss functions are:", + "bbox": [ + 169, + 797, + 823, + 869 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {h} = \\mathbb {E} _ {(x, y) \\sim p _ {D}, t _ {1: K}} \\left[ - \\log \\pi_ {l} \\left(y _ {K} \\mid x, t _ {1: K}\\right) \\right], \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 875, + 821, + 888 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {l} = \\mathbb {E} _ {x \\sim p _ {D}, t _ {1: k} \\sim \\pi_ {h}, \\hat {y} _ {k} \\sim \\pi_ {l}} \\left[ - r \\left(y _ {k}, \\hat {y} _ {k} \\mid x, t _ {1: k}, y _ {1: k - 1}\\right) \\right]. \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 893, + 821, + 907 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "In this step, the high-level policy generates the entire sub-task sequence without relying on intermediate answers, while the low-level policy follows the sequence to produce answers for the sub-tasks. The low-level policy can still leverage prior intermediate answers to sequentially refine its responses.", + "bbox": [ + 169, + 90, + 823, + 133 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "To analyze the result agents by minimizing the loss functions, we adopt the completeness and soundness properties from the PVG framework for LLM reasoning. Specifically, if the high-level policy generates a sub-task sequence that is executable within the low-level policy's capabilities, the problem must be solved (completeness). Conversely, if the sub-task sequence is incorrect or beyond the low-level policy's capacity, the problem cannot be solved (soundness). To achieve this, we utilize the conclusion from Anil et al. 
[2021], which positions the low-level policy as the leader and the high-level policy as the follower, equilibria guarantee the complete and sound low-level policy.", + "bbox": [ + 169, + 138, + 823, + 238 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "When the high-level policy takes the lead, the low-level policy is forced to adapt to the specific strategy defined by the high-level policy, which can result in neither complete nor sound low-level policy. For example, if the high-level policy dictates that it will only generate sub-tasks involving addition and subtraction, the low-level policy is constrained to optimize only for these tasks. While they may reach an equilibrium, the low-level policy remains incomplete, and this limitation impacts both policies. In the case of the simultaneous PVG game, convergence to a Nash equilibrium is possible, but it is not sufficient for completeness and soundness. For instance, the low-level policy might disregard the high-level policy entirely (e.g., if the high-level provides incorrect instructions, but the low-level still performs correctly). This approach, however, is challenging to implement due to the significantly larger search space involved.", + "bbox": [ + 169, + 242, + 826, + 381 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Furthermore, the loss functions we design ensure that, at a Stackelberg equilibrium, the high-level policy identifies sub-task sequences that the low-level policy can execute to solve the problem with the highest probability. With the low-level policy acting as the leader, it establishes its reasoning boundary for tasks. Based on the reasoning boundary, let $\\theta_h$ and $\\theta_l$ represent the policy parameters for the high-level and low-level policies, respectively. 
The probability that the low-level policy correctly solves the question is defined as:", + "bbox": [ + 169, + 387, + 823, + 470 + ], + "page_idx": 25 + }, + { + "type": "equation", + "text": "\n$$\n\\pi_ {l} \\left(y _ {K} \\mid x, t _ {1: K}\\right) = \\prod_ {k = 1} ^ {K} \\operatorname {A c c} \\left(t _ {k} \\mid x, \\theta_ {l}\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 476, + 627, + 517 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "where we can compute the difficulty $d_{k}$ from $t_k$ and $x$ . where the difficulty $d_{k}$ can be derived from $t_k$ and $x$ . The loss function in Eq. (15) ensures that the selected sub-tasks are optimal for the low-level policy. Here we provide a theoretical condition under which the most efficient solution strategy can be identified, according to the efficacy of the LLM.", + "bbox": [ + 169, + 523, + 823, + 579 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "This approach can be viewed as a game between a high-level \"prover\" and a low-level \"verifier\". The verifier, representing the low-level policy, adheres the high-level policy's instructions to validate its reasoning. Unlike the classic PVG setting, where the prover has ground-truth labels, the label of our high-level policy depends on the tunable low-level policy. This distinction, where the low-level policy (leader) is inherently more complex, contrasts with traditional PVG setups and adds complexity due to the interdependence between the high- and low-level policies.", + "bbox": [ + 169, + 585, + 823, + 669 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "By framing the problem-solving process as a leader-follower game, with the low-level policy designated as the leader, we can construct a bi-level optimization problem to identify an equilibrium. Following the formulation in Sec. 
C.5.1, the problem is expressed as:", + "bbox": [ + 169, + 674, + 823, + 717 + ], + "page_idx": 25 + }, + { + "type": "equation", + "text": "\n$$\n\\theta_ {l} ^ {*} = \\underset {\\theta_ {l}} {\\arg \\min } \\mathcal {L} _ {l} (\\theta_ {h} ^ {*} (\\theta_ {l}), \\theta_ {l}) \\quad \\theta_ {h} ^ {*} (\\theta_ {l}) = \\underset {\\theta_ {l}} {\\arg \\min } \\mathcal {L} _ {h} (\\theta_ {h}, \\theta_ {l}).\n$$\n", + "text_format": "latex", + "bbox": [ + 297, + 720, + 696, + 747 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Then we can apply bi-level optimization techniques.", + "bbox": [ + 171, + 752, + 517, + 767 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "D Training Details", + "text_level": 1, + "bbox": [ + 171, + 786, + 346, + 803 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "D.1 Single-turn ReMA", + "text_level": 1, + "bbox": [ + 171, + 816, + 346, + 830 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We refer to Appendix G for prompts we use during training. We implement the training pipeline with OpenRLHF [Hu et al., 2024a] which is a highly efficient codebase and is easy to scale up. We select REINFORCE++ to save resources and for efficient training. All experiments are conducted in a node of 8 NVIDIA A100 GPUs. We use bf16, Zero2, Flash-Attention and gradient checkpointing to run our experiments.", + "bbox": [ + 169, + 840, + 823, + 912 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "During rollout, we set temperature $= 1.0$ , top_p $= 1.0$ , top_k $= -1$ , and use vLLM for inference acceleration. We set the max generation length to be 2048 and, the rollout batch size to be 1000. The number of samples per prompt is 4. During training, we use Adam Optimizer with a learning rate of 5e-7. We set the mini-batch size to be 500, and the clip ratio to be 0.2. 
Other hyperparameters, such as KL coefficients and the number of training episodes, were carefully tuned based on validation set performance to ensure robust and reliable results. To align with the hyperparameter in OpenRLHF, we use #Training Episode as the number of reinforcement learning epoch on the entire dataset.", + "bbox": [ + 169, + 90, + 823, + 188 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "In ReMA, during prompt filtering of the high-level model, the high-level agent first samples 10 candidates for each question with $t = 1.0$ , and for each output the low-level agents sample 1 solution with $t = 0.0$ , then we select questions of success rate between $[\\varepsilon_{\\mathrm{min}}, \\varepsilon_{\\mathrm{max}}]$ . And for the low-level agent's prompt filtering, the high-level agent first samples 1 candidate for each question with $t = 0.0$ and for each output the low-level agents sample 10 solutions with $t = 1.0$ , then we select questions of success rate between $[\\varepsilon_{\\mathrm{min}}, \\varepsilon_{\\mathrm{max}}]$ and use the high-level agent to sample 4 meta-thoughts with $t = 1.0$ as the input.", + "bbox": [ + 169, + 194, + 826, + 294 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "D.1.1 Supervised fine-tuning data collection", + "text_level": 1, + "bbox": [ + 171, + 306, + 491, + 321 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "For experiments in Sec. 4.2.1, we collect expert data to enhance the reasoning pattern, i.e. $RL$ from SFT. Specifically, we collect demonstration data from GPT-4o Mini on MATH training dataset (7.5k problems) Hendrycks et al. [2021] and use it to fine-tune the LLMs. The data generation follows these steps: First, we prompt GPT-4o Mini to produce metacognitive reasoning for high-level model training. Specifically, we use different prompts to instruct it to rewrite and decompose a given question without providing a final answer. 
We collect metacognitive reasoning using two predefined actions, \"rewrite\" and \"decompose\", which align with human approaches to complex problem-solving while preserving answer diversity. Next, we use the generated instructions to prompt GPT-4o Mini to follow the metacognitive steps and solve the question, obtaining SFT data for low-level policy training. Below, we present the prompts used for both high-level and low-level models. Prompts can be found in Appendix G.1.1.", + "bbox": [ + 169, + 329, + 826, + 482 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "D.1.2 Dataset Curation of RewardBench970", + "text_level": 1, + "bbox": [ + 171, + 496, + 493, + 510 + ], + "page_idx": 26 + }, + { + "type": "table", + "img_path": "images/13ee9b755412f0ddda27dfb3a4338a0ead1c5a78cddf2ef644b3f886d89c5815.jpg", + "table_caption": [ + "Table 2: Performance on LLM-as-a-Judge benchmarks, trained on dataset under the loose setting. The two-agent workflow in ReMA" + ], + "table_footnote": [], + "table_body": "
ModelBenchmarkVRP(CoT)\\( \\mathbf{V R P_{R L}} \\)\\( \\mathbf{M R P_{R L}} \\)ReMA(Ours)
Llama3.1-8B-InstructRewardBench97071.2481.86 (+10.62)80.41 (+9.17)86.29 (+15.05)
JudgeBench51.7751.45 (-0.32)50.65 (-1.12)53.71 (+1.94)
Average61.5166.65 (+5.14)65.53 (+4.02)70.00 (+8.49)
Qwen2.5-7B-InstructRewardBench97086.4987.22 (+0.73)80.31 (-6.18)90.72 (+4.23)
JudgeBench58.3954.84 (-3.55)55.81 (-2.58)58.71 (+0.32)
Average72.4471.03 (-1.41)68.06 (-4.38)74.72 (+2.28)
", + "bbox": [ + 179, + 561, + 818, + 702 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "We process the original dataset in RewardBench by splitting it into a training set containing 5,000 tuples of (instruction, response A, response B) and a test set with the remaining 970 tuples.", + "bbox": [ + 169, + 717, + 823, + 747 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "To ensure a meaningful dataset split, we validate two separation strategies:", + "bbox": [ + 171, + 752, + 663, + 767 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Loose setting: We only ensure that there is no direct overlap of tuples between the training and test sets.", + "- Strict setting: We further enforce that no instruction appears in both the training and test sets. The results for this setting are presented in the main results (Table 1b)." + ], + "bbox": [ + 215, + 777, + 823, + 838 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Additionally, since the original RewardBench data originates from different subsets, we ensure that all original subsets are evenly represented in both the training and test sets.", + "bbox": [ + 169, + 848, + 823, + 877 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Table 2 reports the learning performance of various methods under the loose dataset split setting. Compared to the results in Table 1b, ReMA significantly outperforms other RL tuning baselines", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "across all models, particularly on out-of-distribution (OOD) benchmarks. 
The consistent improvements on OOD datasets of these two settings suggest that ReMA enhances meta-thinking ability, resulting in better generalization across diverse task distributions.", + "bbox": [ + 169, + 90, + 823, + 133 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "D.1.3 Training on MATH", + "text_level": 1, + "bbox": [ + 171, + 148, + 366, + 162 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "VRP For Llama3-8B-Instruct, Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 12,6,6 for these 3 models respectively. For Llama3-8B-Instruct, we set the learning rate of 2e-7 for stable training.", + "bbox": [ + 169, + 172, + 823, + 214 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "MRP For Llama3-8B-Instruct, Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 10,6,6 for these 3 models respectively.", + "bbox": [ + 169, + 229, + 823, + 258 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "MAMRP We use $\\varepsilon_{\\mathrm{min}} = 0.2, \\varepsilon_{\\mathrm{max}} = 0.8$ for prompt filtering. We use the same #Training Episode=4 for all models, and for #Update Iteration, we use 3 for Llama3-8B-Instruct and Llama3.1-8B-Instruct, 10 for Qwen2.5-7B-Instruct. 
And we set the KL coefficient to be 1e-2 for all the 3 models.", + "bbox": [ + 169, + 272, + 823, + 328 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "D.1.4 Training on Reward Bench", + "text_level": 1, + "bbox": [ + 171, + 343, + 416, + 358 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "VRP For Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 4,6 for these 2 models respectively.", + "bbox": [ + 169, + 367, + 823, + 397 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "MRP For Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 4,6 for these 2 models respectively.", + "bbox": [ + 169, + 410, + 823, + 439 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "MAMRP We set #Update Iteration=1 for all models. We set the KL coefficient to be 1e-2 for Llama3.1-8B-Instruct and 1e-2 for Qwen2.5-7B-Instruct all models. For Llama3.1-8B-Instruct, we use $\\varepsilon_{\\mathrm{min}} = 0.2$ , $\\varepsilon_{\\mathrm{max}} = 0.8$ for prompt filtering and we use #Training Episode of 2 during training. For Llama3.1-8B-Instruct, we use $\\varepsilon_{\\mathrm{min}} = 0.1$ , $\\varepsilon_{\\mathrm{max}} = 0.9$ for prompt filtering and we use #Training Episode of 1 during training.", + "bbox": [ + 169, + 454, + 823, + 525 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "D.2 Multi-turn ReMA", + "text_level": 1, + "bbox": [ + 171, + 541, + 341, + 555 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "We refer to Appendix G for prompts we use during training. We implement a multi-turn ReMA training pipeline with VeRL [Sheng et al., 2024] since it's easier to implement complex training pipeline with a single centralized controller. 
Similar to OpenRLHF, VeRL is also a highly efficient and scalable codebase for further development.", + "bbox": [ + 169, + 566, + 823, + 622 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "For the multi-turn ReMA rollout, we use parameter sharing and simultaneous update by default. In details, we maintain two message lists with the system prompt of meta-thinking agent and reasoning agent respectively. During rollout, each agent acts as 'assistant' in its own message list and the other agent acts as 'user'. We use three hyperparameters to control the rollout length: (1) 'max_num_turns': the maximum number of turns for each trajectory. (2) 'max_response_length': the maximum number of tokens for each turn's response. (3) 'max_prompt_length': the maximum number of tokens for each trajectory.", + "bbox": [ + 169, + 628, + 823, + 726 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "During training, we apply the collected message list to Qwen2.5-7B's chat template and build loss masks in order to compute the loss for all turns of one trajectory (message list).", + "bbox": [ + 169, + 731, + 823, + 761 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Moreover, for multi-turn ReMA rollout, unlike single agent single turn rollout, we need to carefully design the termination logic. Basically, we let the meta-thinking agent automatically decide when to finish the solving procedure, we use a special tag '[FINISH]' to indicate the end of the solving procedure. After we detect this tag, we will terminate trajectory after the reasoning agent generates its output.", + "bbox": [ + 169, + 766, + 823, + 835 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "We also design other termination conditions to ensure the quality of the generated trajectories. If the last agent's response is too long, we will terminate the whole trajectory and setting the reward to 0. 
We also introduce a different version of format reward: we give a reward of 1.0 only if the reasoning agent's last turn response is correct and the meta-thinking agent's last turn response include '[FINISH]'. We use math_check as the default verifier.", + "bbox": [ + 169, + 842, + 825, + 911 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "D.2.1 SFT data collection of multi-turn MAMRP", + "text_level": 1, + "bbox": [ + 171, + 90, + 527, + 104 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "We use GPT-4o to translate 817 samples in LIMO [Ye et al., 2025c] by prompting it to wrap each sentence with meta-thinking and reasoning tags. We use a temperature of 0. After filtering, we get 800 conversations for training. The prompt can be found in Appendix G.2.1. For supervised finetuning, we use LlamaFactory as the codebase and train the model for 3 epochs with a learning rate of 1e-5, consine learning rate scheduler, and batch size of 8. Use DeepSpeed Zero2 for distributed training.", + "bbox": [ + 169, + 117, + 823, + 202 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "D.2.2 Training on MATH", + "text_level": 1, + "bbox": [ + 171, + 222, + 366, + 237 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "For training of multi-turn ReMA on MATH, we use GRPO [Shao et al., 2024] as the default learning algorithm. We refer to Appendix G.2.2 for prompts. For experiment in Sec 4.3, we use sample 128 prompts, each with 16 trajectories. During training, we drop the KL loss term to improve the numerical stability. We use a learning rate of 1e-6, bfloat16 precision, FSDP backend for distributed training. We split the rollout data into 4 mini-batches for update. 
For the sake of numerical stability, we do pre-clip before computing the exponential of log-prob for a upperbound of 3.0.", + "bbox": [ + 169, + 248, + 823, + 333 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "For the main result in Fig 5, we test different rollout configurations with a max_prompt_length of 4096, training for 500 steps. We use 32 NVIDIA A800 GPUs, the longest training cost about 40 hours due to large scale validation per 10 steps.", + "bbox": [ + 169, + 338, + 823, + 380 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "For the ablation results in Fig 6, we use a tiny subset of MATH Level 3-5, training for 300 steps. Specifically, we sample 19 questions for every single type (133 instances in total). We use 8 NVIDIA A800 GPUs, the training cost about 30 hours", + "bbox": [ + 169, + 386, + 823, + 429 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "We test different rollout configurations:", + "bbox": [ + 171, + 435, + 433, + 448 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "(1) max_num_turns=30, max_response_length=256, max_prompt_length=4096 (2) \nmax_num_turns=30, max_response_length=1024, max_prompt_length=3072", + "bbox": [ + 169, + 449, + 823, + 477 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "And for the experiment of separate parameter in multi-turn ReMA, we iteratively train each agent with the same configuration as above, but with a switch interval of 10 steps, starting from the metathinking agent.", + "bbox": [ + 169, + 483, + 823, + 526 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "E Other Experiments", + "text_level": 1, + "bbox": [ + 171, + 551, + 372, + 568 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "E.1 Reward functions shape cross-agent behaviors", + "text_level": 1, + "bbox": [ + 171, + 585, + 539, + 601 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "We also investigate the impact of different reward function designs on ReMA's 
behavior. In addition to the base reward setting described in Appendix C.2, we evaluate a consistency-based reward function using Qwen2.5-7B-Instruct. This reward function is designed to encourage the high-level agent to generate more detailed guidance. Indeed, we observe that the high-level agent trained in this manner produces more detailed solution steps compared to the one trained with the basic correctness format reward. However, we also find that this approach often leads to jailbreak behavior, where the high-level agent tends to include the final answer within its output, compromising the intended hierarchical reasoning process.", + "bbox": [ + 169, + 614, + 823, + 726 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Furthermore, we discover an interesting evolution of a pattern during training: although our experimental setup is designed for the high-level agent to provide a solution plan while the lower-level agent executes it, we find that under the consistency-based reward, the lower-level agent significantly increases its attempt of verification rather than straightforward execution. We observed a certain sentence commonly appearing in the low-level agent's responses: \"Let's go through the solution step by step to ensure clarity and correctness.\" To quantify this effect, we track the frequency of it. We analyze this pattern across all mathematical test sets, sampling eight completions per question at a temperature of 0.7. Our empirical results have identified a $30\\mathrm{x}$ increase of such self-verifying patterns in the model trained with the consistency-based reward compared to the one trained with the base reward. Moreover, we also observe additional variations of this pattern, e.g. 
\"Let's carefully re-evaluate the problem and solution to ensure accuracy and clarity.\" These phrases indicate that the low-level agent is actively exploring to verify the detailed response provided by the high-level agent.", + "bbox": [ + 169, + 731, + 826, + 912 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "This suggests that (1) meta-thinking can not only emerge and be reinforced in the high-level agent but also in the low-level agent. During reinforcement learning (RL) training, the two agents develop a novel problem-solving pattern characterized by a role reversal. (2) Consistency-based rewards promote a more self-corrective approach at the lower level, potentially disrupting the intended separation of roles between planning and execution. For a detailed case study, refer to Appendix F.2.", + "bbox": [ + 169, + 90, + 823, + 161 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "E.2 Detailed Training Curves on Different Datasets of Multi-turn ReMA", + "text_level": 1, + "bbox": [ + 171, + 176, + 689, + 191 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "We show the detailed training curves of the multi-turn ReMA on different datasets in Fig. 
8.", + "bbox": [ + 171, + 202, + 772, + 217 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/3a07830c095bd1b0e7d492fba662d270532fb4263b681ea523bd6daaeb0902da.jpg", + "image_caption": [ + "Figure 8: Detailed Training Curves on Different Datasets of Multi-turn ReMA" + ], + "image_footnote": [], + "bbox": [ + 176, + 232, + 823, + 483 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "F Qualitative Results", + "text_level": 1, + "bbox": [ + 171, + 539, + 369, + 556 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "F.1 High-level policy finds better plans", + "text_level": 1, + "bbox": [ + 171, + 569, + 455, + 585 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Here is an example of how a high-level policy alters the solving method of an LLM, increasing the likelihood of providing correct answers. As we can see from the following example, without the high-level policy, the LLM counts all integer coordinates, including those on the boundary, and then subtracts the boundary coordinates. In contrast, the high-level policy identifies a better approach, directly instructing the LLM to count only the coordinates strictly inside the boundary.", + "bbox": [ + 169, + 595, + 823, + 666 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "F.2 Case study for Experiments in Section E.1", + "text_level": 1, + "bbox": [ + 171, + 681, + 504, + 696 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Fig. 10 and Fig. 11 show an case study of experiments in Sec. E.1.", + "bbox": [ + 171, + 707, + 609, + 722 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Although both agents are prompted with the same instructions as in our main results, the consistency reward of the high-level agent significantly alters the learning dynamics. As illustrated in Fig. 10, the high-level agent generates detailed solution attempts rather than a strategic plan. 
Consequently, the low-level agent evolves to verify the high-level agent's solutions. This suggests that, during reinforcement learning (RL) training, the two agents develop a novel problem-solving pattern characterized by a role reversal, where one agent generates an answer while the other verifies it.", + "bbox": [ + 169, + 727, + 823, + 811 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "F.3 Case study for Experiments in Section 4.2.2", + "text_level": 1, + "bbox": [ + 171, + 827, + 516, + 842 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Fig. 12 shows the detailed outputs of the two groups of LLMs trained in JSON action. The outputs finally converge to relative easier and shorter sentences, compared to our main experiments. This may due to insufficient capacity of a small LM to produce a valid JSON-format response while simultaneously exploring diverse reasoning strategies.", + "bbox": [ + 169, + 852, + 823, + 907 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Consider the rectangular region with the following points as vertices: $(5,4),(-5,4),(-5, - 4),(5, - 4)$ . How many points with integer coordinates will be strictly in the interior of this rectangular region?", + "bbox": [ + 187, + 99, + 805, + 119 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Output without high-level planning solves this question in a less optimal way, leading to a wrong answer:", + "text_level": 1, + "bbox": [ + 217, + 126, + 465, + 145 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "To determine how many points with integer coordinates will be strictly in the interior of the rectangular region with vertices at $(5,4)$ , $(-5,4)$ , $(-5, - 4)$ , and $(5, - 4)$ , we will follow these steps:", + "bbox": [ + 187, + 151, + 495, + 179 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "1. 
Identify the dimensions of the rectangle:", + "bbox": [ + 189, + 179, + 356, + 186 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "- The width of the rectangle can be calculated as the distance between the x-coordinates of the vertices. The x-coordinates are $-5$ and 5.", + "bbox": [ + 187, + 188, + 491, + 205 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {W i d t h} = 5 - (- 5) = 1 0\n$$\n", + "text_format": "latex", + "bbox": [ + 290, + 212, + 393, + 223 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "- The height of the rectangle can be calculated as the distance between the y-coordinates of the vertices. The y-coordinates are 4 and -4.", + "bbox": [ + 187, + 229, + 491, + 247 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {H e i g h t} = 4 - (- 4) = 8\n$$\n", + "text_format": "latex", + "bbox": [ + 292, + 253, + 392, + 263 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "2. Calculate the total number of integer points: - The total number of integer points in the rectangle (including the boundary) can be calculated by considering the range of x and y values: - The x-values range from $-5$ to 5, giving us 11 integer values $(-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)$ . - The y-values range from $-4$ to 4, giving us 9 integer values (", + "bbox": [ + 186, + 270, + 488, + 315 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "$-4, -3, -2, -1, 0, 1, 2, 3, 4)$ . The total number of points in the rectangle (including the boundary) is:", + "bbox": [ + 187, + 316, + 480, + 333 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\\text {T o t a l p o i n t s} = 1 1 \\times 9 = 9 9\n$$\n", + "text_format": "latex", + "bbox": [ + 284, + 340, + 401, + 349 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "3. Calculate the number of points on the boundary: - The boundary consists of the four sides of the rectangle. 
- The top and bottom sides (y = 4 and y = -4) each have 11 integer points (from -5 to 5). - The left and right sides (x = 5 and x = -5) each have 9 integer points (from -4 to 4). - Therefore, the total number of points on the boundary is:", + "bbox": [ + 186, + 356, + 496, + 402 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\\text {B o u n d a r y p o i n t s} = 2 \\times 1 1 + 2 \\times 9 = 2 2 + 1 8 = 4 0\n$$\n", + "text_format": "latex", + "bbox": [ + 235, + 407, + 449, + 417 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "4. Calculate the number of strictly interior points: - To find the number of points strictly in the interior, we subtract the points on the boundary from the total number of points:", + "bbox": [ + 186, + 424, + 486, + 452 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\nI n t e r i o r \\text {p o i n t s} = \\text {T o t a l p o i n t s - B o u n d a r y p o i n t s} = 9 9 - 4 0 = 5 9\n$$\n", + "text_format": "latex", + "bbox": [ + 202, + 458, + 482, + 469 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Thus, the number of points with integer coordinates that will be strictly in the interior of this rectangular region is 59.", + "bbox": [ + 187, + 474, + 488, + 494 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Answer with high-level planning identifies the optimal sub-tasks for the LLM to solve, increasing the chance of answering the question correctly", + "bbox": [ + 506, + 126, + 805, + 146 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "High-level policy planning: First, identify the boundaries of the rectangular region. Then, determine the integer coordinates that lie strictly within these boundaries. 
Finally, count the number of such points.", + "bbox": [ + 504, + 148, + 803, + 178 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Low-level policy reasoning following the high-policy plan: To solve the problem, we will follow the steps outlined in the instructions.", + "bbox": [ + 504, + 188, + 794, + 207 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Step 1: Identify the boundaries of the rectangular region. The vertices of the rectangular region are: $(-5,4)$ $(-5,4)$ $(-5, - 4)$ $(5, - 4)$ From these vertices, we can determine the boundaries:- The left boundary is $x = -5$ The right boundary is $x = 5$ .- The bottom boundary is $y = -4$ .- The top boundary is $y = 4$", + "bbox": [ + 503, + 214, + 800, + 261 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Step 2: Determine the integer coordinates that lie strictly within these boundaries. For a point $(x,y)$ to be strictly inside the rectangle, it must satisfy: $-5 < x < 5 - 4 < y < 4$", + "bbox": [ + 503, + 268, + 779, + 296 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Step 3: Count the integer coordinates that satisfy these conditions. Now, we will find the integer values for $x$ and $y$ that fall within these boundaries. For $x$ : The integer values that satisfy $-5 < x < 5$ are:", + "bbox": [ + 503, + 304, + 807, + 330 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "$-4, -3, -2, -1, 0, 1, 2, 3, 4$ This gives us 9 possible integer values. For $y$ The integer values that satisfy $-4 < y < 4$ are: $-3, -2, -1, 0, 1, 2, 3$ . This gives us 7 possible integer values.", + "bbox": [ + 503, + 330, + 800, + 358 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Step 4: Calculate the total number of points. 
Now, we multiply the number of possible integer values for $x$ by the number of possible integer values for $y$ .", + "bbox": [ + 503, + 366, + 805, + 385 + ], + "page_idx": 30 + }, + { + "type": "equation", + "text": "\n$$\n\\text {T o t a l p o i n t s} = 9 \\times 7 = 6 3\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 390, + 714, + 401 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Finally, the number of points with integer coordinates that lie strictly in the interior of the rectangular region is $\\boxed{63}$ .", + "bbox": [ + 503, + 407, + 795, + 426 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Figure 9: Case Study comparing with and without high-level metacognition results.", + "bbox": [ + 222, + 515, + 771, + 531 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "G Prompts", + "text_level": 1, + "bbox": [ + 171, + 574, + 282, + 593 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "G.1 Single-turn ReMA prompts", + "text_level": 1, + "bbox": [ + 171, + 618, + 410, + 633 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "G.1.1 Prompts for JSON data collection", + "text_level": 1, + "bbox": [ + 171, + 652, + 465, + 667 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Prompt for metacognition reasoning to rewrite:", + "bbox": [ + 171, + 685, + 483, + 700 + ], + "page_idx": 30 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "System prompt: \nYou are a math expert trying to solve mathematical problems. \nBefore answering a question, your task is to rewrite the original question to make it clearer. \nProvide your rewritten content in JSON format: \n{ \"action\": \"REWRITE\", \"output\": \"{clearer question content}\" \n} \nRespond only with valid JSON. Do not write an introduction or summary. 
\nUser prompt: \nHere is the question: [problem_text]", + "guess_lang": "txt", + "bbox": [ + 176, + 708, + 816, + 890 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Prompt for metacognition reasoning to decompose:", + "bbox": [ + 171, + 897, + 509, + 912 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 30 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Question: The equation $x^{10} + (13x - 1)^{10} = 0$ has 10 complex roots $r_1,\\overline{r}_1,r_2,\\overline{r}_2,r_3,\\overline{r}_3,r_4,\\overline{r}_4,r_5,\\overline{r}_5$ , where the bar denotes complex conjugation. Find the value of $\\frac{1}{r_1\\overline{r}_1} +\\frac{1}{r_2\\overline{r}_2} +\\frac{1}{r_3\\overline{r}_3} +\\frac{1}{r_4\\overline{r}_4} +\\frac{1}{r_5\\overline{r}_5}$", + "guess_lang": "txt", + "bbox": [ + 184, + 99, + 784, + 109 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "High-Level Agent's Output", + "text_level": 1, + "bbox": [ + 294, + 113, + 380, + 119 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "To solve this problem, let's break it down into steps that a low-level math-solving agent can follow:", + "bbox": [ + 186, + 122, + 431, + 128 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "1. Understand the given equation:", + "bbox": [ + 187, + 133, + 277, + 140 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "The equation is $x^{10} + (13x - 1)^{10} = 0$", + "bbox": [ + 187, + 141, + 290, + 146 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "2. 
Rewrite the equation:", + "bbox": [ + 187, + 147, + 250, + 152 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Notice that the equation can be rewritten as:", + "bbox": [ + 187, + 152, + 300, + 157 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\nx ^ {1 0} = - (1 3 x - 1) ^ {1 0}.\n$$\n", + "text_format": "latex", + "bbox": [ + 308, + 165, + 364, + 172 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "3. Consider the roots:", + "bbox": [ + 186, + 178, + 243, + 183 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "The roots of the equation are given as $r_1, \\overline{r}_1, r_2, \\overline{r}_2, r_3, \\overline{r}_3, r_4, \\overline{r}_4, r_5, \\overline{r}_5$ , where the bar denotes complex conjugation. 4. Use the property of complex conjugates:", + "For any complex number $z$ , the product of $z$ and its conjugate $\\overline{z}$ is the square of the modulus of $z$ i.e., $z\\overline{z} = |z|^2$ 5. Calculate the required sum:", + "3. 
Calculate the required sum $\\mathbf{W} = \\mathbf{w}_{1} + \\mathbf{w}_{2} + \\dots + \\mathbf{w}_{n}$" + ], + "bbox": [ + 186, + 184, + 478, + 208 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{r _ {1} \\overline {{F _ {1}}}} + \\frac {1}{r _ {2} \\overline {{F _ {2}}}} + \\frac {1}{r _ {3} \\overline {{F _ {3}}}} + \\frac {1}{r _ {4} \\overline {{F _ {4}}}} + \\frac {1}{r _ {5} \\overline {{F _ {5}}}}\n$$\n", + "text_format": "latex", + "bbox": [ + 285, + 220, + 388, + 234 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Using the property from step 4, this can be rewritten as:", + "bbox": [ + 189, + 237, + 328, + 243 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{| r _ {1} | ^ {2}} + \\frac {1}{| r _ {2} | ^ {2}} + \\frac {1}{| r _ {3} | ^ {2}} + \\frac {1}{| r _ {4} | ^ {2}} + \\frac {1}{| r _ {5} | ^ {2}},\n$$\n", + "text_format": "latex", + "bbox": [ + 285, + 252, + 390, + 266 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "6. Simplify the expression:", + "Since $r_1, r_2, r_3, r_4, r_5$ are roots of the equation $x^{10} = -(13x - 1)^{10}$ , the magnitudes $|r_i|$ are the same for each pair $r_i$ and $\\overline{r}_i$ . Let $|r_i| = |r_i|$ for all $i, j$ .", + "7. Find the common magnitude:", + "To find $|r_{1}|$ , consider the magnitudes of the terms on both sides of the equation $x^{10} = -(13x - 1)^{10}$ . The magnitude of the left side is $|x|^{10}$ and the magnitude of the right side is $|13x - 1|^{10}$ . 
Therefore:" + ], + "bbox": [ + 186, + 270, + 485, + 304 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\left| x \\right| ^ {1 0} = \\left| 1 3 x - 1 \\right| ^ {1 0}.\n$$\n", + "text_format": "latex", + "bbox": [ + 310, + 313, + 362, + 320 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Taking the 10th root of both sides, we get:", + "bbox": [ + 189, + 325, + 295, + 330 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n| x | = | 1 3 x - 1 |.\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 340, + 359, + 347 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "8. Solve for $|x|$ :", + "Let $|x| = a$ . Then:" + ], + "bbox": [ + 186, + 351, + 236, + 362 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "$\\therefore m - 1 \\neq 0$ ;", + "bbox": [ + 186, + 364, + 274, + 369 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\na = \\left| 1 3 a - 1 \\right|.\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 372, + 356, + 378 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "This gives us two cases:", + "- $a = 13$", + "- $a = -(13a - 1)$", + "Solving the first case:" + ], + "bbox": [ + 187, + 383, + 251, + 406 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\na = 1 3 a - 1 \\Longrightarrow 1 2 a = 1 \\Longrightarrow a = \\frac {1}{1 2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 281, + 415, + 393, + 428 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Solving the second case:", + "bbox": [ + 189, + 431, + 251, + 436 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\na = - 1 3 a + 1 \\Longrightarrow 1 4 a = 1 \\Longrightarrow a = \\frac {1}{1 4}.\n$$\n", + "text_format": "latex", + "bbox": [ + 279, + 445, + 395, + 458 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Since $a$ must be the same for all 
roots, we need to check which value is consistent. Given the nature of the problem, we can assume $a = \\frac{1}{2}$ as the correct magnitude.", + "9. Calculate the final sum:", + "Since $|r_i| = \\frac{1}{2}$ for all $i$ , we have" + ], + "bbox": [ + 186, + 460, + 485, + 483 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{| r _ {1} | ^ {2}} = \\frac {1}{(\\frac {1}{4}) ^ {2}} = 1 4 4.\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 493, + 367, + 508 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Therefore, the sum is:", + "bbox": [ + 189, + 513, + 245, + 518 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 = 5 \\times 1 4 4 = 7 2 0.\n$$\n", + "text_format": "latex", + "bbox": [ + 274, + 529, + 401, + 535 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "10. Final answer:", + "bbox": [ + 186, + 540, + 232, + 545 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{\\frac {1}{n} + \\frac {1}{n}} + \\frac {1}{\\frac {1}{n} + \\frac {1}{n}} + \\frac {1}{\\frac {1}{n} + \\frac {1}{n}} \\text {i s} 7 2 0.\n$$\n", + "text_format": "latex", + "bbox": [ + 243, + 545, + 326, + 551 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Low-Level Agent's Output", + "text_level": 1, + "bbox": [ + 611, + 113, + 696, + 119 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Let's go through the solution step by step to ensure clarity and correctness.", + "bbox": [ + 496, + 122, + 694, + 128 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Understand the given equation:", + "The equation is $x^{10} + (13x - 1)^{10} = 0$", + "2. 
Rewrite the equation:", + "Notice that the equation can be rewritten as:" + ], + "bbox": [ + 496, + 133, + 609, + 156 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\nx ^ {1 0} = - (1 3 x - 1) ^ {1 0}.\n$$\n", + "text_format": "latex", + "bbox": [ + 625, + 165, + 681, + 172 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "3. Consider the roots:", + "bbox": [ + 496, + 178, + 553, + 183 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "The roots of the equation are given as $r_1, \\overline{r}_1, r_2, \\overline{r}_2, r_3, \\overline{r}_3, r_4, \\overline{r}_4, r_5, \\overline{r}_5$ , where the bar denotes complex conjugation. 4. Use the property of complex conjugates:", + "For any complex number $z$ , the product of $z$ and its conjugate $\\overline{z}$ is the square of the modulus of $z$ , i.e., $z\\overline{z} = |z|^2$ . 5. Calculate the required sum:", + "We need to find the value of:" + ], + "bbox": [ + 496, + 184, + 787, + 210 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{r _ {1} F _ {1}} + \\frac {1}{r _ {2} F _ {2}} + \\frac {1}{r _ {3} F _ {3}} + \\frac {1}{r _ {4} F _ {4}} + \\frac {1}{r _ {5} F _ {5}}\n$$\n", + "text_format": "latex", + "bbox": [ + 602, + 220, + 705, + 234 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Using the property from step 4, this can be rewritten as:", + "bbox": [ + 500, + 237, + 638, + 243 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{| r _ {1} | ^ {2}} + \\frac {1}{| r _ {2} | ^ {2}} + \\frac {1}{| r _ {3} | ^ {2}} + \\frac {1}{| r _ {4} | ^ {2}} + \\frac {1}{| r _ {5} | ^ {2}} + \\frac {1}{| r _ {6} | ^ {2}}\n$$\n", + "text_format": "latex", + "bbox": [ + 601, + 251, + 705, + 265 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "6. 
Simplify the expression:", + "Since $r_1, r_2, r_3, r_4, r_5$ are roots of the equation $x^{10} = -(13x - 1)^{10}$ , the magnitudes $|r_i|$ are the same for each pair $r_1$ and $\\overline{r}_i$ . Let $|r_i| = |r_i|$ for all $i, j$ .", + "7. Find the common magnitude:", + "To find $|\\boldsymbol{r}_i|$ , consider the determinants of the terms on both sides of the equation $x^{10} = -(13x - 1)^{10}$ . The magnitude of the left side is $|z|^10$ and the magnitude of the right side is $|13z - 1|^{10}$ . Therefore:" + ], + "bbox": [ + 496, + 270, + 808, + 304 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n| x | ^ {1 0} = | 1 3 x - 1 | ^ {1 0}.\n$$\n", + "text_format": "latex", + "bbox": [ + 627, + 311, + 679, + 320 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Taking the 10th root of both sides, we get:", + "bbox": [ + 500, + 324, + 606, + 330 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\left| x \\right| = \\left| 1 3 x - 1 \\right|.\n$$\n", + "text_format": "latex", + "bbox": [ + 632, + 339, + 674, + 345 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "8. Solve for $|x|$ :", + "bbox": [ + 496, + 349, + 537, + 356 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Let $|x| = a$ . 
Then:", + "bbox": [ + 496, + 357, + 547, + 362 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\na = | 1 3 x - 1 |.\n$$\n", + "text_format": "latex", + "bbox": [ + 633, + 372, + 673, + 378 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "This gives us two cases:", + "-α=13a-1", + "- $a = -(13a - 1)$", + "Solving the first case:" + ], + "bbox": [ + 496, + 383, + 560, + 406 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\na = 1 3 a - 1 \\Longrightarrow 1 2 a = 1 \\Longrightarrow a = \\frac {1}{1 2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 415, + 709, + 428 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Solving the second case:", + "bbox": [ + 500, + 431, + 563, + 436 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\na = - 1 3 a + 1 \\Rightarrow 1 4 a = 1 \\Rightarrow a = \\frac {1}{1 4}.\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 445, + 710, + 458 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Since $a$ must be the same for all roots, we need to check which value is consistent. Given the nature of the problem, we can assume $a = \\frac{1}{2}$ as the correct magnitude.", + "9. 
Calculate the final sum:", + "Since $|r_i| = \\frac{1}{12}$ for all i, we have:" + ], + "bbox": [ + 496, + 460, + 805, + 484 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{| r _ {i} | ^ {2}} = \\frac {1}{(\\mathrm {古}) ^ {2}} = 1 4 4.\n$$\n", + "text_format": "latex", + "bbox": [ + 622, + 493, + 684, + 510 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Therefore, the sum is:", + "bbox": [ + 500, + 513, + 555, + 518 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 = 5 \\times 1 4 4 = 7 2 0.\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 527, + 717, + 535 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "10. Final answer:", + "bbox": [ + 496, + 540, + 542, + 545 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\text {The v a l u e o f} \\frac {1}{r _ {1} r _ {1}} + \\frac {1}{r _ {2} r _ {2}} + \\frac {1}{r _ {3} r _ {3}} + \\frac {1}{r _ {4} r _ {4}} + \\frac {1}{r _ {5} r _ {5}} \\text {i s} \\boxed {7 2 0}.\n$$\n", + "text_format": "latex", + "bbox": [ + 500, + 545, + 640, + 551 + ], + "page_idx": 31 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Question: The equation $x^{10} + (13x - 1)^{10} = 0$ has 10 complex roots $r_1, \\overline{r}_1, r_2, \\overline{r}_2, r_3, \\overline{r}_3, r_4, \\overline{r}_4, r_5, \\overline{r}_5$ , where the bar denotes complex conjugation. 
Find the value of $\\frac{1}{r_1r_4} + \\frac{1}{r_2r_2} + \\frac{1}{r_3r_3} + \\frac{1}{r_4r_4} + \\frac{1}{r_5r_5}$ .", + "guess_lang": "latex", + "bbox": [ + 184, + 604, + 782, + 616 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "High-Level Agent's Output", + "bbox": [ + 186, + 618, + 279, + 623 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Low-Level Agent's Output (part I)", + "bbox": [ + 354, + 618, + 462, + 625 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "To solve this problem, we need to follow these steps:", + "1. Understand the given equation: The equation is $x^{10} - (13x - 1)^{10} = 0$ .", + "2. Identify the roots: The roots of the", + "equation are given as", + "$\\overline{r_1},\\overline{r_2},\\overline{r_3},\\overline{r_4},\\overline{r_5},\\overline{r_6},\\overline{r_7},\\overline{r_8}$ where the bar denotes complex conjugation.", + "3. Use properties of complex conjugates:", + "For any complex number $z$ , the product $z \\cdot z$ is a complex function of the modulus of $z$ , i.e., $|z \\cdot \\overline{z}| = |z|^2$ .", + "4. Simplify the expression: We need to find", + "the value of 1", + "r1+ r2r3+ r4", + "#", + "Apply an property of complex conjugates. Since $\\pi_1 \\cdot \\overline{\\pi}_1 = |\\pi_1|^2$ , we have", + "1+", + "A", + "6.Sum the reciprocals: Sum the values", + "1+1 1 1", + "7. Use Vieta's formulas: To find the sum of the reciprocals of the moduli squared, we can use the fact that the sum of the roots of the polynomial and their properties.", + "8. Finalize the solution: Combine all the", + "steps to find the final value", + "Now, let's guide the low-level math solving", + "agent to implement these steps." 
+ ], + "bbox": [ + 176, + 625, + 290, + 839 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "To solve the given problem, we start by analyzing the equation $x^{10} + (13x - 1)^{10} = 0$ .", + "bbox": [ + 297, + 625, + 516, + 628 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "This can be rewritten as:", + "bbox": [ + 297, + 630, + 357, + 635 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\nx ^ {1 0} = - (1 3 x - 1) ^ {1 0}.\n$$\n", + "text_format": "latex", + "bbox": [ + 382, + 640, + 437, + 645 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Taking the 10th root on both sides, we get:", + "bbox": [ + 297, + 650, + 403, + 656 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\nx = \\omega (1 3 x - 1),\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 661, + 431, + 666 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "where $\\omega$ is a 10th root of $-1$ . The 10th roots of $-1$ are given by $\\omega = e^{\\pi (2k + 1) / 10}$ for $k = 0, 1, 2, \\ldots, 9$ .", + "bbox": [ + 297, + 671, + 506, + 683 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Rewriting the equation, we have:", + "bbox": [ + 297, + 688, + 380, + 694 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "$x = \\omega (13x - 1)\\Rightarrow x - 13\\omega x = -\\omega \\Rightarrow x(1 - 13\\omega) = -\\omega \\Rightarrow x = \\frac{-\\omega}{1 - 13\\omega}$", + "bbox": [ + 299, + 698, + 517, + 710 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Thus, the roots of the equation are:", + "bbox": [ + 297, + 712, + 383, + 717 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\nr _ {k} = \\frac {- \\omega_ {k}}{1 - 1 3 \\omega_ {k}}\n$$\n", + "text_format": "latex", + "bbox": [ + 299, + 724, + 431, + 734 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "where $\\omega_{k} = e^{i\\pi (2k + 1) / 10}$ for $k = 0,1,2$", + "bbox": [ + 297, + 738, + 393, + 743 + ], + 
"page_idx": 31 + }, + { + "type": "text", + "text": "The roots come in conjugate pairs, so the roots are $r_1, \\overline{r_1}, r_2, \\overline{r_2}, r_3, \\overline{r_3}, r_4, \\overline{r_4}, r_5, \\overline{r_5}$ . We need to find the value of:", + "bbox": [ + 297, + 750, + 514, + 761 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{r _ {1} \\overline {{r _ {1}}}} + \\frac {1}{r _ {2} \\overline {{r _ {2}}}} + \\frac {1}{r _ {3} \\overline {{r _ {3}}}} + \\frac {1}{r _ {4} \\overline {{r _ {4}}}} + \\frac {1}{r _ {5} \\overline {{r _ {5}}}}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 763, + 460, + 777 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Since $r_k\\overline{r_k} = |r_k|^2$ we have:", + "bbox": [ + 297, + 780, + 370, + 786 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{r _ {k} r _ {k}} = \\frac {1}{| r _ {k} | ^ {2}}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 790, + 429, + 804 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "We need to find $|r_k|^2$ . 
Note that:", + "bbox": [ + 297, + 808, + 377, + 813 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\nr _ {k} = \\frac {- \\omega_ {k}}{1 - 1 3 \\omega_ {k}}\n$$\n", + "text_format": "latex", + "bbox": [ + 387, + 818, + 429, + 829 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Low-Level Agent's Output (part II)", + "bbox": [ + 616, + 619, + 728, + 625 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "The magnitude of $r_k$ is:", + "bbox": [ + 527, + 628, + 586, + 635 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n| r _ {k} | = \\left| \\frac {- \\omega_ {k}}{1 - 1 3 \\omega_ {k}} \\right| = \\left| \\frac {| \\omega_ {k} |}{1 - 1 3 \\omega_ {k}} \\right| = \\frac {1}{1 - 1 3 \\omega_ {k}}\n$$\n", + "text_format": "latex", + "bbox": [ + 607, + 638, + 733, + 652 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Thus,", + "bbox": [ + 527, + 656, + 542, + 661 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n| r _ {k} | ^ {2} = \\frac {1}{| 1 - 1 3 \\omega_ {k} | ^ {2}}\n$$\n", + "text_format": "latex", + "bbox": [ + 643, + 665, + 699, + 678 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Therefore,", + "bbox": [ + 527, + 681, + 553, + 686 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {1}{r _ {2} r _ {1}} = | 1 - 1 3 \\omega_ {k} | ^ {2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 643, + 691, + 700, + 704 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "We need to sum this over the 5 pairs of roots:", + "bbox": [ + 527, + 709, + 640, + 714 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {k = 0} ^ {4} | 1 - 1 3 \\omega_ {k} | ^ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 650, + 718, + 692, + 732 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Since $\\omega_{k} = e^{i\\pi (2k + 1) / 10}$ , we have", + "bbox": [ + 527, + 738, + 609, + 743 + ], + "page_idx": 31 + }, + { + 
"type": "list", + "sub_type": "text", + "list_items": [ + "$|1 - 13\\omega_{k}|^{2} = (1 - 13\\omega_{k})(1 - 13\\overline{\\omega_{k}}) = (1 - 13\\omega_{k})(1 - 13e^{-i\\pi (2k + 1) / 10}) = 1 - 13\\omega_{k} - 13e^{-i\\pi (2k + 1) / 10} + 169.$", + "Since $\\omega_{k}\\overline{\\omega_{k}} = 1$ we get:" + ], + "bbox": [ + 527, + 747, + 812, + 763 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\left| 1 - 1 3 \\omega_ {k} \\right| ^ {2} = 1 6 9 - 1 3 (\\omega_ {k} + e ^ {- i \\pi (2 k + 1) \\cdot 1 0}) + 1 = 1 7 0 - 1 3 \\cdot 2 \\cos \\left(\\frac {\\pi (2 k + 1)}{1 0}\\right) = 1 7 0 - 2 6 \\cos \\left(\\frac {\\pi (2 k + 1)}{1 0}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 527, + 768, + 816, + 781 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Summing over $k = 0,1,2,3,4$ we get:", + "bbox": [ + 527, + 785, + 625, + 791 + ], + "page_idx": 31 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {k = 0} ^ {n - 1} \\left(1 7 0 - 2 6 \\cos \\left(\\frac {\\pi (2 k + 1)}{1 0}\\right)\\right) = 5 \\cdot 1 7 0 - 2 6 \\sum_ {k = 0} ^ {n - 1} \\cos \\left(\\frac {\\pi (2 k + 1)}{1 0}\\right) = 8 5 0 - 2 6 \\cdot 0 = 8 5 0.\n$$\n", + "text_format": "latex", + "bbox": [ + 547, + 795, + 795, + 810 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Thus, the value is:", + "bbox": [ + 527, + 815, + 573, + 820 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/6cce98a8906c58ccf3aa6cedf13fe8485572fcdb57dd27e9b6cd696eb48335fd.jpg", + "image_caption": [ + "Figure 10: Case Study for consistency reward of high-level agent", + "Figure 11: Case Study for base correctness reward of high-level agent" + ], + "image_footnote": [], + "bbox": [ + 663, + 824, + 679, + 830 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 31 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Llama3.2-1B-Instruct training 
Output:" + ], + "code_body": "Question: The product of $3t^{2} + 5t + a$ and $4t^{2} + bt - 2$ is $12t^{4} + 26t^{3} - 8t^{2} - 16t + 6$ . What is $a + b$ ?", + "guess_lang": "txt", + "bbox": [ + 241, + 97, + 754, + 109 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Llama3.1-8B-Instruct training Output:" + ], + "code_body": "{\n \"action\": \"DECOMPOSE\",\n \"output\": \"Decompose the problem into finding the coefficients of the product of two polynomials and then solving for a and b.\"\n}", + "guess_lang": "json", + "bbox": [ + 176, + 133, + 496, + 181 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "{ \"action\": \"EMPTY\", \"output\".\"\" }", + "guess_lang": "txt", + "bbox": [ + 508, + 133, + 821, + 183 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Step 1: To find the product of the two polynomials $3t^{2} + 5t + a$ and $4t^{2} + bt - 2$ , we need to multiply each term of the first polynomial by each term of the second polynomial and then combine like terms. \nStep 2: The product of the two polynomials can be written as $(3t^{2} + 5t + a)(4t^{2} + bt - 2)$ . \nStep 3: Using the distributive property, we multiply each term in the first polynomial by each term in the second polynomial: $(3t^{2})(4t^{2}) + (3t^{2})(bt) + (3t^{2})(-2) + (5t)(4t^{2}) + (5t)(bt) + (5t)(-2) + (a)(4t^{2}) + (a)(bt) + (a)(-2)$ . \nStep 4: Simplifying the products, we get $12t^{4} + 3b^{3} - 6t^{2} + 20t^{3} + 5bt^{2} - 10t + 4at^{2} + abt - 2a$ . \nStep 5: Combining like terms, we have $12t^{4} + (3b + 20)t^{3} + (5b - 6 + 4a)t^{2} + (ab - 10)t - 2a$ . \nStep 6: We are given that the product of the two polynomials is $12t^{4} + 26t^{3} - 8t^{2} - 16t + 6$ . \nWe can equate the coefficients of the corresponding powers of $t$ in the two expressions. \nStep 7: Equating the coefficients of $t^3$ , we get $3b + 20 = 26$ . 
\nStep 8: Equating the coefficients of $t^2$ , we get $5b - 6 + 4a = -8$ . \nStep 9: Equating the constant terms, we get $-2a = 6$ . \nStep 10: Solving the equation $-2a = 6$ , we find that $a = -3$ . \nStep 11: Substituting $a = -3$ into the equation $5b - 6 + 4a = -8$ , we get $5b - 6 + 4(-3) = -8$ , which simplifies to $5b - 18 = -8$ . \nStep 12: Solving for $b$ , we find that $5b = 10$ , so $b = 2$ . \nStep 13: Now that we have found the values of $a$ and $b$ , we can calculate their sum: $a + b = -3 + 2 = -1$ . \nThe final answer is: $\\boxed{-1}$", + "guess_lang": "txt", + "bbox": [ + 176, + 189, + 480, + 465 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Step 1: The problem asks us to find the values of $a$ and $b$ in the quadratic expressions $3t^2 + 5t + a$ and $4t^2 + bt - 2$ , respectively, given that their product is $12t^4 + 26t^3 - 8t^2 - 16t + 6$ .", + "guess_lang": "txt", + "bbox": [ + 506, + 194, + 810, + 223 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Step 2: To solve this, we need to multiply the two quadratic expressions and equate the resulting expression to the given product $12t^4 + 26t^3 - 8t^2 - 16t + 6$ .", + "guess_lang": "txt", + "bbox": [ + 506, + 232, + 818, + 261 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Step 3: We start by multiplying the two quadratic expressions: $(3t^{2} + 5t + a)(4t^{2} + bt - 2)$ .", + "guess_lang": "txt", + "bbox": [ + 506, + 270, + 761, + 290 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Step 4: This gives us $12t^4 + (3b + 5)t^3 + (4a + 5b - 6)t^2 + (-2b + 10t + 2a)t - 2a.$", + "guess_lang": "txt", + "bbox": [ + 506, + 299, + 779, + 318 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Step 5: We 
know that the coefficients of the resulting expression must be equal to the coefficients of the given product $12t^4 + 26t^3 - 8t^2 - 16t + 6$ .", + "guess_lang": "txt", + "bbox": [ + 506, + 325, + 815, + 345 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Step 6: Equating the coefficients of $t^3$ gives us the equation $3b + 5 = 26$ , which simplifies to $3b = 21$ and thus $b = 7$ .", + "guess_lang": "txt", + "bbox": [ + 506, + 354, + 808, + 373 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Step 7: Equating the constant terms gives us $-2a = -6$ , which simplifies to $a = 3$ .", + "guess_lang": "txt", + "bbox": [ + 506, + 382, + 810, + 402 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Step 8: Now that we have the values of $b$ and $a$ , we can find the sum $a + b$ .", + "guess_lang": "txt", + "bbox": [ + 506, + 411, + 815, + 421 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Step 9: Adding $a = 3$ and $b = 7$ gives us $a + b = 10$ .", + "guess_lang": "txt", + "bbox": [ + 506, + 429, + 728, + 439 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "The final answer is: $\\boxed{10}$", + "guess_lang": "txt", + "bbox": [ + 506, + 448, + 609, + 459 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Figure 12: Case Study for interpretability experiments in Section 4.2.2", + "bbox": [ + 264, + 476, + 730, + 491 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "System prompt:" + ], + "code_body": "You are a math expert trying to solve mathematical problems. Before answering a question, your task is to decompose the original question to make it clearer. 
\nProvide your rewritten content in JSON format: \n{\"action\": \"DECOMPOSE\", \"output\": \"{decomposed question content}}\" \n}} \nRespond only with valid JSON. Do not write an introduction or summary. \nUser prompt: \nHere is the question: [problem.text]", + "guess_lang": "txt", + "bbox": [ + 176, + 534, + 777, + 713 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Prompt for generating final answers using on the question and metacognition reasoning:", + "bbox": [ + 173, + 720, + 748, + 734 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "System prompt:", + "User prompt:" + ], + "code_body": "You are a math expert tasked with solving problems step by step. Follow the provided instructions precisely, showing all reasoning and intermediate steps. Present the final answer within \\boxed{\\{\\}}\\}.", + "guess_lang": "txt", + "bbox": [ + 176, + 760, + 816, + 815 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Here is the question and instructions: \nQuestion \n[problem_text] \nProvided Instruction \n[instruction_text]", + "guess_lang": "txt", + "bbox": [ + 176, + 828, + 550, + 896 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "G.1.2 Prompts for Math problems", + "text_level": 1, + "bbox": [ + 171, + 90, + 426, + 107 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "VRP prompt:", + "text_level": 1, + "bbox": [ + 173, + 114, + 272, + 130 + ], + "page_idx": 33 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "System prompt: \nYou are a math expert tasked with solving problems step by step. Present the final answer within \\boxed{}?. 
\nUser prompt: \nHere is the question: \n{Question}", + "guess_lang": "txt", + "bbox": [ + 173, + 131, + 805, + 218 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "MRP prompt:", + "text_level": 1, + "bbox": [ + 173, + 224, + 276, + 239 + ], + "page_idx": 33 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "System prompt: \nYou are a math expert tasked with solving problems. When solving a problem, your first task is to provide a high-level solution plan as an instruction. Then you need to follow the provided instructions precisely, showing all reasoning and intermediate steps. Finally, you must present the final answer within boxed}. \nUser prompt: \nHere is the question: {Question}", + "guess_lang": "txt", + "bbox": [ + 173, + 241, + 818, + 382 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "MAMRP prompt:", + "text_level": 1, + "bbox": [ + 173, + 388, + 303, + 404 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "high-level agent:", + "bbox": [ + 173, + 404, + 285, + 417 + ], + "page_idx": 33 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "System prompt: \nYou are a math expert specialized in solving mathematical problems, you need to teach a weaker agent with minimal capability in math how to solve a problem step-by-step. \nYour task is to provide a high-level solution plan for the given problem, in order to guide a low-level math solving agent to solve the problem. \nYou can not directly answer the question. You'll be punished if you include any answer in your response. \nYou need to first think deeply in mind and output your final instruction. 
\nUser prompt: \nHere is the question: \n{Question}", + "guess_lang": "txt", + "bbox": [ + 173, + 419, + 828, + 614 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "low-level agent:", + "bbox": [ + 173, + 617, + 279, + 631 + ], + "page_idx": 33 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "System prompt: \nYou are a math expert tasked with solving problems step by step. Follow the provided instructions precisely, showing all reasoning and intermediate steps. Present the final answer within \\boxed{}/. User prompt: Here is the question and instructions: [Question] {Question} [End of Question] [Provided Instruction] {instruction} [End of Instruction]", + "guess_lang": "txt", + "bbox": [ + 173, + 632, + 818, + 827 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "G.1.3 Prompts for LLM-as-a-Judge problems", + "text_level": 1, + "bbox": [ + 171, + 90, + 504, + 107 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "We adopt the prompts from Saha et al. [2025a].", + "bbox": [ + 171, + 114, + 485, + 128 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "VRP prompt:", + "text_level": 1, + "bbox": [ + 171, + 128, + 272, + 143 + ], + "page_idx": 34 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "System prompt: \nPlease act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. You should choose the assistant that follows the user's instructions and answers the user's question better. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of their responses. Begin your evaluation by comparing the two responses and provide a short explanation. 
Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision.. \nDo not allow the length of the responses to influence your evaluation.. \nDo not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[A]\" if assistant A is better, \"[B]\" if assistant B is better.. \nUser prompt: \n[User Question] {instruction} [End of User Question] [The Start of Assistant A's Answer] {response_A} [The End of Assistant A's Answer] [The Start of Assistant B's Answer] {response_B} [The End of Assistant B's Answer]", + "guess_lang": "txt", + "bbox": [ + 173, + 145, + 828, + 520 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "MRP prompt:", + "text_level": 1, + "bbox": [ + 173, + 90, + 274, + 106 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "System prompt:", + "text_level": 1, + "bbox": [ + 176, + 108, + 285, + 122 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. You should choose the assistant that follows the user's instructions and answers the user's question better. First of your task is to build an evaluation plan that can then be executed to assess the response quality. Whenever appropriate, you can choose to also include a step-by-step reference answer as part of the evaluation plan. Enclose your evaluation plan between the tags \"[Start of Evaluation Plan]\" and \"[End of Evaluation Plan)\". After that, please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. 
You should choose the assistant that follows the user's instructions and answers the user's question better. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of their responses. Begin your evaluation by comparing the two responses and provide a short explanation. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[A]\" if assistant A is better, \"[B]\" if assistant B is better. User prompt: [User Question] {instruction} [End of User Question] [The Start of Assistant A's Answer] {response_A} [The End of Assistant A's Answer] [The Start of Assistant B's Answer] {response_B} [The End of Assistant B's Answer]", + "bbox": [ + 176, + 123, + 825, + 633 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "MAMRP prompt: high-level agent:", + "text_level": 1, + "bbox": [ + 173, + 640, + 415, + 655 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "System prompt:", + "text_level": 1, + "bbox": [ + 176, + 657, + 285, + 672 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "We want to evaluate the quality of the responses provided by AI assistants to the user question displayed below. For that, your task is to help us build an evaluation plan that can then be executed to assess the response quality. 
Whenever appropriate, you can choose to also include a step-by-step reference answer as part of the evaluation plan.", + "bbox": [ + 176, + 672, + 797, + 755 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "User prompt:", + "text_level": 1, + "bbox": [ + 178, + 755, + 267, + 768 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "[User Question] \n{Question} \n[End of User Question]", + "bbox": [ + 179, + 768, + 393, + 809 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "36", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "low-level agent:", + "text_level": 1, + "bbox": [ + 173, + 92, + 279, + 104 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "System prompt:", + "text_level": 1, + "bbox": [ + 178, + 109, + 284, + 122 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. Your evaluation should be performed by following the provided evaluation plan step-by-step. Avoid copying the plan when doing the evaluation.", + "bbox": [ + 176, + 123, + 828, + 189 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Please also only stick to the given plan and provide explanation of how the plan is executed to compare the two responses.", + "bbox": [ + 176, + 191, + 808, + 219 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision.", + "bbox": [ + 176, + 220, + 807, + 246 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. 
Be as objective as possible.", + "bbox": [ + 176, + 247, + 790, + 287 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "After providing your evaluation, output your final verdict by strictly following this format: \"[A]\" if assistant A is better, \"[B]\" if assistant B is better.", + "bbox": [ + 176, + 287, + 823, + 328 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "User prompt:", + "text_level": 1, + "bbox": [ + 179, + 330, + 267, + 343 + ], + "page_idx": 36 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "[User Question] \n{instruction} \n{End of User Question] \n{The Start of Assistant A's Answer} \n{response_A} \n{The End of Assistant A's Answer} \n{The Start of Assistant B's Answer} \n{response_B} \n{The End of Assistant B's Answer} \n{The Start of Evaluation Plan} \n{evaluation計劃} \n{The End of Evaluation Plan}", + "guess_lang": "txt", + "bbox": [ + 178, + 343, + 521, + 508 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "G.2 Multi-turn ReMA prompts", + "text_level": 1, + "bbox": [ + 171, + 90, + 408, + 107 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "G.2.1 SFT data collection of multi-turn MAMRP", + "text_level": 1, + "bbox": [ + 171, + 116, + 529, + 130 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "System prompt:", + "text_level": 1, + "bbox": [ + 176, + 141, + 285, + 155 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "You are classifying reasoning process data into two types of thinking. You will be given a question-answer pair from a reasoning dataset. Your task is to split all words into two parts. 
These words are crucial for analyzing reasoning patterns, so do not skip any details.", + "bbox": [ + 176, + 155, + 813, + 223 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "- **Meta-Thinking Agent (MTA):** Responsible for high-level thought processes. This includes planning, evaluating steps, expressing uncertainty, making observations, or setting goals. Avoid detailed calculations. The content should be enclosed in `` and ``.", + "bbox": [ + 176, + 224, + 797, + 292 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "- $\\star \\star$ Reasoning Agent (RA): $\\star \\star$ Responsible for detailed problem-solving steps, such as calculations, logical deductions, or breaking down a problem into subproblems. The content should be enclosed in `` and ``.", + "bbox": [ + 176, + 292, + 807, + 348 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "\\*\\*Rules to follow: \\*\\*", + "bbox": [ + 176, + 349, + 377, + 359 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "1. **Do not assign large chunks of text to a single type of thinking.** The reasoning process consists of small, nonlinear thinking steps, so alternate appropriately between Meta-Thinking and Reasoning steps.", + "bbox": [ + 176, + 362, + 805, + 416 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "2. **Keep the words from the original solution unmodified whenever possible.** Words like \"Wait,\" \"Hmm,\" \"But,\" etc., typically indicate Meta-Thinking and should be preserved.", + "bbox": [ + 176, + 417, + 754, + 458 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "3. 
$\\star \\star$ When finalizing the answer: $\\star \\star$", + "bbox": [ + 176, + 459, + 524, + 470 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "- The \\*\\*Meta-Thinking Agent (MTA) \\*\\* must explicitly confirm the answer before completion and output '[FINISH]'.", + "bbox": [ + 176, + 472, + 797, + 500 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "- The $\\star \\star$ Reasoning Agent (RA) $\\star \\star$ should then provide the final answer in the correct format.", + "bbox": [ + 176, + 500, + 767, + 525 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "4. **Do not skip any reasoning steps, even if they seem redundant, incorrect or irrelevant**", + "bbox": [ + 176, + 527, + 730, + 553 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "5. **Do not modify or remove any part of the original reasoning process**, even if it seems redundant or repetitive. The goal is to **preserve the exact flow of thought** as it naturally occurs.", + "bbox": [ + 176, + 554, + 816, + 595 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "6. **Retain all expressions such as \"Wait,\" \"Hmm,\" \"But wait,\" etc., exactly as they appear. 
These indicate important cognitive processes and should not be skipped or altered.**", + "bbox": [ + 176, + 595, + 816, + 637 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Here are examples for you: [Examples] ...", + "bbox": [ + 176, + 638, + 433, + 664 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "User prompt:", + "text_level": 1, + "bbox": [ + 176, + 665, + 267, + 678 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "[Begin of Question] \n{question} \n[End of Question] \n[Begin of Solution] \n{solution} \n[End of Solution]", + "bbox": [ + 176, + 679, + 364, + 761 + ], + "page_idx": 37 + }, + { + "type": "page_number", + "text": "38", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "G.2.2 Prompt for math problems", + "text_level": 1, + "bbox": [ + 171, + 90, + 416, + 107 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Meta-Thinking Agent (MTA):", + "text_level": 1, + "bbox": [ + 173, + 114, + 383, + 128 + ], + "page_idx": 38 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "System prompt: \nYou are a meta-think agent that represents human high-level think process, when solving a question, you will have a discussion with human, each time you think about what to do next: e.g. 
\n- Exploring multiple angles and approaches \n- Breaking down the solution into clear steps \n- Continuously reflecting on intermediate results honestly and adapt your strategy as you progress \n- Backtracking when necessary \n- Requesting exploration of multiple solutions individually \n- Finally confirm the answer with the tag [FINISH] \nUser prompt: \n{question}", + "guess_lang": "txt", + "bbox": [ + 173, + 137, + 826, + 321 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Reasoning Agent (RA):", + "text_level": 1, + "bbox": [ + 173, + 327, + 338, + 343 + ], + "page_idx": 38 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "System prompt: Please reason step by step follow the given instruction, when asked to finalize your answer, put your answer within \\boxed{} User prompt: {question} {instruction}", + "guess_lang": "txt", + "bbox": [ + 173, + 349, + 807, + 436 + ], + "page_idx": 38 + }, + { + "type": "page_number", + "text": "39", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 38 + } +] \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09501/7c196e4e-1362-4974-a470-65c83d863927_model.json b/data/2025/2503_09xxx/2503.09501/7c196e4e-1362-4974-a470-65c83d863927_model.json new file mode 100644 index 0000000000000000000000000000000000000000..2df6fbf6437128e6a96195d28904d158e87b31ae --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/7c196e4e-1362-4974-a470-65c83d863927_model.json @@ -0,0 +1,8363 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.264, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2503.09501v3 [cs.AI] 27 May 2025" + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.123, + 0.783, + 0.174 + ], + "angle": 0, + "content": "ReMA: Learning to Meta-think for LLMs with Multi-agent Reinforcement Learning" + }, + { + "type": "text", + "bbox": [ + 0.202, + 0.224, + 0.796, + 0.257 + ], + "angle": 0, + "content": "Ziyu Wan\\(^{1,2*}\\), Yunxiang Li\\(^{3*}\\), 
Xiaoyu Wen\\(^{1,2}\\), Yan Song\\(^{4}\\), Hanjing Wang\\(^{1}\\), Linyi Yang\\(^{4}\\), Mark Schmidt\\(^{3}\\), Jun Wang\\(^{4}\\), Weinan Zhang\\(^{1}\\), Shuyue Hu\\(^{2\\ddagger}\\), Ying Wen\\(^{1\\ddagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.394, + 0.269, + 0.607, + 0.285 + ], + "angle": 0, + "content": "1 Shanghai Jiao Tong University" + }, + { + "type": "text", + "bbox": [ + 0.352, + 0.284, + 0.645, + 0.299 + ], + "angle": 0, + "content": "\\(^{2}\\) Shanghai Artificial Intelligence Laboratory" + }, + { + "type": "text", + "bbox": [ + 0.392, + 0.299, + 0.607, + 0.313 + ], + "angle": 0, + "content": "3 University of British Columbia" + }, + { + "type": "text", + "bbox": [ + 0.403, + 0.314, + 0.594, + 0.329 + ], + "angle": 0, + "content": "4 University College London" + }, + { + "type": "list", + "bbox": [ + 0.352, + 0.269, + 0.645, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.364, + 0.538, + 0.38 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.395, + 0.767, + 0.685 + ], + "angle": 0, + "content": "Recent research on Reasoning of Large Language Models (LLMs) has sought to further enhance their performance by integrating meta-thinking—enabling models to monitor, evaluate, and control their reasoning processes for more adaptive and effective problem-solving. However, current single-agent work lacks a specialized design for acquiring meta-thinking, resulting in low efficacy. To address this challenge, we introduce Reinforced Meta-thinking Agents (ReMA), a novel framework that leverages Multi-Agent Reinforcement Learning (MARL) to elicit meta-thinking behaviors, encouraging LLMs to think about thinking. ReMA decouples the reasoning process into two hierarchical agents: a high-level meta-thinking agent responsible for generating strategic oversight and plans, and a low-level reasoning agent for detailed executions. 
Through iterative reinforcement learning with aligned objectives, these agents explore and learn collaboration, leading to improved generalization and robustness. Empirical results from single-turn experiments demonstrate that ReMA outperforms single-agent RL baselines on complex reasoning tasks, including competitive-level mathematical benchmarks and LLM-as-a-Judge benchmarks. Additionally, we further extend ReMA to multi-turn interaction settings, leveraging turn-level ratio and parameter sharing to improve efficiency. Comprehensive ablation studies further illustrate the evolving dynamics of each distinct agent, providing valuable insights into how the meta-thinking reasoning process enhances the reasoning capabilities of LLMs. Our code can be found in https://github.com/ziyuwan/ReMA-public" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.71, + 0.314, + 0.726 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.741, + 0.827, + 0.853 + ], + "angle": 0, + "content": "Large language models (LLMs) have demonstrated remarkable capabilities in knowledge understanding and complex reasoning tasks [Chowdhery et al., 2023, Achiam et al., 2023, Anil et al., 2023, Dubey et al., 2024]. The paradigm in developing LLM-based reasoning models is shifting from scaling training-time computation towards scaling test-time computation [Snell et al., 2024]. Recent advancements, such as OpenAI-o1 [OpenAI, 2024], Deepseek R1 [DeepSeek-AI et al., 2025], and Gemini 2.0 Flash Thinking [DeepMind, 2025], have demonstrated that allowing LLMs to think before generating answers can significantly enhance performance and lead to the emergence of human-like reasoning patterns. These patterns like \"Wait, hold on.\" or \"Let's break this down.\"" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.86, + 0.316, + 0.874 + ], + "angle": 0, + "content": "*Equal contribution." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.874, + 0.642, + 0.888 + ], + "angle": 0, + "content": "\\(^{\\dagger}\\)Work done during internship at Shanghai Artificial Intelligence Laboratory" + }, + { + "type": "page_footnote", + "bbox": [ + 0.194, + 0.888, + 0.336, + 0.902 + ], + "angle": 0, + "content": "Corresponding Author" + }, + { + "type": "footer", + "bbox": [ + 0.172, + 0.923, + 0.315, + 0.937 + ], + "angle": 0, + "content": "Preprint. Under review." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.307, + 0.094, + 0.692, + 0.107 + ], + "angle": 0, + "content": "Question: \\( T = 9.5 \\). If \\( \\log_2 x^T - \\log_4 x = \\log_8 x^k \\) is an identity for all \\( x > 0 \\), compute the value of \\( k \\)." + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.109, + 0.824, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.307, + 0.825, + 0.364 + ], + "angle": 0, + "content": "Figure 1: Left: A construction-based method that fine-tunes LLMs using rejection sampling, searching among combinations of pre-defined templates. Middle: R1-like method learns to mix meta-thinking and detailed reasoning steps during training. Right: Our method ReMA separates the meta-thinking and reasoning steps in a multi-agent system and updated by reinforcement learning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.381, + 0.825, + 0.424 + ], + "angle": 0, + "content": "indicate that LLMs can develop a form of meta-thinking abilities that can generalize well to out-of-distribution (OOD) tasks [Xiang et al., 2025]. Meta-thinking, also known as metacognitive skills [Flavell, 1979], is an ability traditionally considered uniquely human [Didolkar et al., 2024]." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.43, + 0.827, + 0.651 + ], + "angle": 0, + "content": "To cultivate meta-thinking patterns from LLMs themselves, recent construction-based supervised approaches leverage supervised finetuning on structured reasoning trajectories. Specifically, these methods sampling reasoning trajectories from predefined meta-thinking templates and then use supervised finetuning (SFT) or direct preference optimization (DPO) [Rafailov et al., 2023] to teach LLMs imitate these patterns [Qi et al., 2024, Yue et al., Xi et al., 2024, Yang et al., 2025, Muenighoff et al., 2025, Ye et al., 2025c]. However, such methods lack sufficient flexibility for LLMs to explore suitable meta-thinking patterns. Thus, they often fail to generalize to out-of-distribution (OOD) problems, leading to unstable performance on unseen data [Kirk et al., Chu et al., 2025]. Besides construction-based methods, R1-like single-agent reinforcement learning (SARL) has also been adopted for meta-thinking in reasoning [DeepSeek-AI et al., 2025, Xie et al., 2025]. However, these SARL attempts typically rely on strong foundational models for easier exploration or extensive task-specific fine-tuning for stable training [Xu et al., 2025, Gandhi et al., 2025]. Furthermore, SARL needs to learn meta-thinking and reasoning within a single forward pass, seeking to capture complex reasoning structures purely in an autoregressive manner [Xie et al., 2025]. This can potentially lead to issues such as inefficient exploration as well as reduced readability and early convergence to local optima [DeepSeek-AI et al., 2025, Xiang et al., 2025]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.657, + 0.827, + 0.809 + ], + "angle": 0, + "content": "To address these limitations, we introduce Reinforced Meta-thinking Agents (ReMA), a novel framework that leverages multi-agent reinforcement learning (MARL) to encourage LLMs to think about thinking. 
Our approach employs a multi-agent system (MAS) composed of a high-level meta-thinking agent, responsible for strategic oversight and instruction generation, and a low-level reasoning agent tasked with detailed executing processes based on provided guidance. We compare the inference process among the construction-based method, R1-like method, and ReMA in Fig. 1. Since MAS distributes the exploration space of SARL into multiple agents, it enables each agent to explore more structurally and efficiently during training. Then we apply reinforcement learning on each agent with aligned reward functions. In this way, ReMA effectively balances the trade-off between generalization capability and exploration efficiency. As a result, they can learn to play the best of their role (either to meta-think or to follow instructions), at the present of the other agent." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.825, + 0.913 + ], + "angle": 0, + "content": "To our knowledge, we are the first to formally define and optimize a multi-agent meta-thinking reasoning process (MAMRP) through multi-agent reinforcement learning. Our extensive experiments span both math reasoning and LLM-as-a-Judge tasks, where ReMA consistently achieves the highest average performance across three backbone pretrained models. We further extend ReMA to multi-turn interaction settings on math reasoning tasks, implementing turn-level ratio to optimize trajectory returns and stabilize training. Through comprehensive ablation studies, we illustrate the evolving dynamics between agents, revealing unexpected interaction patterns such as role reversals" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.123 + ], + "angle": 0, + "content": "under different reward settings. 
These findings provide valuable insights into how meta-thinking processes enhance the reasoning capabilities of LLMs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.141, + 0.321, + 0.158 + ], + "angle": 0, + "content": "2 Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.173, + 0.825, + 0.203 + ], + "angle": 0, + "content": "In this section, we outline the formulation of the vanilla reasoning process (Sec. 2.1) and the representative training methods (Sec. 2.2) along with the notation used throughout the paper." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.219, + 0.445, + 0.235 + ], + "angle": 0, + "content": "2.1 Vanilla Reasoning Process (VRP)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.245, + 0.825, + 0.29 + ], + "angle": 0, + "content": "The probability of generating a response \\(\\mathbf{y}\\) equals the product of its stepwise probabilities. Given a model \\(\\pi_{\\theta}\\) and a prompt \\(\\mathbf{x} = (x_1, \\ldots, x_N)\\), the vanilla reasoning process (VRP) autoregressively produces a response \\(\\mathbf{y} = (y_1, \\ldots, y_L)\\) with" + }, + { + "type": "equation", + "bbox": [ + 0.264, + 0.297, + 0.732, + 0.34 + ], + "angle": 0, + "content": "\\[\n\\pi_ {\\theta} (\\mathbf {y} | \\mathbf {x}) = \\prod_ {l = 1} ^ {L} \\pi_ {\\theta} (y _ {l} | x _ {1}, x _ {2}, \\dots x _ {N}, y _ {1}, \\dots , y _ {l - 1}) = \\prod_ {l = 1} ^ {L} \\pi_ {\\theta} (\\mathbf {y} _ {l} | \\mathbf {x}, \\mathbf {y} _ {< l})\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.347, + 0.825, + 0.378 + ], + "angle": 0, + "content": "The response usually contains intermediate reasoning steps before arriving at the final answer, this process is also known as chain-of-thought (CoT) [Wei et al., 2022], which can be represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.423, + 0.386, + 0.825, + 0.407 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} \\xrightarrow {\\text {r e a s o n i n g s t e p s}} \\mathbf {y} \\sim 
\\mathbf {a}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.414, + 0.647, + 0.43 + ], + "angle": 0, + "content": "where \\(\\mathbf{a}\\) is the extracted final answer, which is included in the answer \\(\\mathbf{y}\\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.446, + 0.513, + 0.463 + ], + "angle": 0, + "content": "2.2 Training VRP via Reinforcement Learning" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.472, + 0.825, + 0.502 + ], + "angle": 0, + "content": "RL frames VRP decoding process as a deterministic, token-level Markov Decision process (MDP) [Wang et al., 2024a]. Its objective is" + }, + { + "type": "equation", + "bbox": [ + 0.374, + 0.509, + 0.622, + 0.528 + ], + "angle": 0, + "content": "\\[\n\\mathcal {J} (\\theta) = \\mathbb {E} _ {(\\mathbf {x}, \\mathbf {y} ^ {*}) \\sim \\mathcal {D}, \\mathbf {y} \\sim \\pi_ {\\theta}} \\left[ R (\\mathbf {y}, \\mathbf {y} ^ {*}) \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.535, + 0.825, + 0.565 + ], + "angle": 0, + "content": "where \\( R(\\cdot, \\cdot) \\) represents a reward function comparing generated answer \\( \\mathbf{y} \\) with the golden answer \\( \\mathbf{y}^* \\) for any question \\( \\mathbf{x} \\) sampled from dataset \\( \\mathcal{D} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.568, + 0.825, + 0.622 + ], + "angle": 0, + "content": "To compute the gradient \\(\\nabla_{\\theta}\\mathcal{J}(\\theta)\\), computationally efficient algorithms GRPO [Shao et al., 2024] and REINFORCE++ [Hu, 2025] are widely adopted. 
Take GRPO as an example, given a question-answer pair \\(\\mathbf{x},\\mathbf{y}^*\\) and a group of \\(G\\) generated responses \\(\\mathbf{y}_i\\), denote \\(\\mathbf{y}_{i,j}\\) as the \\(j\\)-th token of the \\(i\\)-th response, it optimizes the following token-level objective:" + }, + { + "type": "equation", + "bbox": [ + 0.191, + 0.628, + 0.825, + 0.689 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {J} (\\boldsymbol {\\theta}) = \\mathbb {E} _ {(\\mathbf {x}, \\mathbf {y} ^ {*}) \\sim \\mathcal {D}, \\{\\mathbf {y} _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\boldsymbol {\\theta} _ {\\mathrm {o l d}}} (\\cdot | \\mathbf {x})} \\\\ \\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| \\mathbf {y} _ {i} |} \\sum_ {j = 1} ^ {| \\mathbf {y} _ {i} |} \\left(\\min \\left(r _ {i, j} (\\theta) \\hat {A} _ {i, j}, \\operatorname {c l i p} \\left(r _ {i, j} (\\theta), 1 - \\epsilon , 1 + \\epsilon\\right) \\hat {A} _ {i, j}\\right) - \\beta D _ {\\mathrm {K L}} \\left(\\pi_ {\\theta} \\| \\pi_ {\\text {r e f}}\\right)\\right) \\right], \\tag {2} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.698, + 0.777, + 0.716 + ], + "angle": 0, + "content": "where the token-level ratio \\( r_{i,j}(\\theta) \\) and the group-normalized advantage \\( \\hat{A}_{i,j} \\) are defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.286, + 0.723, + 0.71, + 0.761 + ], + "angle": 0, + "content": "\\[\nr _ {i, j} (\\theta) = \\frac {\\pi_ {\\theta} \\left(\\mathbf {y} _ {i , j} \\mid \\mathbf {x} , \\mathbf {y} _ {i , < j}\\right)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} \\left(\\mathbf {y} _ {i , j} \\mid \\mathbf {x} , \\mathbf {y} _ {i , < j}\\right)}, \\hat {A} _ {i, j} = \\frac {R _ {i} - \\operatorname {m e a n} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}{\\operatorname {s t d} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.775, + 0.825, + 0.819 + ], + 
"angle": 0, + "content": "However, RL on base LLMs that haven't been well-aligned may suffer from issues like poor readability and language mixing, preventing researchers from verifying, understanding, and further developing their LLMs. And huge searching space makes efficient learning of meta-thinking daunting." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.838, + 0.273, + 0.854 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.826, + 0.914 + ], + "angle": 0, + "content": "In this section, we present Reinforced Meta-thinking Agents (ReMA), a RL method integrating meta-thinking into the reasoning process of LLM under multi-agent settings (Sec. 3.1), then describe the learning process enabled by MARL of single- and multi-turn LLM setting (Secs. 3.2.1 and 3.2.2)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.6, + 0.107 + ], + "angle": 0, + "content": "3.1 Deploying Meta-Thinking Reasoning Process for LLMs" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.117, + 0.825, + 0.201 + ], + "angle": 0, + "content": "Beyond VRP (Sec. 2.1), recent studies [Muennighoff et al., 2025, Ye et al., 2025c] have shown that integrating meta-thinking behaviors in reasoning process can largely improve the accuracy of the final answers. By integrating Meta-thinking, ReMA decomposes problem solving into two sequential phases: a meta-thinking phase that plans, monitors, or revises strategy, followed by a reasoning phase that produces the detailed solution. We analyse Meta-thinking Reasoning Process along two orthogonal axes—single- vs. multi-agent and single- vs. multi-turn." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.207, + 0.825, + 0.236 + ], + "angle": 0, + "content": "In a single-agent setting, such a process calls LLM once and generates meta-thinking and the following reasoning autoregressively. We formulate the meta-thinking reasoning process (MRP) below:" + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.243, + 0.825, + 0.259 + ], + "angle": 0, + "content": "\\[\n\\mathbf {y} \\sim \\pi_ {\\theta} (\\mathbf {y} \\mid \\mathbf {x}, \\mathbf {m}) \\cdot \\pi_ {\\theta} (\\mathbf {m} \\mid \\mathbf {x}), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.265, + 0.825, + 0.291 + ], + "angle": 0, + "content": "where \\(\\mathbf{m}\\) and \\(\\mathbf{y}\\) are the output of meta-thinking and reasoning respectively. We present the procedure as shown below:" + }, + { + "type": "equation", + "bbox": [ + 0.377, + 0.292, + 0.824, + 0.311 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} \\xrightarrow {\\text {m e t a - t h i n k i n g}} \\mathbf {m} \\xrightarrow {\\text {r e a s o n i n g s t e p s}} \\mathbf {y} \\sim \\mathbf {a}. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.314, + 0.825, + 0.398 + ], + "angle": 0, + "content": "Exploring MRP reasoning through a single-agent approach is often inefficient, as it requires the language model to simultaneously master both meta-thinking and detailed problem-solving within one call. Prior research has demonstrated that activating different model capabilities through specialized agents significantly improves MRP exploration efficiency. To leverage this insight, we decouple meta-thinking and reasoning into two separate LLM agents: a high-level agent dedicated to generating meta-thinking, and a low-level agent focused on executing reasoning steps." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.403, + 0.825, + 0.459 + ], + "angle": 0, + "content": "During a conversation, the high-level and low-level agents (i.e., \\(\\pi_h\\) and \\(\\pi_l\\)) act in an interleaving manner. The high-level agent generates and summarizes meta-thoughts from the prompt and interaction history, while the low-level agent executes detailed problem-solving under those instructions. We formulate the multi-agent meta-thinking reasoning process (MAMRP) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.401, + 0.467, + 0.824, + 0.482 + ], + "angle": 0, + "content": "\\[\n\\mathbf {y} \\sim \\pi_ {l} (\\mathbf {y} \\mid \\mathbf {x}, \\mathbf {m}) \\pi_ {h} (\\mathbf {m} \\mid \\mathbf {x}). \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.488, + 0.825, + 0.543 + ], + "angle": 0, + "content": "While the single-turn MAMRP offers a straightforward approach, it lacks the ability to perform immediate and fine-grained cognitive switching during the reasoning process, which limits its effectiveness on complex, long-horizon planning tasks. Therefore, we extend Eq. (5) and formulate the multi-turn MAMRP as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.306, + 0.55, + 0.825, + 0.59 + ], + "angle": 0, + "content": "\\[\n\\mathbf {y} _ {T} \\sim \\prod_ {t = 1} ^ {T} \\pi_ {l} \\left(\\mathbf {y} _ {t} \\mid \\mathbf {x}, \\{\\mathbf {m}, \\mathbf {y} \\} _ {< t}, \\mathbf {m} _ {t}\\right) \\pi_ {h} \\left(\\mathbf {m} _ {t} \\mid \\mathbf {x}, \\{\\mathbf {m}, \\mathbf {y} \\} _ {< t}\\right) \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.597, + 0.75, + 0.612 + ], + "angle": 0, + "content": "where \\( T \\) is the number of turns. 
Similarly, we present the process with a directed graph:" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.62, + 0.825, + 0.647 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} \\xrightarrow [ \\pi_ {h} ]{\\text {m e t a - t h i n k i n g}} \\mathbf {m} _ {1} \\xrightarrow [ \\pi_ {l} ]{\\text {r e a s o n i n g}} \\mathbf {y} _ {1} \\xrightarrow [ \\pi_ {h} ]{\\text {m e t a - t h i n k i n g}} \\mathbf {m} _ {2} \\xrightarrow [ \\pi_ {l} ]{\\text {r e a s o n i n g}} \\mathbf {y} _ {2} \\xrightarrow [ \\pi_ {h} ]{\\text {m e t a - t h i n k i n g}} \\dots \\xrightarrow [ \\pi_ {l} ]{\\text {r e a s o n i n g}} \\mathbf {y} _ {T} \\sim \\mathbf {a}. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.653, + 0.825, + 0.681 + ], + "angle": 0, + "content": "As a complex reasoning system, MAMRP provides various optimization opportunities in scaling inference-time computation. We leave further discussion of these aspects in Appendix C.1." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.697, + 0.54, + 0.712 + ], + "angle": 0, + "content": "3.2 Training MAMRP: A Multi-Agent RL Method" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.722, + 0.825, + 0.765 + ], + "angle": 0, + "content": "Multi-agent RL, unlike single-agent RL in a deterministic MDP, must contend with stochastic, nonstationary dynamics and rewards, making optimization more challenging. We start by considering an easier case, the optimization of single-turn MAMRP." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.779, + 0.457, + 0.794 + ], + "angle": 0, + "content": "3.2.1 Optimizing Single-turn MAMRP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.803, + 0.825, + 0.845 + ], + "angle": 0, + "content": "To train the system from Sec. 3.1, we embed it as a Markov Game between the two agents. Suppose the two LLM agents are parameterized by \\(\\theta_h\\) and \\(\\theta_l\\), respectively. 
Define a joint hierarchical policy over sequential decisions \\(\\mathbf{m}\\) and \\(\\mathbf{y}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.329, + 0.852, + 0.825, + 0.869 + ], + "angle": 0, + "content": "\\[\n\\mathbf {y} \\sim \\pi_ {\\left(\\theta_ {h}, \\theta_ {l}\\right)} (\\mathbf {y} \\mid \\mathbf {x}) := \\pi_ {\\theta_ {l}} (\\mathbf {y} \\mid \\mathbf {x}, \\mathbf {m}) \\cdot \\pi_ {\\theta_ {h}} (\\mathbf {m} \\mid \\mathbf {x}), \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.875, + 0.82, + 0.891 + ], + "angle": 0, + "content": "Let \\( R(\\mathbf{y}, \\mathbf{y}^*) \\) denote the final reward, which serves as the objective function \\( \\mathcal{J}(\\theta_h, \\theta_l) \\) for the joint policy:" + }, + { + "type": "equation", + "bbox": [ + 0.368, + 0.898, + 0.825, + 0.916 + ], + "angle": 0, + "content": "\\[\n\\mathcal {J} \\left(\\theta_ {h}, \\theta_ {l}\\right) = \\mathbb {E} _ {\\mathbf {x}, \\mathbf {y} ^ {*}} \\mathbb {E} _ {\\mathbf {y} \\sim \\pi \\left(\\theta_ {h}, \\theta_ {l}\\right)} R (\\mathbf {y}, \\mathbf {y} ^ {*}). 
\\tag {9}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.947 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.194, + 0.089, + 0.329, + 0.101 + ], + "angle": 0, + "content": "RL for VRP & MRP" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.103, + 0.346, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.365, + 0.089, + 0.581, + 0.103 + ], + "angle": 0, + "content": "ReMA with Separate Parameters" + }, + { + "type": "image", + "bbox": [ + 0.346, + 0.104, + 0.603, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.608, + 0.089, + 0.815, + 0.102 + ], + "angle": 0, + "content": "ReMA with Shared Parameters" + }, + { + "type": "image", + "bbox": [ + 0.603, + 0.104, + 0.822, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.279, + 0.825, + 0.378 + ], + "angle": 0, + "content": "Figure 2: Comparison of training pipelines. Left: RL training of VRP and MRP, where a single LM agent is updated either with mixed (VRP) or explicit (MRP) meta-thinking. Middle: ReMA with separate parameters for the high-level (meta-thinking) and low-level (reasoning) agents; training alternates between freezing one agent and updating the other. Right: ReMA with shared parameters and multi-turn interactions: both agents share the same parameters and are distinguished by their system prompts. Training employs a turn-level ratio for stable multi-turn reinforcement learning and efficient updates, ensuring each turn's contribution is controlled to prevent instability." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.394, + 0.825, + 0.423 + ], + "angle": 0, + "content": "During optimization procedure, the high-level policy \\(\\pi_{\\theta_h}\\) and low-level policy \\(\\pi_{\\theta_l}\\) aim to maximize their respective rewards independently. 
The optimization goals for agents are:" + }, + { + "type": "equation", + "bbox": [ + 0.324, + 0.43, + 0.825, + 0.453 + ], + "angle": 0, + "content": "\\[\n\\theta_ {h} ^ {*} = \\arg \\max _ {\\theta_ {h}} \\mathbb {E} _ {(\\mathbf {x}, \\mathbf {y} ^ {*}) \\sim \\mathcal {D}, \\mathbf {m} \\sim \\pi_ {\\theta_ {h}}, \\mathbf {y} \\sim \\pi_ {\\theta_ {l} ^ {*}}} \\left[ R _ {h} (\\mathbf {m}, \\mathbf {y}, \\mathbf {y} ^ {*}) \\right], \\tag {10}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.298, + 0.455, + 0.825, + 0.479 + ], + "angle": 0, + "content": "\\[\n\\theta_ {l} ^ {*} \\left(\\theta_ {h}\\right) = \\arg \\max _ {\\theta_ {l}} \\mathbb {E} _ {\\left(\\mathbf {x}, \\mathbf {y} ^ {*}\\right) \\sim \\mathcal {D}, \\mathbf {m} \\sim \\pi_ {\\theta_ {h}}, \\mathbf {y} \\sim \\pi_ {\\theta_ {l}}} \\left[ R _ {l} \\left(\\mathbf {m}, \\mathbf {y}, \\mathbf {y} ^ {*}\\right) \\right], \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.487, + 0.825, + 0.545 + ], + "angle": 0, + "content": "where \\( R_{h} \\) and \\( R_{l} \\) are policies' individual reward functions, including \\( R \\) and regularization according to tasks and models, e.g., different format rewards (refer to Appendix C.2 for details). The detailed algorithm is in the Algorithm 1. We illustrate the MAMRP inference procedure and the proposed training method in Fig. 2. We also provide an analysis of different loss functions in Appendix C.5." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.56, + 0.466, + 0.575 + ], + "angle": 0, + "content": "3.2.2 Scaling up to Multi-turn MAMRP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.584, + 0.825, + 0.613 + ], + "angle": 0, + "content": "To scale up to multi-turn MAMRP, we can still adopt the iterative training strategy in Sec. 3.2.1. However, we make some changes to improve the efficiency of rollout and training." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.619, + 0.825, + 0.703 + ], + "angle": 0, + "content": "First, we implement a parameter-sharing strategy where both high-level and low-level agents utilize identical model weights \\(\\theta\\), distinguished only by role-specific system prompts \\(S_{h}\\) and \\(S_{l}\\). Formally, we define \\(\\pi_h = \\pi_\\theta (\\cdot |S_h,\\cdot)\\) and \\(\\pi_l = \\pi_\\theta (\\cdot |S_l,\\cdot)\\), sharing the same underlying parameters rather than maintaining separate model instances. This approach eliminates the need for frequent model swapping on GPU during rollout, avoiding inefficient wait times, while enabling larger batch sizes during training to simultaneously optimize policies for both meta-thinking and reasoning roles." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.707, + 0.825, + 0.748 + ], + "angle": 0, + "content": "Second, we propose a multi-turn GRPO with turn-level ratio to address the challenges of multi-turn MAMRP. The trajectory-level averaged objective with turn-level ratio of \\(\\pi_{l}\\) is defined as (The objective of \\(\\pi_h\\) is similar but with a different system prompt):" + }, + { + "type": "equation", + "bbox": [ + 0.172, + 0.754, + 0.435, + 0.773 + ], + "angle": 0, + "content": "\\[\n\\mathcal {J} (\\boldsymbol {\\theta}) = \\mathbb {E} _ {(\\mathbf {x}, \\mathbf {y} ^ {*}) \\sim \\mathcal {D}, \\{(\\mathbf {m} _ {i}, \\mathbf {y} _ {i}) \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {\\mathrm {o l d}}} (\\cdot | \\mathbf {x})\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.206, + 0.776, + 0.824, + 0.827 + ], + "angle": 0, + "content": "\\[\n\\left. 
\\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{T _ {i}} \\sum_ {t = 1} ^ {T _ {i}} \\frac {1}{| \\mathbf {y} _ {i , t} |} \\sum_ {j = 1} ^ {| \\mathbf {y} _ {i, t} |} \\left(\\min \\left(r _ {i, t} (\\theta) \\hat {A} _ {i, t, j}, \\operatorname {c l i p} \\left(r _ {i, t} (\\theta), 1 - \\epsilon , 1 + \\epsilon\\right) \\hat {A} _ {i, t, j}\\right) - \\beta D _ {\\mathrm {K L}} \\left(\\pi_ {\\theta} \\| \\pi_ {\\text {r e f}}\\right)\\right) \\right] \\right. \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.827, + 0.825, + 0.856 + ], + "angle": 0, + "content": "where \\(\\mathbf{y}_{i,t,j}\\) is the \\(j\\)-th token at turn \\(t\\) of the reasoning agent of the \\(i\\)-th trajectory. And the turn-level ratio for clipping is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.19, + 0.865, + 0.825, + 0.909 + ], + "angle": 0, + "content": "\\[\nr _ {i, t} (\\theta) = \\frac {1}{| \\mathbf {y} _ {i , t} |} \\sum_ {j = 1} ^ {| \\mathbf {y} _ {i, t} |} r _ {i, t, j} (\\theta) = \\frac {1}{| \\mathbf {y} _ {i , t} |} \\sum_ {j = 1} ^ {| \\mathbf {y} _ {i, t} |} \\frac {\\pi_ {\\theta} \\left(\\mathbf {y} _ {i , t , j} \\mid \\mathbf {x} , \\left\\{\\mathbf {m} _ {i} , \\mathbf {y} _ {i} \\right\\} _ {< t} , \\mathbf {m} _ {i , t} , \\mathbf {y} _ {i , t , < j}\\right)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} \\left(\\mathbf {y} _ {i , t , j} \\mid \\mathbf {x} , \\left\\{\\mathbf {m} _ {i}, \\mathbf {y} _ {i} \\right\\} _ {< t} , \\mathbf {m} _ {i , t} , \\mathbf {y} _ {i , t , < j}\\right)}. \\tag {13}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.162 + ], + "angle": 0, + "content": "The introduction of the turn-level ratio serves two key purposes. First, using a token-level ratio (Eq. 
(2)) in the objective introduces bias for multi-turn training, as it averages over all tokens in a trajectory. This means that tokens within longer turns (those containing more tokens) can disproportionately influence the overall loss, and averaging at the token level may encourage excessively long single-turn responses. Second, clipping each token independently risks instability during training." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.168, + 0.827, + 0.239 + ], + "angle": 0, + "content": "In contrast, the turn-level ratio aligns more closely with the underlying MDP formulation by treating all tokens within a turn as a single action and applying clipping at the turn level. Intuitively, this approach stabilizes training by preventing the LLM from making unstable updates that could result in extreme outputs, such as overly long repetitions or incoherent text. We conduct experimental verification in subsequent empirical results (Sec. 4.3)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.259, + 0.315, + 0.276 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.292, + 0.825, + 0.348 + ], + "angle": 0, + "content": "To evaluate the effectiveness and efficiency of ReMA, we conduct experiments on challenging benchmarks for two types of tasks: mathematical reasoning and LLM-as-a-Judge with three different LLMs. Then, we investigate the models' performance in both single- & multi-turn settings. Finally, we provide ablation studies and qualitative analyses of our method." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.366, + 0.357, + 0.381 + ], + "angle": 0, + "content": "4.1 Experiment Settings" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.392, + 0.825, + 0.449 + ], + "angle": 0, + "content": "We first analyze the single-turn case of ReMA, i.e., \\( T = 1 \\). 
The high-level agent generates a complete meta-thinking trace in one shot, and the low-level agent follows the instructions and outputs the final results. Single-turn ReMA reduces stochasticity and training cost while our experiments show that it still provides meaningful performance gains." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.465, + 0.825, + 0.548 + ], + "angle": 0, + "content": "Benchmarks We conduct experiments on two types of tasks: mathematical reasoning and LLM-as-a-Judge. For mathematical reasoning experiments, we train models on 7.5k training samples in MATH [Hendrycks et al., 2021] and use MATH500 [Lightman et al., 2023] as the in-distribution test dataset. Additionally, we test the optimized models on out-of-distribution datasets: GSM8K [Cobbe et al., 2021], AIME24\\(^{4}\\), AMC23\\(^{5}\\), GaoKao2023En [Zhang et al., 2023], Minerva Math [Lewkowycz et al., 2022], and Olympiad Bench [He et al., 2024]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.554, + 0.825, + 0.624 + ], + "angle": 0, + "content": "For LLM-as-a-Judge benchmarks, we train models on RewardBench [Lambert et al., 2024]. Specifically, we convert the original data into a pair-ranking format and split it into a training set of 5k items and a test set of 970 items, denoted as RewardBench970. The models are also tested on JudgeBench [Tan et al., 2024] to assess out-of-distribution performance. We refer to Appendix D.1.2 for detailed comparisons and results." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.641, + 0.825, + 0.698 + ], + "angle": 0, + "content": "Baselines, Models, Training Settings We compare pass@1 performance across the following methods: (1) VRP (CoT, step-by-step prompting, Sec. 3.1); (2) \\(\\mathbf{VRP}_{\\mathbf{RL}}\\) (RL under VRP); (3) \\(\\mathbf{MRP}_{\\mathbf{RL}}\\) (RL under MRP with high-level task analysis, Eq. (4)), and (4) ReMA (ours, RL under MAMRP, Eq. (7))." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.702, + 0.825, + 0.801 + ], + "angle": 0, + "content": "We train and test Llama-3-8B-Instruct, Llama-3.1-8B-Instruct [Dubey et al., 2024], and Qwen2.5-7B-Instruct [Team, 2024] on mathematical reasoning benchmarks. For LLM-as-a-judge benchmarks, we train and test Llama-3.1-8B-Instruct and Qwen2.5-7B-Instruct. We use instruct-tuned LLMs to prompt them to perform VRP, MRP, and MAMRP directly during training. Unless specified, we use two separate copies of the same model for high- and low-level agents in ReMA. We use the base reward setting in Appendix C.2 by default. And for the underlying RL algorithm, we use REINFORCE++ [Hu, 2025]. We refer to Appendix D for detailed training settings." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.818, + 0.416, + 0.833 + ], + "angle": 0, + "content": "4.2 Results of Single-turn ReMA" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.845, + 0.825, + 0.873 + ], + "angle": 0, + "content": "Question 1. Does single-turn ReMA outperform baselines on both in-distribution and out-of-distribution test sets?" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.884, + 0.713, + 0.898 + ], + "angle": 0, + "content": "4https://huggingface.co/datasets/AI-MO/aimo-validation-aime" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.899, + 0.705, + 0.911 + ], + "angle": 0, + "content": "5https://huggingface.co/datasets/AI-MO/aimo-validation-amc" + }, + { + "type": "list", + "bbox": [ + 0.191, + 0.884, + 0.713, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.1, + 0.825, + 0.155 + ], + "angle": 0, + "content": "Table 1: Performance on in-distribution test sets and out-of-distribution test sets. We also report the improvement/degradation w.r.t. basic CoT performance (VRP). 
On average, ReMA outperforms all baselines. Particularly on out-of-distribution datasets, ReMA achieves the highest performance on most of the benchmarks." + }, + { + "type": "table_caption", + "bbox": [ + 0.386, + 0.159, + 0.611, + 0.17 + ], + "angle": 0, + "content": "(a) Performance on math benchmarks" + }, + { + "type": "table", + "bbox": [ + 0.207, + 0.173, + 0.794, + 0.54 + ], + "angle": 0, + "content": "
ModelBenchmarkVRP(CoT)\\( \\mathbf{V R P_{R L}} \\)\\( \\mathbf{M R P_{R L}} \\)ReMA(Ours)
Llama3-8B-InstructMATH50030.8033.40 (+2.60)32.80 (+2.00)33.80 (+3.00)
GSM8K67.4881.80 (+14.32)79.68 (+12.20)79.38 (+11.90)
AIME240.000.00 (+0.00)3.33 (+3.33)0.00 (+0.00)
AMC232.5010.00 (+7.50)12.50 (+10.00)22.50 (+20.00)
Gaokao2023en22.3427.53 (+5.19)23.38 (+1.04)28.57 (+6.23)
Minerva Math8.8216.54 (+7.72)18.01 (+9.19)13.97 (+5.15)
Olympiad Bench8.448.89 (+0.45)9.33 (+0.89)8.89 (+0.45)
Average20.0525.45 (+5.40)25.58 (+5.53)26.73 (+6.68)
Llama3.1-8B-InstructMATH50050.8050.20 (-0.60)48.60 (-2.20)53.20 (+2.40)
GSM8K86.0584.53 (-1.52)85.37 (-0.68)87.26 (+1.21)
AIME2410.003.33 (-6.67)6.67 (-3.33)13.33 (+3.33)
AMC2327.5012.50 (-15.00)30.00 (+2.50)20.00 (-7.50)
Gaokao2023en38.9636.10 (-2.86)37.14 (-1.82)37.14 (-1.82)
Minerva Math22.7926.84 (+4.05)25.37 (+2.58)28.31 (+5.52)
Olympiad Bench15.1119.70 (+4.59)15.70 (+0.59)19.56 (+4.45)
Average35.8933.32 (-2.57)35.55 (-0.34)36.97 (+1.08)
Qwen2.5-7B-InstructMATH50075.0077.20 (+2.20)76.40 (+1.40)74.40 (-0.60)
GSM8K92.0491.36 (-0.68)91.81 (-0.23)90.60 (-1.44)
AIME246.676.67 (+0.00)10.00 (+3.33)20.00 (+13.33)
AMC2347.5050.00 (+2.50)52.50 (+5.00)57.50 (+10.00)
Gaokao2023en56.6254.81 (-1.81)55.06 (-1.56)57.92 (+1.30)
Minerva Math35.6634.93 (-0.73)32.35 (-3.31)34.93 (-0.73)
Olympiad Bench38.2238.37 (+0.15)37.78 (-0.44)36.30 (-1.92)
Average50.2450.48 (+0.24)50.84 (+0.60)53.09 (+2.85)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.352, + 0.548, + 0.646, + 0.561 + ], + "angle": 0, + "content": "(b) Performance on LLM-as-a-Judge benchmarks" + }, + { + "type": "table", + "bbox": [ + 0.204, + 0.562, + 0.795, + 0.699 + ], + "angle": 0, + "content": "
ModelBenchmarkVRP(CoT)\\( \\mathbf{V R P_{R L}} \\)\\( \\mathbf{M R P_{R L}} \\)ReMA(Ours)
Llama3.1-8B-InstructRewardBench97069.4882.89 (+13.41)81.13 (+11.65)83.71 (+14.23)
JudgeBench51.2951.94 (+0.65)52.90 (+1.61)52.90 (+1.61)
Average60.3967.41 (+7.02)67.02 (+6.63)68.31 (+7.92)
Qwen2.5-7B-InstructRewardBench97078.5685.36 (+6.80)86.49 (+7.93)83.51 (+4.95)
JudgeBench58.3956.94 (-1.45)58.39 (+0.00)56.94 (-1.45)
Average68.4771.15 (+2.68)72.44 (+3.97)70.22 (+1.75)
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.739, + 0.825, + 0.795 + ], + "angle": 0, + "content": "Table 1 compares the greedy decoding performance of ReMA against various RL baselines across mathematical benchmarks (Table 1a) and LLM-as-a-Judge benchmarks (Table 1b). Results across different LLMs indicate that, on average, ReMA outperforms all baselines, achieving a maximum improvement of \\(6.68\\%\\) on mathematical benchmarks and \\(8.49\\%\\) on LLM-as-a-Judge benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.801, + 0.825, + 0.912 + ], + "angle": 0, + "content": "Notably, ReMA achieves the highest performance on most benchmarks, particularly on out-of-distribution datasets, with a maximum improvement of \\(20\\%\\) on AMC23 for Llama3-8B-Instruct, \\(13.33\\%\\) on AIME24 for Qwen2.5-7B-Instruct, \\(14.23\\%\\) on RewardBench970 for Llama3.1-8B-Instruct. These results demonstrate the superior out-of-distribution generalization ability conferred by the meta-thinking mechanism in ReMA. However, we observe that the accuracy gains from RL training on instruction-tuned LMs are smaller than from base models (Sec. 4.2.1). This may be due to the higher initial performance and the relatively fixed output distribution of instruction-tuned models, which limits the improvement and peak performance in RL." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.947 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.187, + 0.083, + 0.81, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.267, + 0.825, + 0.296 + ], + "angle": 0, + "content": "Figure 3: An RL experiment with 3 training schemes. While RL from SFT excels on easier problems, RL under Meta-thinking shows superior generalization to harder problems like AIME24." 
+ }, + { + "type": "image", + "bbox": [ + 0.174, + 0.302, + 0.486, + 0.419 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.302, + 0.819, + 0.42 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.423, + 0.825, + 0.454 + ], + "angle": 0, + "content": "Figure 4: Average problem difficulty by action type during training. Left: 1B LM collapses to the EMPTY action. Right: 8B LM adapts to a more complex meta-thinking strategy for harder problems." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.469, + 0.538, + 0.485 + ], + "angle": 0, + "content": "4.2.1 Meta-thoughts boost low-level generalization" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.494, + 0.547, + 0.51 + ], + "angle": 0, + "content": "Question 2. Can Reasoning benefit from Meta-thinking?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.515, + 0.825, + 0.613 + ], + "angle": 0, + "content": "Here we provide a tiny but motivating example of how ReMA gives better learning dynamics. We use Qwen2.5-Math-7B [Yang et al., 2024] as the starting base model, MATH (level 3-5, about \\(5.5\\mathrm{K}\\) number of instances) as the training dataset, and we compare three reinforcement learning training schemes, in particular: (1) RL from Base: train the base model directly on MATH with binary outcome reward; (2) RL from SFT: SFT the base model with GPT-4o's CoT answers; then RL on train dataset with binary outcome reward; (3) RL under Meta-thinking: SFT the base model with GPT-4o's meta-thinking plans; then RL on train dataset with binary outcome reward." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.619, + 0.825, + 0.675 + ], + "angle": 0, + "content": "The models are evaluated on 3 benchmarks (Fig. 3). SFT brings the best initial accuracy on in-distribution and easier sets, but fails to improve on harder ones. RL from Base yields limited gains. 
In contrast, RL under Meta-thinking achieves the best learning dynamics and generalizes better to challenging problems (AIME24). See Appendix F.1 for case studies." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.693, + 0.551, + 0.709 + ], + "angle": 0, + "content": "4.2.2 Diverse meta-thinking characteristics of LLMs" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.718, + 0.531, + 0.733 + ], + "angle": 0, + "content": "Question 3. How well can LLMs learn to meta-think?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.739, + 0.825, + 0.837 + ], + "angle": 0, + "content": "To further analyze meta-thinking behaviors, we train models with structured JSON-format actions inspired by Yue et al.. The meta-thinking agent generate two entries in one LM call, first selects from three actions: DECOMPOSE (breaking into subproblems), REWRITE (simplifying the problem), or EMPTY (direct solving), then generates the corresponding text. We compare Llama-3.1-8B-Instruct and Llama-3.2-1B-Instruct to study scale effects (two 1B models vs two 8B models) on meta-thinking agent's training. We use vLLM guided JSON decoding [Dong et al., 2024] for valid formatting and base reward (reasoning agent's solution accuracy with format constraints)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.825, + 0.913 + ], + "angle": 0, + "content": "We observe that smaller LMs produce simpler outputs, likely due to limited capacity to maintain valid JSON formatting while exploring diverse reasoning strategies. As Fig. 4 shows, smaller LMs like Llama-3.2-1B-Instruct quickly converge to the simplest EMPTY action to avoid formatting penalties, while larger LMs like Llama-3.1-8B-Instruct can adapt meta-thinking strategies based on problem difficulty. See Appendix F.3 for detailed case studies." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.177, + 0.079, + 0.468, + 0.211 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.483, + 0.079, + 0.822, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.215, + 0.825, + 0.258 + ], + "angle": 0, + "content": "Figure 5: Training results of multi-turn ReMA on MATH-Level3-5-8K under different training and rollout configurations. Figure 6: Ablations of multi-turn ReMA on a tiny subset of MATH, we only show here the training curves of different configurations." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.289, + 0.502, + 0.305 + ], + "angle": 0, + "content": "4.3 Extending ReMA to Multi-turn MAMRP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.322, + 0.825, + 0.352 + ], + "angle": 0, + "content": "We further extend ReMA to multi-turn MAMRP settings, enabling multiple rounds of interaction between the meta-thinking agent and the reasoning agent as defined in Eq. (7)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.357, + 0.825, + 0.413 + ], + "angle": 0, + "content": "Unlike the inherent VRP capabilities of most LLMs, multi-turn ReMA requires initial bootstrapping. Thus, we constructed a supervised fine-tuning dataset (about 0.8K samples) from LIMO [Ye et al., 2025c] using GPT-4o to establish the starting point for multi-turn interaction capabilities. Then we finetune Qwen2.5-7B before RL training." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.419, + 0.825, + 0.476 + ], + "angle": 0, + "content": "As described in Sec.3.2.2, we deploy the proposed GRPO with turn-level ratio clipping and trajectory-level averaging loss during training. And we remove the KL-divergence term to allow more flexible exploration. By default, the agents share the same parameters and are simultaneously updated using their trajectories. 
We refer to details in Appendix D.2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.508, + 0.379, + 0.523 + ], + "angle": 0, + "content": "4.3.1 Results and Ablations" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.539, + 0.547, + 0.555 + ], + "angle": 0, + "content": "Question 4. Can ReMA be scaled to multi-turn settings?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.56, + 0.825, + 0.644 + ], + "angle": 0, + "content": "There are two key points revealed by our multi-turn ReMA experiments, as shown in Fig. 5. On one hand, the algorithm can demonstrate effective convergence on the training set, with accuracy steadily increasing from approximately \\(55\\%\\) to \\(70\\%\\) during training. It also achieves an average performance gain of about \\(5\\%\\) across all seven test benchmarks, indicating stable improvements on out-of-distribution data. (Experiment with the rollout config of turn30_token512, see Appendix D.2.2 and Fig. 8 for more details.)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.649, + 0.826, + 0.775 + ], + "angle": 0, + "content": "On the other hand, we observe that the performance of multi-turn ReMA is highly sensitive to hyperparameters such as the maximum response length per turn and the maximum number of turns. For certain configurations, the model either collapses into producing massive repetitions within a single turn or generates empty responses after only a few turns. Similar phenomena have been reported in concurrent works such as RAGEN [Wang et al., 2025], where these issues are attributed to the lack of fine-grained, reasoning-aware guidance. As a result, multi-turn RL becomes susceptible to long-horizon credit assignment challenges and state drift, often leading to reduced exploration diversity—a phenomenon referred to as the \"Echo Trap\". To address this challenge, it is essential to comprehensively explore the training recipe w.r.t. model, data, and algorithm." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.78, + 0.747, + 0.796 + ], + "angle": 0, + "content": "Question 5. How does parameter sharing and turn-level ratio affect multi-turn ReMA?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.801, + 0.826, + 0.913 + ], + "angle": 0, + "content": "As shown in Fig. 6, we compare different configurations on a smaller dataset consisting of 133 samples—19 from each of the 7 MATH problem types—to evaluate sample efficiency and convergence speed. First, all configurations eventually achieve nearly \\(100\\%\\) accuracy on the training dataset. Notably, the trajectory-level loss with turn-level ratio (Turn-Ratio, Eq. (13)) demonstrates substantially better sample efficiency than its token-level variants (Eq. (2)), reaching higher training rewards with fewer steps. We also present the training curve of separate weight setting, the empirical results show that shared parameters with simultaneous updates converge noticeably faster." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.176, + 0.091, + 0.3, + 0.106 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.121, + 0.825, + 0.273 + ], + "angle": 0, + "content": "In this paper, we introduced ReMA, a novel framework that leverages multi-agent reinforcement learning to elicit meta-thinking in large language models. By explicitly separating meta-thinking and reasoning processes into distinct agents, our approach enhances both exploration during training and the interpretability of model outputs. We tailored RL algorithms and reward functions to ensure reliable performance. Through comprehensive experiments on mathematical reasoning and LLM-as-a-Judge benchmarks, ReMA consistently achieved superior results, particularly on out-of-distribution datasets. 
We further extend ReMA to multi-turn settings, enabling the framework to handle more complex reasoning scenarios that require more communication between agents. Our ablations demonstrate how effective coordination between agents evolves, highlighting the promise of reinforcement learning and structured agents' collaboration for advancing the capabilities of language models in complex reasoning tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.27, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.113, + 0.826, + 0.157 + ], + "angle": 0, + "content": "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.164, + 0.825, + 0.194 + ], + "angle": 0, + "content": "Elif Akata, Lion Schulz, Julian Coda-Forno, Seong Joon Oh, Matthias Bethge, and Eric Schulz. Playing repeated games with large language models. arXiv preprint arXiv:2305.16867, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.201, + 0.825, + 0.244 + ], + "angle": 0, + "content": "Cem Anil, Guodong Zhang, Yuhuai Wu, and Roger B. Grosse. Learning to give checkable answers with prover-verifier games. CoRR, abs/2108.12099, 2021. URL https://arxiv.org/abs/2108.12099." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.252, + 0.825, + 0.297 + ], + "angle": 0, + "content": "Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: A family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 1, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.303, + 0.825, + 0.348 + ], + "angle": 0, + "content": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.355, + 0.825, + 0.384 + ], + "angle": 0, + "content": "Jiaqi Chen, Yuxian Jiang, Jiachen Lu, and Li Zhang. S-agents: Self-organizing agents in open-ended environments. arXiv preprint arXiv:2402.04578, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.391, + 0.825, + 0.449 + ], + "angle": 0, + "content": "Qiguang Chen, Libo Qin, Jiaqi WANG, Jingxuan Zhou, and Wanxiang Che. Unlocking the capabilities of thought: A reasoning boundary framework to quantify and optimize chain-of-thought. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024b. URL https://openreview.net/forum?id=pC44UMwy2v." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.457, + 0.825, + 0.5 + ], + "angle": 0, + "content": "Shuhao Chen, Weisen Jiang, Baijiong Lin, James T Kwok, and Yu Zhang. Routersc: Query-based router by dual contrastive learning for assembling large language models. arXiv preprint arXiv:2409.19886, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.508, + 0.825, + 0.564 + ], + "angle": 0, + "content": "Weize Chen, Yusheng Su, Jingwei Zuo, Cheng Yang, Chenfei Yuan, Chi-Min Chan, Heyang Yu, Yaxi Lu, Yi-Hsin Hung, Chen Qian, et al. Agentverse: Facilitating multi-agent collaboration and exploring emergent behaviors. In The Twelfth International Conference on Learning Representations, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.572, + 0.825, + 0.63 + ], + "angle": 0, + "content": "Yongchao Chen, Jacob Arkin, Charles Dawson, Yang Zhang, Nicholas Roy, and Chuchu Fan. 
Autotamp: Autoregressive task and motion planning with llms as translators and checkers. In 2024 IEEE International conference on robotics and automation (ICRA), pages 6695-6702. IEEE, 2024d." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.638, + 0.825, + 0.694 + ], + "angle": 0, + "content": "Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. Palm: Scaling language modeling with pathways. Journal of Machine Learning Research, 24(240): 1-113, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.702, + 0.825, + 0.746 + ], + "angle": 0, + "content": "Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training. arXiv preprint arXiv:2501.17161, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.753, + 0.825, + 0.797 + ], + "angle": 0, + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.804, + 0.825, + 0.834 + ], + "angle": 0, + "content": "Google DeepMind. Gemini flash thinking, 2025. URL https://deepmind.google/technologies/gemini/flash-thinking/. Accessed: 2025-01-29." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.842, + 0.825, + 0.913 + ], + "angle": 0, + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. 
Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao," + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.113, + 0.826, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.091, + 0.825, + 0.424 + ], + "angle": 0, + "content": "Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun, T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W. L. Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. X. 
Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Yishi Piao, Yisong Wang, Yixuan Tan, Yiyang Ma, Yiyuan Liu, Yongqiang Guo, Yuan Ou, Yuduan Wang, Yue Gong, Yuheng Zou, Yujia He, Yunfan Xiong, Yuxiang Luo, Yuxiang You, Yuxuan Liu, Yuyang Zhou, Y. X. Zhu, Yanhong Xu, Yanping Huang, Yaohui Li, Yi Zheng, Yuchen Zhu, Yunxian Ma, Ying Tang, Yukun Zha, Yuting Yan, Z. Z. Ren, Zehui Ren, Zhangli Sha, Zhe Fu, Zhean Xu, Zhenda Xie, Zhengyan Zhang, Zhewen Hao, Zhicheng Ma, Zhigang Yan, Zhiyu Wu, Zihui Gu, Zijia Zhu, Zijun Liu, ZiLin Li, Ziwei Xie, Ziyang Song, Zizheng Pan, Zhen Huang, Zhipeng Xu, Zhongyu Zhang and Zhen Zhang. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. 2025. URL https://arxiv.org/abs/2501.12948." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.434, + 0.825, + 0.489 + ], + "angle": 0, + "content": "Aniket Didolkar, Anirudh Goyal, Nan Rosemary Ke, Siyuan Guo, Michal Valko, Timothy Lillicrap, Danilo Rezende, Yoshua Bengio, Michael Mozer, and Sanjeev Arora. Metacognitive capabilities of llms: An exploration in mathematical problem solving. arXiv preprint arXiv:2405.12205, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.5, + 0.825, + 0.543 + ], + "angle": 0, + "content": "Dujian Ding, Ankur Mallick, Chi Wang, Robert Sim, Subhabrata Mukherjee, Victor Ruhle, Laks VS Lakshmanan, and Ahmed Hassan Awadallah. Hybrid llm: Cost-efficient and quality-aware query routing. arXiv preprint arXiv:2404.14618, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.554, + 0.825, + 0.583 + ], + "angle": 0, + "content": "Kefan Dong and Tengyu Ma. Stp: Self-play llm theorem provers with iterative conjecturing and proving, 2025. URL https://arxiv.org/abs/2502.00212." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.593, + 0.825, + 0.636 + ], + "angle": 0, + "content": "Yixin Dong, Charlie F Ruan, Yaxing Cai, Ruihang Lai, Ziyi Xu, Yilong Zhao, and Tianqi Chen. Xgrammar: Flexible and efficient structured generation engine for large language models. arXiv preprint arXiv:2411.15100, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.646, + 0.825, + 0.689 + ], + "angle": 0, + "content": "Yilun Du, Shuang Li, Antonio Torralba, Joshua B Tenenbaum, and Igor Mordatch. Improving factuality and reasoning in language models through multiagent debate. In *Forty-first International Conference on Machine Learning*, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.699, + 0.825, + 0.741 + ], + "angle": 0, + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.751, + 0.825, + 0.781 + ], + "angle": 0, + "content": "Andrew Estornell, Jean-Francois Ton, Yuanshun Yao, and Yang Liu. Acc-debate: An actor-critic approach to multi-agent debate, 2024. URL https://arxiv.org/abs/2411.00053." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.791, + 0.825, + 0.82 + ], + "angle": 0, + "content": "John H Flavell. Metacognition and cognitive monitoring: A new area of cognitive-developmental inquiry. American psychologist, 34(10):906, 1979." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.83, + 0.825, + 0.873 + ], + "angle": 0, + "content": "Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D. Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars. 2025. URL https://arxiv.org/abs/2503.01307." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.883, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Peizhong Gao, Ao Xie, Shaoguang Mao, Wenshan Wu, Yan Xia, Haipeng Mi, and Furu Wei. Meta reasoning for large language models. arXiv preprint arXiv:2406.11698, 2024." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.825, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.121 + ], + "angle": 0, + "content": "Alex Graves. Sequence transduction with recurrent neural networks. arXiv preprint arXiv:1211.3711, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.13, + 0.825, + 0.174 + ], + "angle": 0, + "content": "Fatemeh Haji, Mazal Bethany, Maryam Tabar, Jason Chiang, Anthony Rios, and Peyman Najafirad. Improving llm reasoning with multi-agent tree-of-thought validator agent, 2024. URL https://arxiv.org/abs/2409.11527." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.183, + 0.824, + 0.214 + ], + "angle": 0, + "content": "Rui Hao, Linmei Hu, Weijian Qi, Qingliu Wu, Yirui Zhang, and Liqiang Nie. Chatlm network: More brains, more intelligence. AI Open, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.223, + 0.825, + 0.279 + ], + "angle": 0, + "content": "Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, et al. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.289, + 0.825, + 0.332 + ], + "angle": 0, + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. 
arXiv preprint arXiv:2103.03874, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.343, + 0.825, + 0.386 + ], + "angle": 0, + "content": "Sirui Hong, Xiawu Zheng, Jonathan Chen, Yuheng Cheng, Jinlin Wang, Ceyao Zhang, Zili Wang, Steven Ka Shing Yau, Zijuan Lin, Liyang Zhou, et al. Metagpt: Meta programming for multiagent collaborative framework. arXiv preprint arXiv:2308.00352, 3(4):6, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.395, + 0.825, + 0.424 + ], + "angle": 0, + "content": "Jian Hu. Reinforce++: A simple and efficient approach for aligning large language models. arXiv preprint arXiv:2501.03262, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.434, + 0.825, + 0.476 + ], + "angle": 0, + "content": "Jian Hu, Xibin Wu, Zilin Zhu, Xianyu, Weixun Wang, Dehao Zhang, and Yu Cao. Openrlhf: An easy-to-use, scalable and high-performance rlhf framework. arXiv preprint arXiv:2405.11143, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.487, + 0.825, + 0.53 + ], + "angle": 0, + "content": "Qitian Jason Hu, Jacob Bieker, Xiuyu Li, Nan Jiang, Benjamin Keigwin, Gaurav Ranganath, Kurt Keutzer, and Shriyash Kaustubh Upadhyay. Routerbench: A benchmark for multi-llm routing system. arXiv preprint arXiv:2403.12031, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.54, + 0.825, + 0.582 + ], + "angle": 0, + "content": "Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.593, + 0.825, + 0.623 + ], + "angle": 0, + "content": "Fangkai Jiao, Geyang Guo, Xingxing Zhang, Nancy F Chen, Shafiq Joty, and Furu Wei. Preference optimization for reasoning with pseudo feedback. arXiv preprint arXiv:2411.16345, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.632, + 0.825, + 0.675 + ], + "angle": 0, + "content": "Bowen Jin, Hansi Zeng, Zhenrui Yue, Jinsung Yoon, Sercan Arik, Dong Wang, Hamed Zamani, and Jiawei Han. Search-r1: Training llms to reason and leverage search engines with reinforcement learning, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.685, + 0.824, + 0.715 + ], + "angle": 0, + "content": "Jan Hendrik Kirchner, Yining Chen, Harri Edwards, Jan Leike, Nat McAleese, and Yuri Burda. Prover-verifier games improve legibility of llm outputs. arXiv preprint arXiv:2407.13692, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.724, + 0.825, + 0.768 + ], + "angle": 0, + "content": "Robert Kirk, Ishita Mediratta, Christoforos Nalmpantis, Jelena Luketina, Eric Hambro, Edward Grefenstette, and Roberta Raileanu. Understanding the effects of rlhf on llm generalisation and diversity. In The Twelfth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.777, + 0.825, + 0.82 + ], + "angle": 0, + "content": "Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, et al. Training language models to self-correct via reinforcement learning. arXiv preprint arXiv:2409.12917, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.83, + 0.825, + 0.874 + ], + "angle": 0, + "content": "Nathan Lambert, Valentina Pyatkin, Jacob Morrison, LJ Miranda, Bill Yuchen Lin, Khyathi Chandu, Nouha Dziri, Sachin Kumar, Tom Zick, Yejin Choi, et al. Rewardbench: Evaluating reward models for language modeling. arXiv preprint arXiv:2403.13787, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.883, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Pat Langley, Kirstin Cummings, and Daniel Shapiro. Hierarchical skills and cognitive architectures. 
In Proceedings of the annual meeting of the cognitive science society, volume 26, 2004." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.092, + 0.826, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Aitor Lewkowycz, Anders Andreassen, David Dohan, Ethan Dyer, Henryk Michalewski, Vinay Ramasesh, Ambrose Slone, Cem Anil, Imanol Schlag, Theo Gutman-Solo, et al. Solving quantitative reasoning problems with language models. Advances in Neural Information Processing Systems, 35:3843-3857, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.157, + 0.826, + 0.2 + ], + "angle": 0, + "content": "Ming Li, Jiuhai Chen, Lichang Chen, and Tianyi Zhou. Can llms speak for diverse people? tuning llms via debate to generate controllable controversial statements. arXiv preprint arXiv:2402.10614, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.21, + 0.826, + 0.254 + ], + "angle": 0, + "content": "Tian Liang, Zhiwei He, Wenxiang Jiao, Xing Wang, Yan Wang, Rui Wang, Yujiu Yang, Shuming Shi, and Zhaopeng Tu. Encouraging divergent thinking in large language models through multiagent debate. arXiv preprint arXiv:2305.19118, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.263, + 0.826, + 0.305 + ], + "angle": 0, + "content": "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. arXiv preprint arXiv:2305.20050, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.315, + 0.826, + 0.358 + ], + "angle": 0, + "content": "Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. 
arXiv preprint arXiv:2412.19437, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.368, + 0.826, + 0.424 + ], + "angle": 0, + "content": "Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective. CoRR, abs/2503.20783, 2025. doi: 10.48550/ARXIV.2503.20783. URL https://doi.org/10.48550/arXiv.2503.20783." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.434, + 0.826, + 0.478 + ], + "angle": 0, + "content": "Chengdong Ma, Ziran Yang, Minquan Gao, Hai Ci, Jun Gao, Xuehai Pan, and Yaodong Yang. Red teaming game: A game-theoretic framework for red teaming language models. arXiv preprint arXiv:2310.00322, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.487, + 0.826, + 0.543 + ], + "angle": 0, + "content": "Hao Ma, Tianyi Hu, Zhiqiang Pu, Boyin Liu, Xiaolin Ai, Yanyan Liang, and Min Chen. Coevolving with the other you: Fine-tuning LLM with sequential cooperative multi-agent reinforcement learning. CoRR, abs/2410.06101, 2024. doi: 10.48550/ARXIV.2410.06101. URL https://doi.org/10.48550/arXiv.2410.06101." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.553, + 0.826, + 0.597 + ], + "angle": 0, + "content": "Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegrefe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, et al. Self-refine: Iterative refinement with self-feedback. Advances in Neural Information Processing Systems, 36:46534-46594, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.606, + 0.826, + 0.649 + ], + "angle": 0, + "content": "Dakota Mahan, Duy Van Phung, Rafael Rafailov, Chase Blagden, Nathan Lile, Louis Castricato, Jan-Philipp Franken, Chelsea Finn, and Alon Albalak. Generative reward models. arXiv preprint arXiv:2410.12832, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.659, + 0.826, + 0.714 + ], + "angle": 0, + "content": "Sumeet Ramesh Motwani, Chandler Smith, Rocktim Jyoti Das, Markian Rybchuk, Philip H. S. Torr, Ivan Laptev, Fabio Pizzati, Ronald Clark, and Christian Schroeder de Witt. Malt: Improving reasoning with multi-agent llm training, 2024. URL https://arxiv.org/abs/2412.01928." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.724, + 0.826, + 0.768 + ], + "angle": 0, + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.777, + 0.695, + 0.794 + ], + "angle": 0, + "content": "OpenAI. Openai o1 system card, 2024. URL https://openai.com/ol/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.803, + 0.826, + 0.858 + ], + "angle": 0, + "content": "Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in neural information processing systems, 35: 27730-27744, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.869, + 0.826, + 0.913 + ], + "angle": 0, + "content": "Chanwoo Park, Seungju Han, Xingzhi Guo, Asuman Ozdaglar, Kaiqing Zhang, and Joo-Kyung Kim. Maporl: Multi-agent post-co-training for collaborative large language models with reinforcement learning. 2025. URL https://arxiv.org/abs/2502.18439." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Ethan Perez, Saffron Huang, Francis Song, Trevor Cai, Roman Ring, John Aslanides, Amelia Glaese, Nat McAleese, and Geoffrey Irving. Red teaming language models with language models. arXiv preprint arXiv:2202.03286, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.141, + 0.826, + 0.184 + ], + "angle": 0, + "content": "Israel Puerta-Merino, Carlos Núñez-Molina, Pablo Mesejo, and Juan Fernández-Olivares. A roadmap to guide the integration of llms in hierarchical planning. arXiv preprint arXiv:2501.08068, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.19, + 0.825, + 0.223 + ], + "angle": 0, + "content": "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual reasoning makes smaller llms stronger problem-solvers. arXiv preprint arXiv:2408.06195, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.227, + 0.825, + 0.272 + ], + "angle": 0, + "content": "Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, and Pengfei Liu. O1 replication journey: A strategic progress report - part 1, 2024. URL https://arxiv.org/abs/2410.18982." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.277, + 0.825, + 0.32 + ], + "angle": 0, + "content": "Lv Qingsong, Yangning Li, Zihua Lan, Zishan Xu, Jiwei Tang, Yinghui Li, Wenhao Jiang, Hai-Tao Zheng, and Philip S. Yu. Raise: Reinforenced adaptive instruction selection for large language models, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.327, + 0.825, + 0.371 + ], + "angle": 0, + "content": "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.377, + 0.825, + 0.421 + ], + "angle": 0, + "content": "Krishan Rana, Jesse Haviland, Sourav Garg, Jad Abou-Chakra, Ian Reid, and Niko Suenderhauf. Sayplan: Grounding large language models using 3d scene graphs for scalable robot task planning. arXiv preprint arXiv:2307.06135, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.427, + 0.825, + 0.47 + ], + "angle": 0, + "content": "Swarnadeep Saha, Xian Li, Marjan Ghazvininejad, Jason Weston, and Tianlu Wang. Learning to plan & reason for evaluation with thinking-llm-as-a-judge. arXiv preprint arXiv:2501.18099, 2025a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.477, + 0.825, + 0.521 + ], + "angle": 0, + "content": "Swarnadeep Saha, Xian Li, Marjan Ghazvininejad, Jason Weston, and Tianlu Wang. Learning to plan & reason for evaluation with thinking-llm-as-a-judge, 2025b. URL https://arxiv.org/abs/2501.18099." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.527, + 0.826, + 0.599 + ], + "angle": 0, + "content": "John Schulman, Sergey Levine, Pieter Abbeel, Michael I. Jordan, and Philipp Moritz. Trust region policy optimization. In Francis R. Bach and David M. Blei, editors, Proceedings of the 32nd International Conference on Machine Learning, ICML 2015, Lille, France, 6-11 July 2015, volume 37 of JMLR Workshop and Conference Proceedings, pages 1889-1897. JMLR.org, 2015. URL http://proceedings.mlr.press/v37/schulman15.html." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.605, + 0.825, + 0.648 + ], + "angle": 0, + "content": "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. CoRR, abs/1707.06347, 2017. URL http://arxiv.org/abs/1707.06347." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.655, + 0.825, + 0.699 + ], + "angle": 0, + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.705, + 0.825, + 0.763 + ], + "angle": 0, + "content": "Maohao Shen, Guangtao Zeng, Zhenting Qi, Zhang-Wei Hong, Zhenfang Chen, Wei Lu, Gregory Wornell, Subhro Das, David Cox, and Chuang Gan. Satori: Reinforcement learning with chain-of-action-thought enhances llm reasoning via autoregressive search, 2025. URL https:// arxiv.org/abs/2502.02508." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.769, + 0.825, + 0.812 + ], + "angle": 0, + "content": "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv: 2409.19256, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.818, + 0.825, + 0.849 + ], + "angle": 0, + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.855, + 0.825, + 0.911 + ], + "angle": 0, + "content": "Chan Hee Song, Jiaman Wu, Clayton Washington, Brian M Sadler, Wei-Lun Chao, and Yu Su. Llm-planner: Few-shot grounded planning for embodied agents with large language models. 
In Proceedings of the IEEE/CVF international conference on computer vision, pages 2998-3009, 2023." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.826, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "Dimitris Stripelis, Zijian Hu, Jipeng Zhang, Zhaozhuo Xu, Alay Dilipbhai Shah, Han Jin, Yuhang Yao, Salman Avestimehr, and Chaoyang He. Tensoropera router: A multi-model router for efficient llm inference. arXiv preprint arXiv:2408.12320, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.142, + 0.826, + 0.187 + ], + "angle": 0, + "content": "Vighnesh Subramaniam, Yilun Du, Joshua B. Tenenbaum, Antonio Torralba, Shuang Li, and Igor Mordatch. Multiagent finetuning: Self improvement with diverse reasoning chains, 2025. URL https://arxiv.org/abs/2501.05707." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.195, + 0.826, + 0.239 + ], + "angle": 0, + "content": "Chuanneng Sun, Songjun Huang, and Dario Pompili. Retrieval-augmented hierarchical in-context reinforcement learning and hindsight modular reflections for task planning with llms. arXiv preprint arXiv:2408.06520, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.246, + 0.668, + 0.264 + ], + "angle": 0, + "content": "Richard Sutton. The bitter lesson. Incomplete Ideas (blog), 13(1):38, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.272, + 0.826, + 0.316 + ], + "angle": 0, + "content": "Sijun Tan, Siyuan Zhuang, Kyle Montgomery, William Y Tang, Alejandro Cuadron, Chenguang Wang, Raluca Ada Popa, and Ion Stoica. Judgebench: A benchmark for evaluating llm-based judges. arXiv preprint arXiv:2410.12784, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.324, + 0.825, + 0.368 + ], + "angle": 0, + "content": "Xiangru Tang, Anni Zou, Zhuosheng Zhang, Ziming Li, Yilun Zhao, Xingyao Zhang, Arman Cohan, and Mark Gerstein. Medagents: Large language models as collaborators for zero-shot medical reasoning. arXiv preprint arXiv:2311.10537, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.375, + 0.822, + 0.406 + ], + "angle": 0, + "content": "Qwen Team. Qwen2.5: A party of foundation models, September 2024. URL https://qwenlm.github.io/blog/qwen2.5/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.414, + 0.825, + 0.457 + ], + "angle": 0, + "content": "Jun Wang, Meng Fang, Ziyu Wan, Muning Wen, Jiachen Zhu, Anjie Liu, Ziqin Gong, Yan Song, Lei Chen, Lionel M Ni, et al. Openr: An open source framework for advanced reasoning with large language models. arXiv preprint arXiv:2410.09671, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.466, + 0.825, + 0.51 + ], + "angle": 0, + "content": "Tianlu Wang, Ilia Kulikov, Olga Golovneva, Ping Yu, Weizhe Yuan, Jane Dwivedi-Yu, Richard Yuanzhe Pang, Maryam Fazel-Zarandi, Jason Weston, and Xian Li. Self-taught evaluators. arXiv preprint arXiv:2408.02666, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.518, + 0.825, + 0.562 + ], + "angle": 0, + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. arXiv preprint arXiv:2203.11171, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.57, + 0.825, + 0.601 + ], + "angle": 0, + "content": "Yuqing Wang and Yun Zhao. Metacognitive prompting improves understanding in large language models. arXiv preprint arXiv:2308.05342, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.609, + 0.825, + 0.652 + ], + "angle": 0, + "content": "Zhenhailong Wang, Shaoguang Mao, Wenshan Wu, Tao Ge, Furu Wei, and Heng Ji. Unleashing the emergent cognitive synergy in large language models: A task-solving agent through multi-personal self-collaboration. arXiv preprint arXiv:2307.05300, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.661, + 0.825, + 0.718 + ], + "angle": 0, + "content": "Zihan Wang, Kangrui Wang, Qineng Wang, Pingyue Zhang, Linjie Li, Zhengyuan Yang, Kefan Yu, Minh Nhat Nguyen, Licheng Liu, Eli Gottlieb, Monica Lam, Yiping Lu, Kyunghyun Cho, Jiajun Wu, Li Fei-Fei, Lijuan Wang, Yejin Choi, and Manling Li. Ragen: Understanding self-evolution in llm agents via multi-turn reinforcement learning, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.726, + 0.825, + 0.77 + ], + "angle": 0, + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.779, + 0.825, + 0.809 + ], + "angle": 0, + "content": "Sean Welleck, Ximing Lu, Peter West, Faeze Brahman, Tianxiao Shen, Daniel Khashabi, and Yejin Choi. Generating sequences by learning to self-correct. arXiv preprint arXiv:2211.00053, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.817, + 0.825, + 0.86 + ], + "angle": 0, + "content": "Muning Wen, Ziyu Wan, Weinan Zhang, Jun Wang, and Ying Wen. Reinforcing language agents via policy optimization with action decomposition. CoRR, abs/2405.15821, 2024. doi: 10.48550/ ARXIV.2405.15821. URL https://doi.org/10.48550/arXiv.2405.15821." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.869, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Zhiheng Xi, Dingwen Yang, Jixuan Huang, Jiafu Tang, Guanyu Li, Yiwen Ding, Wei He, Boyang Hong, Shihan Do, Wenyu Zhan, et al. Enhancing llm reasoning via critique models with test-time and training-time supervision. arXiv preprint arXiv:2411.16579, 2024." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.826, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "Violet Xiang, Charlie Snell, Kanishk Gandhi, Alon Albalak, Anikait Singh, Chase Blagden, Duy Phung, Rafael Rafailov, Nathan Lile, Dakota Mahan, et al. Towards system 2 reasoning in llms: Learning how to think with meta chain-of-though. arXiv preprint arXiv:2501.04682, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.142, + 0.826, + 0.187 + ], + "angle": 0, + "content": "Yihang Xiao, Jinyi Liu, Yan Zheng, Xiaohan Xie, Jianye Hao, Mingzhi Li, Ruitao Wang, Fei Ni, Yuxiao Li, Jintian Luo, et al. Cellagent: An llm-driven multi-agent framework for automated single-cell data analysis. BioRxiv, pages 2024-05, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.193, + 0.826, + 0.236 + ], + "angle": 0, + "content": "Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.244, + 0.826, + 0.288 + ], + "angle": 0, + "content": "Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, et al. 
Towards large reasoning models: A survey of reinforced reasoning with large language models. arXiv preprint arXiv:2501.09686, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.295, + 0.826, + 0.338 + ], + "angle": 0, + "content": "Prateek Yadav, Tu Vu, Jonathan Lai, Alexandra Chronopoulou, Manaal Faruqui, Mohit Bansal, and Tsendsuren Munkhdalai. What matters for model merging at scale? arXiv preprint arXiv:2410.03617, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.346, + 0.826, + 0.39 + ], + "angle": 0, + "content": "Xue Yan, Yan Song, Xinyu Cui, Filippos Christianos, Haifeng Zhang, David Henry Mguni, and Jun Wang. Ask more, know better: Reinforce-learned prompt questions for decision making with large language models. arXiv preprint arXiv:2310.18127, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.397, + 0.826, + 0.454 + ], + "angle": 0, + "content": "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, Keming Lu, Mingfeng Xue, Runji Lin, Tianyu Liu, Xingzhang Ren, and Zhenru Zhang. Qwen2.5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.462, + 0.826, + 0.492 + ], + "angle": 0, + "content": "Ling Yang, Zhaochen Yu, Bin Cui, and Mengdi Wang. Reasonflux: Hierarchical llm reasoning via scaling thought templates. arXiv preprint arXiv:2502.06772, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.499, + 0.826, + 0.542 + ], + "angle": 0, + "content": "Guanghao Ye, Khiem Duc Pham, Xinzhi Zhang, Sivakanth Gopi, Baolin Peng, Beibin Li, Janardhan Kulkarni, and Huseyin A Inan. On the emergence of thinking in llms i: Searching for the right intuition. arXiv preprint arXiv:2502.06773, 2025a." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.55, + 0.826, + 0.58 + ], + "angle": 0, + "content": "Peijun Ye, Tao Wang, and Fei-Yue Wang. A survey of cognitive architectures in the past 20 years. IEEE transactions on cybernetics, 48(12):3280-3290, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.587, + 0.826, + 0.617 + ], + "angle": 0, + "content": "Yaowen Ye, Cassidy Laidlaw, and Jacob Steinhardt. Iterative label refinement matters more than preference optimization under weak supervision. arXiv preprint arXiv:2501.07886, 2025b." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.624, + 0.826, + 0.654 + ], + "angle": 0, + "content": "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning. arXiv preprint arXiv:2502.03387, 2025c." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.661, + 0.826, + 0.705 + ], + "angle": 0, + "content": "Le Yu, Bowen Yu, Haiyang Yu, Fei Huang, and Yongbin Li. Language models are super mario: Absorbing abilities from homologous models as a free lunch. In *Forty-first International Conference on Machine Learning*, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.712, + 0.826, + 0.797 + ], + "angle": 0, + "content": "Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Guangming Sheng, Yuxuan Tong, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Jinhua Zhu, Jiaze Chen, Jiangjie Chen, Chengyi Wang, Hongli Yu, Weinan Dai, Yuxuan Song, Xiangpeng Wei, Hao Zhou, Jingjing Liu, Wei-Ying Ma, Ya-Qin Zhang, Lin Yan, Mu Qiao, Yonghui Wu, and Mingxuan Wang. Dapo: An open-source llm reinforcement learning system at scale, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.804, + 0.826, + 0.848 + ], + "angle": 0, + "content": "Murong Yue, Wenlin Yao, Haitao Mi, Dian Yu, Ziyu Yao, and Dong Yu. 
Dots: Learning to reason dynamically in llms via optimal reasoning trajectories search. In The Thirteenth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.856, + 0.826, + 0.911 + ], + "angle": 0, + "content": "Murong Yue, Wenlin Yao, Haitao Mi, Dian Yu, Ziyu Yao, and Dong Yu. DOTS: learning to reason dynamically in llms via optimal reasoning trajectories search. CoRR, abs/2410.03864, 2024. doi: 10.48550/ARXIV.2410.03864. URL https://doi.org/10.48550/arXiv.2410.03864." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.133 + ], + "angle": 0, + "content": "Yanwei Yue, Guibin Zhang, Boyang Liu, Guancheng Wan, Kun Wang, Dawei Cheng, and Yiyan Qi. Masrouter: Learning to route llms for multi-agent systems. arXiv preprint arXiv:2502.11133, 2025a." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.142, + 0.826, + 0.213 + ], + "angle": 0, + "content": "Yu Yue, Yufeng Yuan, Qiying Yu, Xiaochen Zuo, Ruofei Zhu, Wenyuan Xu, Jiaze Chen, Chengyi Wang, TianTian Fan, Zhengyin Du, Xiangpeng Wei, Xiangyu Yu, Gaohong Liu, Juncai Liu, Lingjun Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Ru Zhang, Xin Liu, Mingxuan Wang, Yonghui Wu, and Lin Yan. Vapo: Efficient and reliable reinforcement learning for advanced reasoning tasks, 2025b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.221, + 0.825, + 0.252 + ], + "angle": 0, + "content": "Weihao Zeng, Yuzhen Huang, Qian Liu, Wei Liu, Keqing He, Zejun Ma, and Junxian He. Simplerl-zoo: Investigating and taming zero reinforcement learning for open base models in the wild, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.259, + 0.825, + 0.316 + ], + "angle": 0, + "content": "Di Zhang, Jianbo Wu, Jingdi Lei, Tong Che, Jiatong Li, Tong Xie, Xiaoshui Huang, Shufei Zhang, Marco Pavone, Yuqiang Li, Wanli Ouyang, and Dongzhan Zhou. Llama-berry: Pairwise optimization for o1-like olympiad-level mathematical reasoning, 2024a. URL https://arxiv.org/abs/2410.02884." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.324, + 0.825, + 0.367 + ], + "angle": 0, + "content": "Hangfan Zhang, Zhiyao Cui, Xinrun Wang, Qiaosheng Zhang, Zhen Wang, Dinghao Wu, and Shuyue Hu. If multi-agent debate is the answer, what is the question? arXiv preprint arXiv:2502.08788, 2025a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.375, + 0.825, + 0.418 + ], + "angle": 0, + "content": "Jiayi Zhang, Jinyu Xiang, Zhaoyang Yu, Fengwei Teng, Xionghui Chen, Jiaqi Chen, Mingchen Zhuge, Xin Cheng, Sirui Hong, Jinlin Wang, et al. Aflow: Automating agentic workflow generation. arXiv preprint arXiv:2410.10762, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.427, + 0.825, + 0.469 + ], + "angle": 0, + "content": "Xiaotian Zhang, Chunyang Li, Yi Zong, Zhengyu Ying, Liang He, and Xipeng Qiu. Evaluating the performance of large language models on gaokao benchmark. arXiv preprint arXiv:2305.12474, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.478, + 0.825, + 0.521 + ], + "angle": 0, + "content": "Yiqun Zhang, Peng Ye, Xiaocui Yang, Shi Feng, Shufei Zhang, Lei Bai, Wanli Ouyang, and Shuyue Hu. Nature-inspired population-based evolution of large language models. arXiv preprint arXiv:2503.01155, 2025b." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.53, + 0.824, + 0.56 + ], + "angle": 0, + "content": "Rosie Zhao, Alexandru Meterez, Sham Kakade, Cengiz Pehlevan, Samy Jelassi, and Eran Malach. Echo chamber: Rl post-training amplifies behaviors learned in pretraining, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.567, + 0.825, + 0.611 + ], + "angle": 0, + "content": "Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. Marco-ol: Towards open reasoning models for open-ended solutions, 2024. URL https://arxiv.org/abs/2411.14405." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.619, + 0.825, + 0.648 + ], + "angle": 0, + "content": "Yifei Zhou, Andrea Zanette, Jiayi Pan, Sergey Levine, and Aviral Kumar. Archer: Training language model agents via hierarchical multi-turn rl, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.656, + 0.825, + 0.712 + ], + "angle": 0, + "content": "Mingchen Zhuge, Haozhe Liu, Francesco Faccio, Dylan R Ashley, Róbert Csordás, Anand Gopalakrishnan, Abdullah Hamdi, Hasan Abed Al Kader Hammoud, Vincent Herrmann, Kazuki Irie, et al. Mindstorms in natural language-based societies of mind. arXiv preprint arXiv:2305.17066, 2023." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.826, + 0.712 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.09, + 0.411, + 0.108 + ], + "angle": 0, + "content": "Appendix Table of Contents" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.116, + 0.826, + 0.13 + ], + "angle": 0, + "content": "- A Related work 19" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.133, + 0.825, + 0.148 + ], + "angle": 0, + "content": "-A.1 Single LLM Reasoning 19" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.149, + 0.825, + 0.163 + ], + "angle": 0, + "content": "-A.2MultipleLLMReasoning 20" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.165, + 0.825, + 0.179 + ], + "angle": 0, + "content": "-A.3 Hierarchical Reasoning 20" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.18, + 0.825, + 0.194 + ], + "angle": 0, + "content": 
"-A.4RL in LLM 21" + }, + { + "type": "list", + "bbox": [ + 0.246, + 0.133, + 0.825, + 0.194 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.198, + 0.824, + 0.212 + ], + "angle": 0, + "content": "B Limitation and Future Work 21" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.216, + 0.824, + 0.23 + ], + "angle": 0, + "content": "C Supplementary Materials for Method in Section 3 21" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.234, + 0.825, + 0.248 + ], + "angle": 0, + "content": "- C.1 Inference-time Scaling For ReMA 21" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.25, + 0.825, + 0.263 + ], + "angle": 0, + "content": "- C.2 Detailed reward design 22" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.265, + 0.825, + 0.278 + ], + "angle": 0, + "content": "- C.3 Pseudocode of ReMA 23" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.28, + 0.825, + 0.294 + ], + "angle": 0, + "content": "- C.4 Brief convergence analysis 23" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.296, + 0.825, + 0.31 + ], + "angle": 0, + "content": "- C.5 Learning to reason from the perspective of Leader Follower Game 24" + }, + { + "type": "list", + "bbox": [ + 0.246, + 0.234, + 0.825, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.314, + 0.824, + 0.328 + ], + "angle": 0, + "content": "D Training Details 26" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.332, + 0.825, + 0.346 + ], + "angle": 0, + "content": "- D.1 Single-turn ReMA 26" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.348, + 0.825, + 0.361 + ], + "angle": 0, + "content": "\\* D.1.1 Supervised fine-tuning data collection 27" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.362, + 0.825, + 0.375 + ], + "angle": 0, + "content": "\\* D.1.2 Dataset Curation of RewardBench970 27" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.377, + 0.825, + 0.39 + ], + "angle": 0, + "content": "\\*D.1.3 Training on MATH 
28" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.391, + 0.825, + 0.405 + ], + "angle": 0, + "content": "\\* D.1.4 Training on Reward Bench 28" + }, + { + "type": "list", + "bbox": [ + 0.273, + 0.348, + 0.825, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.407, + 0.825, + 0.42 + ], + "angle": 0, + "content": "- D.2 Multi-turn ReMA 28" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.422, + 0.825, + 0.435 + ], + "angle": 0, + "content": "\\* D.2.1 SFT data collection of multi-turn MAMRP 29" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.437, + 0.825, + 0.451 + ], + "angle": 0, + "content": "\\* D.2.2 Training on MATH 29" + }, + { + "type": "list", + "bbox": [ + 0.273, + 0.422, + 0.825, + 0.451 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.454, + 0.824, + 0.468 + ], + "angle": 0, + "content": "E Other Experiments 29" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.472, + 0.825, + 0.486 + ], + "angle": 0, + "content": "-E.1 Reward functions shape cross-agent behaviors 29" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.488, + 0.825, + 0.502 + ], + "angle": 0, + "content": "- E.2 Detailed Training Curves on Different Datasets of Multi-turn ReMA 30" + }, + { + "type": "list", + "bbox": [ + 0.246, + 0.472, + 0.825, + 0.502 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.506, + 0.824, + 0.52 + ], + "angle": 0, + "content": "F Qualitative results 30" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.523, + 0.825, + 0.538 + ], + "angle": 0, + "content": "- F.1 High-level policy finds better plans 30" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.539, + 0.825, + 0.553 + ], + "angle": 0, + "content": "- F.2 Case study for Experiments of Different Reward Functions in Appendix E.1 .30" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.554, + 0.825, + 0.568 + ], + "angle": 0, + "content": "- F.3 Case study for 
Adaptive Meta-thinking in Single-Turn ReMA in Section 4.2.2 30" + }, + { + "type": "list", + "bbox": [ + 0.246, + 0.523, + 0.825, + 0.568 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.573, + 0.824, + 0.587 + ], + "angle": 0, + "content": "G Prompts 31" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.605, + 0.323, + 0.621 + ], + "angle": 0, + "content": "A Related work" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.635, + 0.827, + 0.734 + ], + "angle": 0, + "content": "Drawing from the bitter lesson [Sutton, 2019], two methods that appear to scale effectively are searching and learning, aligning with current trends in large language models [Xu et al., 2025]. At present, researchers are leveraging these methods to maximize the capabilities of individual transformers, while other efforts are exploring architectures that involve multiple interacting entities. In this paper, we examine this divergence within the context of LLM reasoning, a capability that allows large language models to solve problems through logical reasoning, step-by-step analysis, and inference [Wang et al., 2024a]." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.748, + 0.379, + 0.764 + ], + "angle": 0, + "content": "A.1 Single LLM Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.773, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Main research works in reasoning involving a single LLM utilize search-based and post-training methods. The fundamental elements of searching methods are text generation and evaluation. Generation schemes include In-Context Learning [Brown et al., 2020], Beam Search [Graves, 2012], and various tree-based searching [Snell et al., 2024]; Evaluation approaches often use outcome accuracy, self-consistency [Wang et al., 2022], or process reward signal [Lightman et al., 2023] as the criteria to select high-quality responses from the generated texts. 
Post-training method is another research line in opposition to pre-training. Popular training pipelines often involve specific data construction followed by Supervised Fine-tuning [Qin et al., 2024, Ouyang et al., 2022, Hui et al., 2024, Liu et al., 2024], or reinforcement learning to interactively explore learning patterns [Wang et al., 2024a, Zhang et al., 2024a, DeepSeek-AI et al., 2025, Xu et al., 2025]." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.176, + 0.093, + 0.394, + 0.106 + ], + "angle": 0, + "content": "A.2 Multiple LLM Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.121, + 0.825, + 0.563 + ], + "angle": 0, + "content": "Integrating multiple entities can potentially surpass the intelligence of the individual model [Chen et al., 2023]. With the rapid emergence of large language models showing a varying level of abilities, some studies have explored facilitating discussions among multiple off-the-shelf LLMs [Zhang et al., 2025a, Chen et al., 2024a, Wang et al., 2023, Du et al., 2023, Zhuge et al., 2023, Tang et al., 2023, Hao et al., 2025, Akata et al., 2023, Hong et al., 2023, Zhang et al., 2024b], taking the form of free discussion [Du et al., 2023, Liang et al., 2023] or structured role assignments [Hong et al., 2023, Zhang et al., 2024b]. Some have applied routing mechanisms to assign tasks to the most suitable expert models [Hu et al., 2024b, Stripelis et al., 2024, Ding et al., 2024, Yue et al., 2025a, Chen et al., 2024c] or merging mechanisms to develop more versatile models [Yadav et al., 2024, Yu et al., 2024, Zhang et al., 2025b]. Beyond aggregating static knowledge from multiple agents, multi-agent LLM training can also enhance reasoning capabilities. 
For example, multi-agent debates can generate diverse synthetic data, which can subsequently be used for supervised fine-tuning [Estornell et al., 2024, Li et al., 2024, Motwani et al., 2024, Dong and Ma, 2025, Perez et al., 2022, Ye et al., 2025a, Subramaniam et al., 2025]. Reinforcement learning (RL) methods have also been adopted to improve LLM reasoning in areas such as alignment [Perez et al., 2022, Ma et al., 2023] and legibility [Kirchner et al., 2024]. Motwani et al. [2024] utilize a three-agent system for generation and fine-tune the models using Direct Preference Optimization (DPO). Reinforcement Learning with Generative Reward Models (GenRM) [Mahan et al., 2024, Ye et al., 2025b, Jiao et al., 2024, Wang et al., 2024b] represents another common approach of multi-agent training, where the reward signal is derived from the token probabilities of another LLM, coupled with the reasoning process. While our work aligns with these efforts, it diverges by using an additional tunable LLM to provide metacognitive instructions, guiding the low-level LLM during learning, rather than relying on a static GenRM. The most closely related works to ours are MAPoRL [Park et al., 2025] and COPYR [Ma et al., 2024]. MAPoRL is a multi-agent debating framework that uses multi-agent reinforcement learning (MARL) with a learned verifier to fine-tune each LLM agent. COPYR duplicates an LLM into two agents, training them simultaneously in the roles of pioneer and observer using RL. Shen et al. [2025] trained with a novel Chain-of-Action-Thought (COAT) framework that embeds meta-action tokens for self-reflection and exploration into an autoregressive search process. However, unlike our approach, which explicitly separates metacognition from plan execution, these methods do not decompose the reasoning process but instead focus on improving direct chain-of-thought generation. Furthermore, our experiments are conducted on a larger scale and include more challenging problems." 
+ }, + { + "type": "title", + "bbox": [ + 0.176, + 0.593, + 0.38, + 0.606 + ], + "angle": 0, + "content": "A.3 Hierarchical Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.621, + 0.825, + 0.911 + ], + "angle": 0, + "content": "Partitioning reasoning into hierarchical processes has been explored in prior research to make biological sense [Ye et al., 2018, Langley et al., 2004]. In the context of language models, a hierarchical structure has been used to facilitate diverse reasoning patterns, including planning [Puerta-Merino et al., 2025, Sun et al., 2024, Song et al., 2023, Rana et al., 2023, Chen et al., 2024d, Yan et al., 2023, Xiao et al., 2024], validation [Haji et al., 2024, Xi et al., 2024] and self-refinement [Madaan et al., 2023, Kumar et al., 2024, Welleck et al., 2022]. For instance, EvalPlanner [Saha et al., 2025b] is a framework that conducts reasoning through plan generation and execution. DOTS [Yue et al., 2024] extends decomposition by integrating a tree-based searching method with Analysis, Solution, and Verification layers. Marco-o1 [Zhao et al., 2024] focuses on open-ended problem-solving and abstract thinking, dynamically adjusting reasoning granularity and incorporating reflection mechanisms to enhance reasoning performance. Beyond these approaches, metacognition [Flavell, 1979] has been identified as another critical component of reasoning, referring to the intuitive understanding of one's own cognitive and reasoning processes [Gao et al., 2024, Wang and Zhao, 2023]. Wang and Zhao [2023] proposed a metacognitive prompting strategy to improve large language model (LLM) capabilities. Didolkar et al. [2024] further developed a prompt-guided method that enables models to label math problems with the required skills and subsequently use these labels to solve new problems. Gao et al. [2024] introduce meta-reasoner which use contextual multi-arm bandit to learn a high-level \"advisor\" over low-level reasoning process. Xiang et al. 
[2025] provides a Meta-CoT framework to think about its own thinking. They use construction-based methods as well as reinforcement learning to develop meta-cognitive skills. Qingsong et al. [2025] introduces a RL framework for dynamic instruction selection during fine-tuning. In our work, we also value reflect-" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.937, + 0.508, + 0.947 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.123 + ], + "angle": 0, + "content": "ing on reasoning processes, and we enhance metacognitive abilities through two-agent interaction and reinforcement learning at both end." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.138, + 0.303, + 0.153 + ], + "angle": 0, + "content": "A.4 RL in LLM" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.164, + 0.827, + 0.399 + ], + "angle": 0, + "content": "Recent advancements in applying RL to LLMs have enhanced their reasoning and decision-making capabilities. Liu et al. [2025] examines token-level optimization biases by introducing Dr. GRPO to stabilize policy gradients. VAPO [Yue et al., 2025b] enhances PPO with value-aware perturbations and adaptive reward shaping to improve robustness in sparse-reward reasoning tasks. DAPO [Yu et al., 2025] provides a scalable, modular RL framework that integrates distributed rollout collection and dynamic replay buffers for reproducible training at scale. SimpleRL-Zoo [Zeng et al., 2025] conducts zero-shot RL experiments across open-base LLMs to uncover emergent cognitive behaviors under minimal reward signals. Echo Chamber [Zhao et al., 2025] investigates how RL fine-tuning algorithms can amplify pretrained model biases and proposes regularization to mitigate over-amplification. Wen et al. [2024] decomposes high-level language actions into token-level operations to achieve finer-grained credit assignment. Some works push RL training for single-turn to multi-turn. 
Search-R1 [Jin et al., 2025] trains LLMs to orchestrate multi-turn search strategies with RL-optimized decision policies to improve question-answering accuracy. ArCHer [Zhou et al., 2024] employs a hierarchical, multi-turn RL architecture with manager and worker policies to efficiently handle long-horizon dialogue tasks. RAGEN [Wang et al., 2025] introduces trajectory filtering and critic modules within a multi-turn RL framework to stabilize learning and reduce shallow policy behaviors." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.42, + 0.453, + 0.436 + ], + "angle": 0, + "content": "B Limitation and Future Work" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.451, + 0.825, + 0.548 + ], + "angle": 0, + "content": "In this work, we only test ReMA on math and LLM-as-a-Judge benchmarks. Though the results show the effectiveness of ReMA, adopting ReMA to tasks where naturally needs multi-turn interaction between several interleaved agents has great potential. Moreover, a comprehensive understanding of the learning dynamics of multi-turn RL and multi-turn MARL for LLMs is needed. Finally, there's still sufficient space to further improve the procedure of multi-turn multi-agent rollout through modern LLM speed up techniques, e.g. prefetch-decode disaggregation and asynchronous rollout." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.569, + 0.627, + 0.587 + ], + "angle": 0, + "content": "C Supplementary Materials for Method in Section 3" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.601, + 0.444, + 0.617 + ], + "angle": 0, + "content": "C.1 Inference-time Scaling of ReMA" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.627, + 0.825, + 0.683 + ], + "angle": 0, + "content": "In this section, we discuss how to enhance the inference-time computation of our hierarchical system, specifically focusing on the interaction between the high-level and low-level agents. 
The total number of model samples required for inference is determined by the product of the sampling budget allocated to each agent." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.689, + 0.825, + 0.731 + ], + "angle": 0, + "content": "For instance, in a simple single-turn setting, if the high-level agent samples \\( k_{1} \\) responses and each of these responses leads to \\( k_{2} \\) samples from the low-level agent, the total number of model calls required is:" + }, + { + "type": "equation", + "bbox": [ + 0.412, + 0.732, + 0.584, + 0.747 + ], + "angle": 0, + "content": "\\[\n\\text {T o t a l s a m p l e s} = k _ {1} \\times k _ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.825, + 0.822 + ], + "angle": 0, + "content": "Given a fixed computational budget, an important question arises: how should the sampling budget be distributed between the high-level and low-level agents to maximize performance? Allocating more samples to the high-level agent may increase diversity in reasoning strategies while allocating more to the low-level agent may yield more refined solutions for a given metacognitive plan." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.829, + 0.825, + 0.857 + ], + "angle": 0, + "content": "Another crucial consideration is how to perform reranking on the final outputs. Two potential strategies include:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "- Hierarchical reranking: First, for each high-level response, rank and aggregate the low-level responses under it. Then, rank the aggregated results across different high-level responses." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.217, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "- Flat reranking: Directly rank all sampled responses together, regardless of the hierarchy of high-level reasoning steps." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.132, + 0.826, + 0.175 + ], + "angle": 0, + "content": "Balancing sampling allocation and designing an effective reranking strategy are key challenges in efficiently scaling our multi-agent reasoning system. In the next section, we explore empirical results comparing different allocation strategies and ranking methods." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.19, + 0.379, + 0.206 + ], + "angle": 0, + "content": "C.2 Detailed reward design" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.215, + 0.825, + 0.245 + ], + "angle": 0, + "content": "As described in Sec. 3.2, we update both high-level and low-level agents by assigning rewards based on the low-level policy output. Below, we outline several potential reward designs:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.254, + 0.825, + 0.284 + ], + "angle": 0, + "content": "- Correctness reward: For tasks with explicit ground truth, we assign rewards based on the correctness of the low-level agent's output." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.286, + 0.825, + 0.315 + ], + "angle": 0, + "content": "- Format reward: For tasks that require a specific output format, we enforce adherence to the prescribed structure by providing a format reward." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.318, + 0.825, + 0.362 + ], + "angle": 0, + "content": "- To encourage the high-level agent to generate informative and unambiguous meta-thinking, and to stabilize the low-level outputs, we reward the high-level agent when the low-level agent produces consistent responses. 
Specifically, the consistency reward is defined as" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.254, + 0.825, + 0.362 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.404, + 0.367, + 0.652, + 0.399 + ], + "angle": 0, + "content": "\\[\nR _ {h} = \\frac {\\text {m a x o c c u r r e n c e o f a n a n s w e r}}{\\text {t o t a l n u m b e r o f r e s p o n s e s}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.407, + 0.825, + 0.451 + ], + "angle": 0, + "content": "To examine multi-agent metacognition-integrated reasoning with different reward designs, we experiment with different reward function designs to encourage effective collaboration and structured reasoning. Below, we introduce and justify several reward schemes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.463, + 0.826, + 0.52 + ], + "angle": 0, + "content": "1. Correctness and Format-Aware Reward (Base Setting) In our primary reward setting, the system's overall correctness is used as the primary reward signal, supplemented by format-based rewards for both the high-level and low-level agents. Using mathematical problem-solving as an example:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.531, + 0.825, + 0.561 + ], + "angle": 0, + "content": "- Low-level agent \\((\\pi_{\\theta_l})\\): Receives a reward of \\(+1.0\\) for a correct answer. If the answer is incorrect, the agent is further penalized based on format compliance. Specifically:" + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.563, + 0.825, + 0.59 + ], + "angle": 0, + "content": "- If the output contains the designated answer-indicating format (e.g., boxed in Latex), it receives \\(-0.5\\)." + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.593, + 0.825, + 0.621 + ], + "angle": 0, + "content": "- Otherwise, it receives \\(-1.0\\), as a missing format often suggests an incomplete or unstructured response." 
+ }, + { + "type": "list", + "bbox": [ + 0.245, + 0.563, + 0.825, + 0.621 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.625, + 0.826, + 0.682 + ], + "angle": 0, + "content": "- High-level agent \\((\\pi_{\\theta_h})\\): Receives the average correctness of the low-level agent's sampled responses as its reward. Additionally, to prevent the high-level agent from directly generating explicit answers instead of guiding reasoning, a strong penalty of \\(-1.0\\) is applied if it includes an explicit answer format (e.g., boxed)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.695, + 0.826, + 0.753 + ], + "angle": 0, + "content": "2. Consistency-Based Reward Instead of using correctness as the high-level reward signal, this approach rewards the high-level agent for promoting consistent responses from the low-level agent, regardless of actual correctness. The consistency reward is defined as the proportion of the most frequently occurring answer among all sampled responses:" + }, + { + "type": "equation", + "bbox": [ + 0.377, + 0.768, + 0.825, + 0.8 + ], + "angle": 0, + "content": "\\[\nR _ {h} = \\frac {\\text {m a x o c c u r r e n c e o f a n a n s w e r}}{\\text {t o t a l n u m b e r o f r e s p o n s e s}} \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.808, + 0.825, + 0.866 + ], + "angle": 0, + "content": "If the majority of responses do not contain a definitive answer, the reward is set to zero. We also add the format penalty to the high-level agent if its output contains the designated answer-indicating format. This incentivizes the high-level agent to guide the low-level agent toward more stable, detailed, reproducible outputs rather than erratic reasoning paths." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "These different reward formulations allow us to investigate various dimensions of metacognitive reasoning: correctness, consistency, etc. We empirically compare their effects on learned metacognitive reasoning patterns in Sec. E.1." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.202, + 0.096, + 0.764, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.319, + 0.825, + 0.39 + ], + "angle": 0, + "content": "Figure 7: Our method can be viewed as a combination of practical TRPO and block coordinate ascent, with the high and low-level models treated as distinct components within a larger neural network. Note that the figure does not represent the exact gradient back-propagation flow but rather highlights the key idea that we separate the high- and low-level models. This separation allows for the independent computation of gradients and the independent training of each model." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.423, + 0.367, + 0.436 + ], + "angle": 0, + "content": "C.3 Pseudocode of ReMA" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.452, + 0.449, + 0.467 + ], + "angle": 0, + "content": "The pseudocode is shown in Algorithm 1." + }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.489, + 0.404, + 0.504 + ], + "angle": 0, + "content": "Algorithm 1 Single turn MAMRP" + }, + { + "type": "algorithm", + "bbox": [ + 0.174, + 0.508, + 0.825, + 0.744 + ], + "angle": 0, + "content": "Require: High-level policy \\(\\pi_h\\), Low-level policy \\(\\pi_l\\), Dataset \\(\\mathcal{D}\\), Optimizers for \\(\\pi_h\\) and \\(\\pi_l\\). 
\\(\\varepsilon_{\\mathrm{min}}, \\varepsilon_{\\mathrm{max}}\\) to filter training dataset \n1: Initialize \\(\\pi_h\\) and \\(\\pi_l\\) \n2: while not converged do \n3: build training dataset \\(\\mathcal{D}_l\\) with \\(\\pi_h, \\pi_l, \\varepsilon_{\\mathrm{min}}, \\varepsilon_{\\mathrm{max}}\\) \n4: for Sample \\((\\mathbf{x}, \\mathbf{m}, \\mathbf{y}^*) \\sim \\mathcal{D}_l\\) do \n5: Generate \\(\\mathbf{y} \\sim \\pi_l(\\mathbf{x}, \\mathbf{m})\\) \n6: Compute low-level reward \\(R_l(\\mathbf{y}, \\mathbf{y}^*)\\) \n7: Update \\(\\pi_l\\) using \\(\\nabla_{\\theta_l} \\mathbb{E}[R_l]\\) \n8: end for \n9: build training dataset \\(\\mathcal{D}_h\\) with \\(\\pi_h, \\pi_l, \\varepsilon_{\\mathrm{min}}, \\varepsilon_{\\mathrm{max}}\\) \n10: for Sample \\((\\mathbf{x}, \\mathbf{y}^*) \\sim \\mathcal{D}_h\\) do \n11: Generate \\(\\mathbf{m} \\sim \\pi_h(\\mathbf{x})\\) and \\(\\mathbf{y} \\sim \\pi_l(\\mathbf{x}, \\mathbf{m})\\) \n12: Compute high-level reward \\(R_h(\\mathbf{m}, \\mathbf{y}, \\mathbf{y}^*)\\) \n13: Update \\(\\pi_h\\) using \\(\\nabla_{\\theta_h} \\mathbb{E}[R_h]\\) \n14: end for \n15: end while" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.793, + 0.402, + 0.808 + ], + "angle": 0, + "content": "C.4 Brief convergence analysis" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.822, + 0.825, + 0.879 + ], + "angle": 0, + "content": "We reuse the notations from Sec. 3.2, where \\(\\mathbf{x}\\) is task prompt, \\(\\mathbf{y}\\) is generated answer, \\(\\mathbf{y}^*\\) is groundtruth, \\(\\mathbf{m}\\) is metacognition on task solving, \\(\\pi_{\\theta_h}\\) and \\(\\pi_{\\theta_l}\\) are high- and low-level agents with parameters \\(\\theta_h\\) and \\(\\theta_l\\). We consider the joint hierarchical policy defined in Eq. (8) and update the objective as in Eq. (9)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "To leverage existing RL and optimization convergence analysis methods, we treat the two models as components of a larger model, as illustrated in Fig. 7. When updating one model, we treat the other" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.172, + 0.092, + 0.741, + 0.107 + ], + "angle": 0, + "content": "model as part of a stationary environment. The gradients with respect to \\(\\theta_h\\) and \\(\\theta_l\\) are:" + }, + { + "type": "equation", + "bbox": [ + 0.227, + 0.113, + 0.768, + 0.147 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {\\theta_ {h}} J (\\theta_ {h}, \\theta_ {l}) = \\mathbb {E} _ {\\mathbf {x}, \\mathbf {y} ^ {*}} \\sum_ {\\mathbf {m} \\sim \\pi_ {h} (\\mathbf {m} | \\mathbf {x}; \\theta_ {h})} \\nabla_ {\\theta_ {h}} \\pi_ {h} (\\mathbf {m} | \\mathbf {x}; \\theta_ {h}) \\left[ \\mathbb {E} _ {\\mathbf {y} \\sim \\pi_ {l} (\\mathbf {y} | \\mathbf {x}, \\mathbf {m})} R (\\mathbf {y}, \\mathbf {y} ^ {*}) \\right],\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.23, + 0.149, + 0.672, + 0.184 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {\\theta_ {l}} J (\\theta_ {h}, \\theta_ {l}) = \\mathbb {E} _ {\\mathbf {x}, \\mathbf {y} ^ {*}} \\sum_ {\\mathbf {y} \\sim \\pi (\\theta_ {h}, \\theta_ {l})} \\nabla_ {\\theta_ {l}} \\pi_ {l} (\\mathbf {y} \\mid \\mathbf {x}, \\mathbf {m}; \\theta_ {h}); \\theta_ {l}) R (\\mathbf {y}, \\mathbf {y} ^ {*}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.189, + 0.825, + 0.217 + ], + "angle": 0, + "content": "We can compute the gradients with log trick and estimate \\(\\mathbb{E}_{\\mathbf{y}\\sim \\pi_l(\\mathbf{y}|\\mathbf{x},\\mathbf{m})}R(\\mathbf{y},\\mathbf{y}^*)\\) with Monte Carlo method." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.224, + 0.825, + 0.253 + ], + "angle": 0, + "content": "Equipped with the objective function and gradient computation, we update the models iteratively. Without loss of generality, we analyze the case where the high-level policy is updated first:" + }, + { + "type": "equation", + "bbox": [ + 0.39, + 0.257, + 0.594, + 0.284 + ], + "angle": 0, + "content": "\\[\n\\theta_ {h} ^ {(t + 1)} = \\arg \\max _ {\\theta_ {h}} J (\\theta_ {h}, \\theta_ {l} ^ {(t)}),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.393, + 0.286, + 0.605, + 0.311 + ], + "angle": 0, + "content": "\\[\n\\theta_ {l} ^ {(t + 1)} = \\arg \\max _ {\\theta_ {l}} J \\left(\\theta_ {h} ^ {(t + 1)}, \\theta_ {l}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.317, + 0.825, + 0.373 + ], + "angle": 0, + "content": "Regarding the different regularizations \\( R_{h} \\) and \\( R_{l} \\) in Eqs. (10) and (11) for the different policies, instead of directly integrating them into the loss function, we treat them as constraints, as done in Trust Region Policy Optimization (TRPO) [Schulman et al., 2015]. Note that when one policy is fixed, the other policy operates in a stationary decision process." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.38, + 0.825, + 0.436 + ], + "angle": 0, + "content": "Based on the defined objective and update method, we apply TRPO and block coordinate ascent. First, recall that when updating a single policy, TRPO guarantees monotonic improvement by optimizing a lower bound. Specifically, let \\(\\pi_{\\mathrm{old}}\\) and \\(\\pi\\) represent the old and current policies, respectively. 
We define a surrogate objective as:" + }, + { + "type": "equation", + "bbox": [ + 0.336, + 0.441, + 0.662, + 0.475 + ], + "angle": 0, + "content": "\\[\nL _ {\\pi_ {\\mathrm {o l d}}} (\\pi) = \\mathbb {E} _ {s \\sim \\pi_ {\\mathrm {o l d}}, a \\sim \\pi_ {\\mathrm {o l d}}} \\left[ \\frac {\\pi (a | s)}{\\pi_ {\\mathrm {o l d}} (a | s)} A ^ {\\pi_ {\\mathrm {o l d}}} (s, a) \\right],\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.478, + 0.708, + 0.493 + ], + "angle": 0, + "content": "As shown by Schulman et al. [2015], the true objective of \\(\\pi\\) is lower-bounded by:" + }, + { + "type": "equation", + "bbox": [ + 0.325, + 0.497, + 0.673, + 0.519 + ], + "angle": 0, + "content": "\\[\nJ (\\pi) \\geq L _ {\\pi_ {\\mathrm {o l d}}} (\\pi) - C \\cdot \\max _ {s} \\mathrm {K L} [ \\pi_ {\\mathrm {o l d}} (\\cdot | s), \\pi (\\cdot | s) ],\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.524, + 0.825, + 0.566 + ], + "angle": 0, + "content": "for some constant \\( C \\). By optimizing the right-hand side of the above inequality, we are guaranteed to improve the performance of \\( \\pi \\). Therefore, for policies \\( \\pi^t \\) and \\( \\pi^{t + 1} \\) obtained from iterations \\( t \\) and \\( t + 1 \\) using the TRPO method, we have:" + }, + { + "type": "equation", + "bbox": [ + 0.436, + 0.57, + 0.56, + 0.589 + ], + "angle": 0, + "content": "\\[\nJ (\\pi^ {t + 1}) \\geq J (\\pi^ {t}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.599, + 0.825, + 0.667 + ], + "angle": 0, + "content": "Now, returning to our updating method, we treat the high- and low-level policies as two blocks of a single agent. The iterative update process can thus be viewed as a cyclic block coordinate ascent, where the two policies are updated in a fixed order. 
By updating each block using the TRPO method, and improving the surrogate objective within the KL constraint, each block update does not decrease \\( J \\):" + }, + { + "type": "equation", + "bbox": [ + 0.4, + 0.673, + 0.567, + 0.69 + ], + "angle": 0, + "content": "\\[\nJ \\left(\\theta_ {h} ^ {t + 1}, \\theta_ {l} ^ {t}\\right) \\geq J \\left(\\theta_ {h} ^ {t}, \\theta_ {l} ^ {t}\\right),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.402, + 0.693, + 0.597, + 0.712 + ], + "angle": 0, + "content": "\\[\nJ \\left(\\theta_ {h} ^ {t + 1}, \\theta_ {l} ^ {t + 1}\\right) \\geq J \\left(\\theta_ {h} ^ {t + 1}, \\theta_ {l} ^ {t}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.723, + 0.825, + 0.754 + ], + "angle": 0, + "content": "Thus \\( J(\\theta_h^{t + 1},\\theta_l^{t + 1})\\geq J(\\theta_h^t,\\theta_l^t) \\). This repeated coordinate maximization converges to a fixed point, where no single coordinate update can further improve \\( J(\\theta_h,\\theta_l) \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.759, + 0.825, + 0.802 + ], + "angle": 0, + "content": "Given the theoretical monotonic improvement with TRPO and block coordinate ascent, we adopt a practical version of TRPO in our experiments, specifically Proximal Policy Optimization (PPO) [Schulman et al., 2017] or GRPO [Shao et al., 2024]." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.817, + 0.679, + 0.832 + ], + "angle": 0, + "content": "C.5 Learning to reason from the perspective of Leader Follower Game" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.826, + 0.913 + ], + "angle": 0, + "content": "Besides the loss function in the main part, we also propose to frame the problem as a leader-follower game. By analyzing the equilibria of the leader-follower game, we demonstrate that our framework inherently identifies the optimal sub-tasks aligned with the capabilities of the low-level model. 
This ensures that the high-level decisions are guided by the low-level model's strengths, leading to more efficient and targeted task decomposition." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.092, + 0.383, + 0.108 + ], + "angle": 0, + "content": "C.5.1 Leader-follower game" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.115, + 0.825, + 0.214 + ], + "angle": 0, + "content": "The leader-follower game, also known as the Stackelberg game, models interaction between two agents with parametrized strategies \\(\\pmb{\\theta} = (\\pmb{\\theta}_1, \\pmb{\\theta}_2)\\) and differentiable objective functions \\((\\mathcal{L}_1, \\mathcal{L}_2): \\mathbb{R}^d \\to \\mathbb{R}\\). In this framework, the leader announces its strategy first, and the follower observes this decision to respond optimally. This sequential structure enables the leader to anticipate the follower's reaction and adjust its strategy accordingly. A Stackelberg equilibrium occurs when neither agent can unilaterally improve its objective. 
Denoting \\(\\pmb{\\theta}_1\\) as the leader's strategy and \\(\\pmb{\\theta}_2\\) as the follower's, the loss functions \\(\\mathcal{L}_1\\) and \\(\\mathcal{L}_2\\) are optimized with the following bi-level structure:" + }, + { + "type": "equation", + "bbox": [ + 0.282, + 0.219, + 0.713, + 0.238 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\theta} _ {1} ^ {*} = \\operatorname {a r g m i n} _ {\\boldsymbol {\\theta} _ {1}} \\mathcal {L} _ {1} (\\boldsymbol {\\theta}, \\boldsymbol {\\theta} _ {2} ^ {*} (\\boldsymbol {\\theta} _ {1})), \\quad \\boldsymbol {w} _ {2} ^ {*} (\\boldsymbol {\\theta} _ {1}) = \\operatorname {a r g m i n} _ {\\boldsymbol {\\theta} _ {2}} \\mathcal {L} _ {2} (\\boldsymbol {\\theta} _ {1}, \\boldsymbol {\\theta} _ {2}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.247, + 0.825, + 0.345 + ], + "angle": 0, + "content": "Anil et al. [2021] apply the leader-follower game to ensure checkable answers in a prover-verifier game (PVG). The objective is a verifier that is both complete (accepts all correct proofs from a verifier) and sound (rejects all incorrect proofs from a verifier). They analyze different scenarios where the verifier acts as the leader, the prover as the follower, and both announce strategies simultaneously, forming a Nash equilibrium. The study concludes that in verifier-led SVG, a Stackelberg equilibrium is both necessary and sufficient for achieving a sound and complete verifier, whereas in other configurations, a Stackelberg equilibrium is not necessary or sufficient for this outcome." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.359, + 0.345, + 0.373 + ], + "angle": 0, + "content": "C.5.2 Efficacy of LLM" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.382, + 0.825, + 0.519 + ], + "angle": 0, + "content": "Because the high-level policy possesses strong generalization capabilities, it is impractical for it to exhaustively explore every potential sub-task for each question. 
Instead, it naturally focuses on tasks within a feasible range of difficulty, leveraging only a limited set of coarse planning actions. Rather than pinpointing perfectly tailored sub-tasks, the policy searches for general tasks of particular computational complexity, i.e., difficulty, that it can handle reliably. Motivated by this perspective, we incorporate the concept of a reasoning boundary for large language models (LLMs) [Chen et al., 2024b]. Intuitively, the reasoning boundary circumscribes the maximum difficulty of problems a model can solve at a desired accuracy level. Formally, for a model \\(\\theta\\), a task \\(t\\), and a predefined threshold \\(A\\), the reasoning boundary of \\(\\theta\\) represents the maximum problem difficulty \\(d\\) that satisfies:" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.525, + 0.637, + 0.549 + ], + "angle": 0, + "content": "\\[\n\\mathcal {B} _ {A c c = A} (t | \\theta) = \\sup _ {d} \\{d | A c c (t | d, \\theta) = A \\}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.555, + 0.825, + 0.611 + ], + "angle": 0, + "content": "where \\(d\\) denotes the problem difficulty. By quantifying the difficulty level a model can reliably handle, the reasoning boundary provides a systematic way to align the high-level policy's focus with the model's actual capabilities, gauge the efficacy of the low-level policy, and determine the optimal strategy for solving the question." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.625, + 0.53, + 0.641 + ], + "angle": 0, + "content": "C.5.3 Leader-follower Game for LLM Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.649, + 0.825, + 0.678 + ], + "angle": 0, + "content": "Our goal is to find the high-level policy that searches for the sub-task sequence based on the efficacy of the low-level policy to solve the question. 
We design the loss functions as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.315, + 0.684, + 0.671, + 0.7 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {h} = \\mathbb {E} _ {(x, y) \\sim p _ {D}, t _ {1: K}} \\left[ - \\log \\pi_ {l} \\left(y _ {K} \\mid x, t _ {1: K}, y _ {1: K - 1}\\right) \\right],\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.315, + 0.703, + 0.684, + 0.718 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {l} = \\mathbb {E} _ {x \\sim p _ {D}, t _ {1: k} \\sim \\pi_ {h}, \\hat {y} _ {k} \\sim \\pi_ {l}} \\left[ - r \\left(y _ {k}, \\hat {y} _ {k} \\mid x, t _ {1: k}, y _ {1: k - 1}\\right) \\right],\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.723, + 0.825, + 0.793 + ], + "angle": 0, + "content": "where \\( r(y_k, \\hat{y}_k \\mid x, t_{1:k}, y_{1:k-1}) \\) represents the step reward for the correctness of \\( \\hat{y}_k \\) derived from the question \\( x \\), the sub-task sequence \\( t_{1:k} \\) from the high policy and prior intermediate answer \\( y_{1:k-1} \\). The loss functions can be interpreted as follows: the high-level policy is incentivized to find subtasks that lead to the correct answer based on the capabilities of the low-level policy, while the low-level policy is incentivized to enhance its instruction-following ability." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.799, + 0.825, + 0.87 + ], + "angle": 0, + "content": "How to minimize the loss functions and whether such minimization leads to the desired results remain questions. To explore this, we consider a simplified case of our method, where the high-level policy plans the complete sub-task sequence at the beginning and the low-level executes the instruction in a single interaction. The corresponding parameterized policies are defined as \\(\\pi_h((t_1,\\ldots ,t_K)\\mid x)\\) and \\(\\pi_l((\\hat{y}_1,\\dots ,\\hat{y}_K)\\mid x,(t_1,\\dots ,t_K))\\) . 
The corresponding loss functions are:" + }, + { + "type": "equation", + "bbox": [ + 0.314, + 0.875, + 0.823, + 0.89 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {h} = \\mathbb {E} _ {(x, y) \\sim p _ {D}, t _ {1: K}} \\left[ - \\log \\pi_ {l} \\left(y _ {K} \\mid x, t _ {1: K}\\right) \\right], \\tag {15}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.315, + 0.894, + 0.823, + 0.909 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {l} = \\mathbb {E} _ {x \\sim p _ {D}, t _ {1: k} \\sim \\pi_ {h}, \\hat {y} _ {k} \\sim \\pi_ {l}} \\left[ - r \\left(y _ {k}, \\hat {y} _ {k} \\mid x, t _ {1: k}, y _ {1: k - 1}\\right) \\right]. \\tag {16}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "In this step, the high-level policy generates the entire sub-task sequence without relying on intermediate answers, while the low-level policy follows the sequence to produce answers for the sub-tasks. The low-level policy can still leverage prior intermediate answers to sequentially refine its responses." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.825, + 0.239 + ], + "angle": 0, + "content": "To analyze the result agents by minimizing the loss functions, we adopt the completeness and soundness properties from the PVG framework for LLM reasoning. Specifically, if the high-level policy generates a sub-task sequence that is executable within the low-level policy's capabilities, the problem must be solved (completeness). Conversely, if the sub-task sequence is incorrect or beyond the low-level policy's capacity, the problem cannot be solved (soundness). To achieve this, we utilize the conclusion from Anil et al. 
[2021], which positions the low-level policy as the leader and the high-level policy as the follower, equilibria guarantee the complete and sound low-level policy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.243, + 0.827, + 0.382 + ], + "angle": 0, + "content": "When the high-level policy takes the lead, the low-level policy is forced to adapt to the specific strategy defined by the high-level policy, which can result in neither complete nor sound low-level policy. For example, if the high-level policy dictates that it will only generate sub-tasks involving addition and subtraction, the low-level policy is constrained to optimize only for these tasks. While they may reach an equilibrium, the low-level policy remains incomplete, and this limitation impacts both policies. In the case of the simultaneous PVG game, convergence to a Nash equilibrium is possible, but it is not sufficient for completeness and soundness. For instance, the low-level policy might disregard the high-level policy entirely (e.g., if the high-level provides incorrect instructions, but the low-level still performs correctly). This approach, however, is challenging to implement due to the significantly larger search space involved." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.388, + 0.825, + 0.472 + ], + "angle": 0, + "content": "Furthermore, the loss functions we design ensure that, at a Stackelberg equilibrium, the high-level policy identifies sub-task sequences that the low-level policy can execute to solve the problem with the highest probability. With the low-level policy acting as the leader, it establishes its reasoning boundary for tasks. Based on the reasoning boundary, let \\(\\theta_h\\) and \\(\\theta_l\\) represent the policy parameters for the high-level and low-level policies, respectively. 
The probability that the low-level policy correctly solves the question is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.368, + 0.477, + 0.629, + 0.518 + ], + "angle": 0, + "content": "\\[\n\\pi_ {l} \\left(y _ {K} \\mid x, t _ {1: K}\\right) = \\prod_ {k = 1} ^ {K} \\operatorname {A c c} \\left(t _ {k} \\mid x, \\theta_ {l}\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.524, + 0.825, + 0.58 + ], + "angle": 0, + "content": "where we can compute the difficulty \\( d_{k} \\) from \\( t_k \\) and \\( x \\). where the difficulty \\( d_{k} \\) can be derived from \\( t_k \\) and \\( x \\). The loss function in Eq. (15) ensures that the selected sub-tasks are optimal for the low-level policy. Here we provide a theoretical condition under which the most efficient solution strategy can be identified, according to the efficacy of the LLM." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.586, + 0.825, + 0.67 + ], + "angle": 0, + "content": "This approach can be viewed as a game between a high-level \"prover\" and a low-level \"verifier\". The verifier, representing the low-level policy, adheres the high-level policy's instructions to validate its reasoning. Unlike the classic PVG setting, where the prover has ground-truth labels, the label of our high-level policy depends on the tunable low-level policy. This distinction, where the low-level policy (leader) is inherently more complex, contrasts with traditional PVG setups and adds complexity due to the interdependence between the high- and low-level policies." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.675, + 0.825, + 0.718 + ], + "angle": 0, + "content": "By framing the problem-solving process as a leader-follower game, with the low-level policy designated as the leader, we can construct a bi-level optimization problem to identify an equilibrium. Following the formulation in Sec. 
C.5.1, the problem is expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.299, + 0.722, + 0.697, + 0.748 + ], + "angle": 0, + "content": "\\[\n\\theta_ {l} ^ {*} = \\underset {\\theta_ {l}} {\\arg \\min } \\mathcal {L} _ {l} (\\theta_ {h} ^ {*} (\\theta_ {l}), \\theta_ {l}) \\quad \\theta_ {h} ^ {*} (\\theta_ {l}) = \\underset {\\theta_ {l}} {\\arg \\min } \\mathcal {L} _ {h} (\\theta_ {h}, \\theta_ {l}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.753, + 0.518, + 0.768 + ], + "angle": 0, + "content": "Then we can apply bi-level optimization techniques." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.787, + 0.347, + 0.804 + ], + "angle": 0, + "content": "D Training Details" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.817, + 0.347, + 0.832 + ], + "angle": 0, + "content": "D.1 Single-turn ReMA" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.842, + 0.825, + 0.913 + ], + "angle": 0, + "content": "We refer to Appendix G for prompts we use during training. We implement the training pipeline with OpenRLHF [Hu et al., 2024a] which is a highly efficient codebase and is easy to scale up. We select REINFORCE++ to save resources and for efficient training. All experiments are conducted in a node of 8 NVIDIA A100 GPUs. We use bf16, Zero2, Flash-Attention and gradient checkpointing to run our experiments." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.825, + 0.189 + ], + "angle": 0, + "content": "During rollout, we set temperature \\(= 1.0\\), top_p \\(= 1.0\\), top_k \\(= -1\\), and use vLLM for inference acceleration. We set the max generation length to be 2048 and, the rollout batch size to be 1000. The number of samples per prompt is 4. During training, we use Adam Optimizer with a learning rate of 5e-7. We set the mini-batch size to be 500, and the clip ratio to be 0.2. 
Other hyperparameters, such as KL coefficients and the number of training episodes, were carefully tuned based on validation set performance to ensure robust and reliable results. To align with the hyperparameter in OpenRLHF, we use #Training Episode as the number of reinforcement learning epoch on the entire dataset." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.195, + 0.827, + 0.295 + ], + "angle": 0, + "content": "In ReMA, during prompt filtering of the high-level model, the high-level agent first samples 10 candidates for each question with \\( t = 1.0 \\), and for each output the low-level agents sample 1 solution with \\( t = 0.0 \\), then we select questions of success rate between \\( [\\varepsilon_{\\mathrm{min}}, \\varepsilon_{\\mathrm{max}}] \\). And for the low-level agent's prompt filtering, the high-level agent first samples 1 candidate for each question with \\( t = 0.0 \\) and for each output the low-level agents sample 10 solutions with \\( t = 1.0 \\), then we select questions of success rate between \\( [\\varepsilon_{\\mathrm{min}}, \\varepsilon_{\\mathrm{max}}] \\) and use the high-level agent to sample 4 meta-thoughts with \\( t = 1.0 \\) as the input." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.307, + 0.492, + 0.323 + ], + "angle": 0, + "content": "D.1.1 Supervised fine-tuning data collection" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.33, + 0.827, + 0.483 + ], + "angle": 0, + "content": "For experiments in Sec. 4.2.1, we collect expert data to enhance the reasoning pattern, i.e. \\( RL \\) from SFT. Specifically, we collect demonstration data from GPT-4o Mini on MATH training dataset (7.5k problems) Hendrycks et al. [2021] and use it to fine-tune the LLMs. The data generation follows these steps: First, we prompt GPT-4o Mini to produce metacognitive reasoning for high-level model training. Specifically, we use different prompts to instruct it to rewrite and decompose a given question without providing a final answer. 
We collect metacognitive reasoning using two predefined actions, \"rewrite\" and \"decompose\", which align with human approaches to complex problem-solving while preserving answer diversity. Next, we use the generated instructions to prompt GPT-4o Mini to follow the metacognitive steps and solve the question, obtaining SFT data for low-level policy training. Below, we present the prompts used for both high-level and low-level models. Prompts can be found in Appendix G.1.1." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.497, + 0.495, + 0.511 + ], + "angle": 0, + "content": "D.1.2 Dataset Curation of RewardBench970" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.533, + 0.825, + 0.561 + ], + "angle": 0, + "content": "Table 2: Performance on LLM-as-a-Judge benchmarks, trained on dataset under the loose setting. The two-agent workflow in ReMA" + }, + { + "type": "table", + "bbox": [ + 0.18, + 0.562, + 0.819, + 0.703 + ], + "angle": 0, + "content": "
ModelBenchmarkVRP(CoT)\\( \\mathbf{V R P_{R L}} \\)\\( \\mathbf{M R P_{R L}} \\)ReMA(Ours)
Llama3.1-8B-InstructRewardBench97071.2481.86 (+10.62)80.41 (+9.17)86.29 (+15.05)
JudgeBench51.7751.45 (-0.32)50.65 (-1.12)53.71 (+1.94)
Average61.5166.65 (+5.14)65.53 (+4.02)70.00 (+8.49)
Qwen2.5-7B-InstructRewardBench97086.4987.22 (+0.73)80.31 (-6.18)90.72 (+4.23)
JudgeBench58.3954.84 (-3.55)55.81 (-2.58)58.71 (+0.32)
Average72.4471.03 (-1.41)68.06 (-4.38)74.72 (+2.28)
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.718, + 0.825, + 0.748 + ], + "angle": 0, + "content": "We process the original dataset in RewardBench by splitting it into a training set containing 5,000 tuples of (instruction, response A, response B) and a test set with the remaining 970 tuples." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.753, + 0.664, + 0.768 + ], + "angle": 0, + "content": "To ensure a meaningful dataset split, we validate two separation strategies:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.778, + 0.825, + 0.806 + ], + "angle": 0, + "content": "- Loose setting: We only ensure that there is no direct overlap of tuples between the training and test sets." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.81, + 0.825, + 0.839 + ], + "angle": 0, + "content": "- Strict setting: We further enforce that no instruction appears in both the training and test sets. The results for this setting are presented in the main results (Table 1b)." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.778, + 0.825, + 0.839 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.849, + 0.825, + 0.878 + ], + "angle": 0, + "content": "Additionally, since the original RewardBench data originates from different subsets, we ensure that all original subsets are evenly represented in both the training and test sets." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Table 2 reports the learning performance of various methods under the loose dataset split setting. Compared to the results in Table 1b, ReMA significantly outperforms other RL tuning baselines" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "across all models, particularly on out-of-distribution (OOD) benchmarks. 
The consistent improvements on OOD datasets of these two settings suggest that ReMA enhances meta-thinking ability, resulting in better generalization across diverse task distributions." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.149, + 0.367, + 0.164 + ], + "angle": 0, + "content": "D.1.3 Training on MATH" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.173, + 0.825, + 0.215 + ], + "angle": 0, + "content": "VRP For Llama3-8B-Instruct, Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 12,6,6 for these 3 models respectively. For Llama3-8B-Instruct, we set the learning rate of 2e-7 for stable training." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.23, + 0.825, + 0.26 + ], + "angle": 0, + "content": "MRP For Llama3-8B-Instruct, Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 10,6,6 for these 3 models respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.273, + 0.825, + 0.329 + ], + "angle": 0, + "content": "MAMRP We use \\(\\varepsilon_{\\mathrm{min}} = 0.2, \\varepsilon_{\\mathrm{max}} = 0.8\\) for prompt filtering. We use the same #Training Episode=4 for all models, and for #Update Iteration, we use 3 for Llama3-8B-Instruct and Llama3.1-8B-Instruct, 10 for Qwen2.5-7B-Instruct. And we set the KL coefficient to be 1e-2 for all the 3 models." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.344, + 0.418, + 0.359 + ], + "angle": 0, + "content": "D.1.4 Training on Reward Bench" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.368, + 0.825, + 0.398 + ], + "angle": 0, + "content": "VRP For Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 4,6 for these 2 models respectively." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.411, + 0.825, + 0.44 + ], + "angle": 0, + "content": "MRP For Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 4,6 for these 2 models respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.455, + 0.825, + 0.526 + ], + "angle": 0, + "content": "MAMRP We set #Update Iteration=1 for all models. We set the KL coefficient to be 1e-2 for Llama3.1-8B-Instruct and 1e-2 for Qwen2.5-7B-Instruct all models. For Llama3.1-8B-Instruct, we use \\(\\varepsilon_{\\mathrm{min}} = 0.2\\), \\(\\varepsilon_{\\mathrm{max}} = 0.8\\) for prompt filtering and we use #Training Episode of 2 during training. For Llama3.1-8B-Instruct, we use \\(\\varepsilon_{\\mathrm{min}} = 0.1\\), \\(\\varepsilon_{\\mathrm{max}} = 0.9\\) for prompt filtering and we use #Training Episode of 1 during training." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.542, + 0.343, + 0.556 + ], + "angle": 0, + "content": "D.2 Multi-turn ReMA" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.567, + 0.825, + 0.623 + ], + "angle": 0, + "content": "We refer to Appendix G for prompts we use during training. We implement a multi-turn ReMA training pipeline with VeRL [Sheng et al., 2024] since it's easier to implement complex training pipeline with a single centralized controller. Similar to OpenRLHF, VeRL is also a highly efficient and scalable codebase for further development." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.629, + 0.825, + 0.727 + ], + "angle": 0, + "content": "For the multi-turn ReMA rollout, we use parameter sharing and simultaneous update by default. In details, we maintain two message lists with the system prompt of meta-thinking agent and reasoning agent respectively. During rollout, each agent acts as 'assistant' in its own message list and the other agent acts as 'user'. 
We use three hyperparameters to control the rollout length: (1) 'max_num_turns': the maximum number of turns for each trajectory. (2) 'max_response_length': the maximum number of tokens for each turn's response. (3) 'max_prompt_length': the maximum number of tokens for each trajectory." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.732, + 0.825, + 0.762 + ], + "angle": 0, + "content": "During training, we apply the collected message list to Qwen2.5-7B's chat template and build loss masks in order to compute the loss for all turns of one trajectory (message list)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.767, + 0.825, + 0.837 + ], + "angle": 0, + "content": "Moreover, for multi-turn ReMA rollout, unlike single agent single turn rollout, we need to carefully design the termination logic. Basically, we let the meta-thinking agent automatically decide when to finish the solving procedure, we use a special tag '[FINISH]' to indicate the end of the solving procedure. After we detect this tag, we will terminate trajectory after the reasoning agent generates its output." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.843, + 0.826, + 0.912 + ], + "angle": 0, + "content": "We also design other termination conditions to ensure the quality of the generated trajectories. If the last agent's response is too long, we will terminate the whole trajectory and setting the reward to 0. We also introduce a different version of format reward: we give a reward of 1.0 only if the reasoning agent's last turn response is correct and the meta-thinking agent's last turn response include '[FINISH]'. We use math_check as the default verifier." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.529, + 0.106 + ], + "angle": 0, + "content": "D.2.1 SFT data collection of multi-turn MAMRP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.118, + 0.825, + 0.203 + ], + "angle": 0, + "content": "We use GPT-4o to translate 817 samples in LIMO [Ye et al., 2025c] by prompting it to wrap each sentence with meta-thinking and reasoning tags. We use a temperature of 0. After filtering, we get 800 conversations for training. The prompt can be found in Appendix G.2.1. For supervised finetuning, we use LlamaFactory as the codebase and train the model for 3 epochs with a learning rate of 1e-5, consine learning rate scheduler, and batch size of 8. Use DeepSpeed Zero2 for distributed training." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.223, + 0.367, + 0.238 + ], + "angle": 0, + "content": "D.2.2 Training on MATH" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.249, + 0.825, + 0.334 + ], + "angle": 0, + "content": "For training of multi-turn ReMA on MATH, we use GRPO [Shao et al., 2024] as the default learning algorithm. We refer to Appendix G.2.2 for prompts. For experiment in Sec 4.3, we use sample 128 prompts, each with 16 trajectories. During training, we drop the KL loss term to improve the numerical stability. We use a learning rate of 1e-6, bfloat16 precision, FSDP backend for distributed training. We split the rollout data into 4 mini-batches for update. For the sake of numerical stability, we do pre-clip before computing the exponential of log-prob for a upperbound of 3.0." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.339, + 0.825, + 0.381 + ], + "angle": 0, + "content": "For the main result in Fig 5, we test different rollout configurations with a max_prompt_length of 4096, training for 500 steps. 
We use 32 NVIDIA A800 GPUs, the longest training cost about 40 hours due to large scale validation per 10 steps." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.387, + 0.825, + 0.43 + ], + "angle": 0, + "content": "For the ablation results in Fig 6, we use a tiny subset of MATH Level 3-5, training for 300 steps. Specifically, we sample 19 questions for every single type (133 instances in total). We use 8 NVIDIA A800 GPUs, the training cost about 30 hours" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.436, + 0.434, + 0.449 + ], + "angle": 0, + "content": "We test different rollout configurations:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.45, + 0.825, + 0.478 + ], + "angle": 0, + "content": "(1) max_num_turns=30, max_response_length=256, max_prompt_length=4096 (2) \nmax_num_turns=30, max_response_length=1024, max_prompt_length=3072" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.484, + 0.825, + 0.527 + ], + "angle": 0, + "content": "And for the experiment of separate parameter in multi-turn ReMA, we iteratively train each agent with the same configuration as above, but with a switch interval of 10 steps, starting from the metathinking agent." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.552, + 0.373, + 0.569 + ], + "angle": 0, + "content": "E Other Experiments" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.587, + 0.54, + 0.602 + ], + "angle": 0, + "content": "E.1 Reward functions shape cross-agent behaviors" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.615, + 0.825, + 0.727 + ], + "angle": 0, + "content": "We also investigate the impact of different reward function designs on ReMA's behavior. In addition to the base reward setting described in Appendix C.2, we evaluate a consistency-based reward function using Qwen2.5-7B-Instruct. This reward function is designed to encourage the high-level agent to generate more detailed guidance. 
Indeed, we observe that the high-level agent trained in this manner produces more detailed solution steps compared to the one trained with the basic correctness format reward. However, we also find that this approach often leads to jailbreak behavior, where the high-level agent tends to include the final answer within its output, compromising the intended hierarchical reasoning process." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.732, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Furthermore, we discover an interesting evolution of a pattern during training: although our experimental setup is designed for the high-level agent to provide a solution plan while the lower-level agent executes it, we find that under the consistency-based reward, the lower-level agent significantly increases its attempt of verification rather than straightforward execution. We observed a certain sentence commonly appearing in the low-level agent's responses: \"Let's go through the solution step by step to ensure clarity and correctness.\" To quantify this effect, we track the frequency of it. We analyze this pattern across all mathematical test sets, sampling eight completions per question at a temperature of 0.7. Our empirical results have identified a \\(30\\mathrm{x}\\) increase of such self-verifying patterns in the model trained with the consistency-based reward compared to the one trained with the base reward. Moreover, we also observe additional variations of this pattern, e.g. \"Let's carefully re-evaluate the problem and solution to ensure accuracy and clarity.\" These phrases indicate that the low-level agent is actively exploring to verify the detailed response provided by the high-level agent." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.162 + ], + "angle": 0, + "content": "This suggests that (1) meta-thinking can not only emerge and be reinforced in the high-level agent but also in the low-level agent. During reinforcement learning (RL) training, the two agents develop a novel problem-solving pattern characterized by a role reversal. (2) Consistency-based rewards promote a more self-corrective approach at the lower level, potentially disrupting the intended separation of roles between planning and execution. For a detailed case study, refer to Appendix F.2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.178, + 0.691, + 0.193 + ], + "angle": 0, + "content": "E.2 Detailed Training Curves on Different Datasets of Multi-turn ReMA" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.203, + 0.774, + 0.218 + ], + "angle": 0, + "content": "We show the detailed training curves of the multi-turn ReMA on different datasets in Fig. 8." + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.233, + 0.825, + 0.484 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.24, + 0.489, + 0.757, + 0.505 + ], + "angle": 0, + "content": "Figure 8: Detailed Training Curves on Different Datasets of Multi-turn ReMA" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.54, + 0.37, + 0.557 + ], + "angle": 0, + "content": "F Qualitative Results" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.57, + 0.456, + 0.586 + ], + "angle": 0, + "content": "F.1 High-level policy finds better plans" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.596, + 0.825, + 0.667 + ], + "angle": 0, + "content": "Here is an example of how a high-level policy alters the solving method of an LLM, increasing the likelihood of providing correct answers. 
As we can see from the following example, without the high-level policy, the LLM counts all integer coordinates, including those on the boundary, and then subtracts the boundary coordinates. In contrast, the high-level policy identifies a better approach, directly instructing the LLM to count only the coordinates strictly inside the boundary." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.682, + 0.506, + 0.697 + ], + "angle": 0, + "content": "F.2 Case study for Experiments in Section E.1" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.708, + 0.61, + 0.723 + ], + "angle": 0, + "content": "Fig. 10 and Fig. 11 show a case study of experiments in Sec. E.1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.728, + 0.825, + 0.812 + ], + "angle": 0, + "content": "Although both agents are prompted with the same instructions as in our main results, the consistency reward of the high-level agent significantly alters the learning dynamics. As illustrated in Fig. 10, the high-level agent generates detailed solution attempts rather than a strategic plan. Consequently, the low-level agent evolves to verify the high-level agent's solutions. This suggests that, during reinforcement learning (RL) training, the two agents develop a novel problem-solving pattern characterized by a role reversal, where one agent generates an answer while the other verifies it." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.828, + 0.517, + 0.843 + ], + "angle": 0, + "content": "F.3 Case study for Experiments in Section 4.2.2" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.853, + 0.825, + 0.909 + ], + "angle": 0, + "content": "Fig. 12 shows the detailed outputs of the two groups of LLMs trained in JSON action. The outputs finally converge to relatively easier and shorter sentences, compared to our main experiments. This may be due to insufficient capacity of a small LM to produce a valid JSON-format response while simultaneously exploring diverse reasoning strategies." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.188, + 0.1, + 0.806, + 0.121 + ], + "angle": 0, + "content": "Consider the rectangular region with the following points as vertices: \\((5,4),(-5,4),(-5, - 4),(5, - 4)\\) . How many points with integer coordinates will be strictly in the interior of this rectangular region?" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.127, + 0.466, + 0.146 + ], + "angle": 0, + "content": "Output without high-level planning solves this question in a less optimal way, leading to a wrong answer:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.152, + 0.496, + 0.18 + ], + "angle": 0, + "content": "To determine how many points with integer coordinates will be strictly in the interior of the rectangular region with vertices at \\((5,4)\\), \\((-5,4)\\), \\((-5, - 4)\\), and \\((5, - 4)\\), we will follow these steps:" + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.18, + 0.357, + 0.188 + ], + "angle": 0, + "content": "1. Identify the dimensions of the rectangle:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.189, + 0.493, + 0.206 + ], + "angle": 0, + "content": "- The width of the rectangle can be calculated as the distance between the x-coordinates of the vertices. The x-coordinates are \\(-5\\) and 5." + }, + { + "type": "equation", + "bbox": [ + 0.291, + 0.213, + 0.395, + 0.224 + ], + "angle": 0, + "content": "\\[\n\\mathrm {W i d t h} = 5 - (- 5) = 1 0\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.23, + 0.493, + 0.248 + ], + "angle": 0, + "content": "- The height of the rectangle can be calculated as the distance between the y-coordinates of the vertices. The y-coordinates are 4 and -4." 
+ }, + { + "type": "equation", + "bbox": [ + 0.293, + 0.255, + 0.393, + 0.265 + ], + "angle": 0, + "content": "\\[\n\\mathrm {H e i g h t} = 4 - (- 4) = 8\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.271, + 0.49, + 0.316 + ], + "angle": 0, + "content": "2. Calculate the total number of integer points: - The total number of integer points in the rectangle (including the boundary) can be calculated by considering the range of x and y values: - The x-values range from \\(-5\\) to 5, giving us 11 integer values \\((-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)\\). - The y-values range from \\(-4\\) to 4, giving us 9 integer values (" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.317, + 0.482, + 0.334 + ], + "angle": 0, + "content": "\\(-4, -3, -2, -1, 0, 1, 2, 3, 4)\\). The total number of points in the rectangle (including the boundary) is:" + }, + { + "type": "equation", + "bbox": [ + 0.285, + 0.341, + 0.402, + 0.351 + ], + "angle": 0, + "content": "\\[\n\\text {T o t a l p o i n t s} = 1 1 \\times 9 = 9 9\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.357, + 0.497, + 0.403 + ], + "angle": 0, + "content": "3. Calculate the number of points on the boundary: - The boundary consists of the four sides of the rectangle. - The top and bottom sides (y = 4 and y = -4) each have 11 integer points (from -5 to 5). - The left and right sides (x = 5 and x = -5) each have 9 integer points (from -4 to 4). - Therefore, the total number of points on the boundary is:" + }, + { + "type": "equation", + "bbox": [ + 0.236, + 0.409, + 0.45, + 0.419 + ], + "angle": 0, + "content": "\\[\n\\text {B o u n d a r y p o i n t s} = 2 \\times 1 1 + 2 \\times 9 = 2 2 + 1 8 = 4 0\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.425, + 0.488, + 0.453 + ], + "angle": 0, + "content": "4. 
Calculate the number of strictly interior points: - To find the number of points strictly in the interior, we subtract the points on the boundary from the total number of points:" + }, + { + "type": "equation", + "bbox": [ + 0.204, + 0.459, + 0.483, + 0.47 + ], + "angle": 0, + "content": "\\[\nI n t e r i o r \\text {p o i n t s} = \\text {T o t a l p o i n t s - B o u n d a r y p o i n t s} = 9 9 - 4 0 = 5 9\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.476, + 0.49, + 0.495 + ], + "angle": 0, + "content": "Thus, the number of points with integer coordinates that will be strictly in the interior of this rectangular region is 59." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.127, + 0.806, + 0.147 + ], + "angle": 0, + "content": "Answer with high-level planning identifies the optimal sub-tasks for the LLM to solve, increasing the chance of answering the question correctly" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.15, + 0.804, + 0.179 + ], + "angle": 0, + "content": "High-level policy planning: First, identify the boundaries of the rectangular region. Then, determine the integer coordinates that lie strictly within these boundaries. Finally, count the number of such points." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.189, + 0.795, + 0.208 + ], + "angle": 0, + "content": "Low-level policy reasoning following the high-policy plan: To solve the problem, we will follow the steps outlined in the instructions." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.215, + 0.801, + 0.262 + ], + "angle": 0, + "content": "Step 1: Identify the boundaries of the rectangular region. 
The vertices of the rectangular region are: \\((5,4)\\) \\((-5,4)\\) \\((-5, - 4)\\) \\((5, - 4)\\) From these vertices, we can determine the boundaries:- The left boundary is \\(x = -5\\) The right boundary is \\(x = 5\\) .- The bottom boundary is \\(y = -4\\) .- The top boundary is \\(y = 4\\)" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.269, + 0.781, + 0.297 + ], + "angle": 0, + "content": "Step 2: Determine the integer coordinates that lie strictly within these boundaries. For a point \\((x,y)\\) to be strictly inside the rectangle, it must satisfy: \\(-5 < x < 5\\) and \\(-4 < y < 4\\)" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.305, + 0.808, + 0.332 + ], + "angle": 0, + "content": "Step 3: Count the integer coordinates that satisfy these conditions. Now, we will find the integer values for \\( x \\) and \\( y \\) that fall within these boundaries. For \\( x \\) : The integer values that satisfy \\( -5 < x < 5 \\) are:" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.332, + 0.802, + 0.359 + ], + "angle": 0, + "content": "\\(-4, -3, -2, -1, 0, 1, 2, 3, 4\\) This gives us 9 possible integer values. For \\( y \\) The integer values that satisfy \\(-4 < y < 4\\) are: \\(-3, -2, -1, 0, 1, 2, 3\\). This gives us 7 possible integer values." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.367, + 0.807, + 0.386 + ], + "angle": 0, + "content": "Step 4: Calculate the total number of points. Now, we multiply the number of possible integer values for \\( x \\) by the number of possible integer values for \\( y \\)." + }, + { + "type": "equation", + "bbox": [ + 0.601, + 0.391, + 0.715, + 0.402 + ], + "angle": 0, + "content": "\\[\n\\text {T o t a l p o i n t s} = 9 \\times 7 = 6 3\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.408, + 0.797, + 0.428 + ], + "angle": 0, + "content": "Finally, the number of points with integer coordinates that lie strictly in the interior of the rectangular region is \\(\\boxed{63}\\)." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.223, + 0.516, + 0.772, + 0.532 + ], + "angle": 0, + "content": "Figure 9: Case Study comparing with and without high-level metacognition results." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.575, + 0.284, + 0.594 + ], + "angle": 0, + "content": "G Prompts" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.619, + 0.411, + 0.635 + ], + "angle": 0, + "content": "G.1 Single-turn ReMA prompts" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.653, + 0.466, + 0.669 + ], + "angle": 0, + "content": "G.1.1 Prompts for JSON data collection" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.686, + 0.485, + 0.701 + ], + "angle": 0, + "content": "Prompt for metacognition reasoning to rewrite:" + }, + { + "type": "code", + "bbox": [ + 0.177, + 0.709, + 0.818, + 0.891 + ], + "angle": 0, + "content": "System prompt: \nYou are a math expert trying to solve mathematical problems. \nBefore answering a question, your task is to rewrite the original question to make it clearer. \nProvide your rewritten content in JSON format: \n{ \"action\": \"REWRITE\", \"output\": \"{clearer question content}\" \n} \nRespond only with valid JSON. Do not write an introduction or summary. \nUser prompt: \nHere is the question: [problem_text]" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.898, + 0.51, + 0.913 + ], + "angle": 0, + "content": "Prompt for metacognition reasoning to decompose:" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.507, + 0.948 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "code", + "bbox": [ + 0.186, + 0.1, + 0.785, + 0.111 + ], + "angle": 0, + "content": "Question: The equation \\(x^{10} + (13x - 1)^{10} = 0\\) has 10 complex roots \\(r_1,\\overline{r}_1,r_2,\\overline{r}_2,r_3,\\overline{r}_3,r_4,\\overline{r}_4,r_5,\\overline{r}_5\\) , where the bar denotes complex conjugation. 
Find the value of \\(\\frac{1}{r_1\\overline{r}_1} +\\frac{1}{r_2\\overline{r}_2} +\\frac{1}{r_3\\overline{r}_3} +\\frac{1}{r_4\\overline{r}_4} +\\frac{1}{r_5\\overline{r}_5}\\)" + }, + { + "type": "title", + "bbox": [ + 0.295, + 0.114, + 0.381, + 0.121 + ], + "angle": 0, + "content": "High-Level Agent's Output" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.123, + 0.432, + 0.129 + ], + "angle": 0, + "content": "To solve this problem, let's break it down into steps that a low-level math-solving agent can follow:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.135, + 0.278, + 0.141 + ], + "angle": 0, + "content": "1. Understand the given equation:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.142, + 0.291, + 0.147 + ], + "angle": 0, + "content": "The equation is \\(x^{10} + (13x - 1)^{10} = 0\\)" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.148, + 0.25, + 0.153 + ], + "angle": 0, + "content": "2. Rewrite the equation:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.153, + 0.301, + 0.158 + ], + "angle": 0, + "content": "Notice that the equation can be rewritten as:" + }, + { + "type": "equation", + "bbox": [ + 0.31, + 0.166, + 0.366, + 0.174 + ], + "angle": 0, + "content": "\\[\nx ^ {1 0} = - (1 3 x - 1) ^ {1 0}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.179, + 0.245, + 0.184 + ], + "angle": 0, + "content": "3. Consider the roots:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.185, + 0.48, + 0.195 + ], + "angle": 0, + "content": "The roots of the equation are given as \\(r_1, \\overline{r}_1, r_2, \\overline{r}_2, r_3, \\overline{r}_3, r_4, \\overline{r}_4, r_5, \\overline{r}_5\\), where the bar denotes complex conjugation. 4. 
Use the property of complex conjugates:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.196, + 0.468, + 0.204 + ], + "angle": 0, + "content": "For any complex number \\(z\\) , the product of \\(z\\) and its conjugate \\(\\overline{z}\\) is the square of the modulus of \\(z\\) i.e., \\(z\\overline{z} = |z|^2\\) 5. Calculate the required sum:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.204, + 0.266, + 0.209 + ], + "angle": 0, + "content": "3. Calculate the required sum \\( \\mathbf{W} = \\mathbf{w}_{1} + \\mathbf{w}_{2} + \\dots + \\mathbf{w}_{n} \\)" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.185, + 0.48, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.286, + 0.221, + 0.389, + 0.235 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{r _ {1} \\overline {{r _ {1}}}} + \\frac {1}{r _ {2} \\overline {{r _ {2}}}} + \\frac {1}{r _ {3} \\overline {{r _ {3}}}} + \\frac {1}{r _ {4} \\overline {{r _ {4}}}} + \\frac {1}{r _ {5} \\overline {{r _ {5}}}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.191, + 0.238, + 0.329, + 0.244 + ], + "angle": 0, + "content": "Using the property from step 4, this can be rewritten as:" + }, + { + "type": "equation", + "bbox": [ + 0.286, + 0.253, + 0.391, + 0.267 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{| r _ {1} | ^ {2}} + \\frac {1}{| r _ {2} | ^ {2}} + \\frac {1}{| r _ {3} | ^ {2}} + \\frac {1}{| r _ {4} | ^ {2}} + \\frac {1}{| r _ {5} | ^ {2}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.271, + 0.257, + 0.276 + ], + "angle": 0, + "content": "6. Simplify the expression:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.277, + 0.483, + 0.286 + ], + "angle": 0, + "content": "Since \\( r_1, r_2, r_3, r_4, r_5 \\) are roots of the equation \\( x^{10} = -(13x - 1)^{10} \\), the magnitudes \\( |r_i| \\) are the same for each pair \\( r_i \\) and \\( \\overline{r}_i \\). Let \\( |r_i| = |r_j| \\) for all \\( i, j \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.188, + 0.287, + 0.272, + 0.292 + ], + "angle": 0, + "content": "7. Find the common magnitude:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.293, + 0.486, + 0.305 + ], + "angle": 0, + "content": "To find \\( |r_{1}| \\), consider the magnitudes of the terms on both sides of the equation \\( x^{10} = -(13x - 1)^{10} \\). The magnitude of the left side is \\( |x|^{10} \\) and the magnitude of the right side is \\( |13x - 1|^{10} \\). Therefore:" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.271, + 0.486, + 0.305 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.312, + 0.314, + 0.364, + 0.321 + ], + "angle": 0, + "content": "\\[\n\\left| x \\right| ^ {1 0} = \\left| 1 3 x - 1 \\right| ^ {1 0}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.191, + 0.326, + 0.296, + 0.332 + ], + "angle": 0, + "content": "Taking the 10th root of both sides, we get:" + }, + { + "type": "equation", + "bbox": [ + 0.318, + 0.341, + 0.36, + 0.348 + ], + "angle": 0, + "content": "\\[\n| x | = | 1 3 x - 1 |.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.352, + 0.228, + 0.357 + ], + "angle": 0, + "content": "8. Solve for \\( |x| \\):" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.358, + 0.238, + 0.363 + ], + "angle": 0, + "content": "Let \\( |x| = a \\). 
Then:" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.352, + 0.238, + 0.363 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.365, + 0.276, + 0.37 + ], + "angle": 0, + "content": "\\( \\therefore m - 1 \\neq 0 \\) ;" + }, + { + "type": "equation", + "bbox": [ + 0.318, + 0.373, + 0.357, + 0.38 + ], + "angle": 0, + "content": "\\[\na = \\left| 1 3 a - 1 \\right|.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.385, + 0.252, + 0.39 + ], + "angle": 0, + "content": "This gives us two cases:" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.391, + 0.214, + 0.396 + ], + "angle": 0, + "content": "- \\(a = 13a - 1\\)" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.397, + 0.237, + 0.402 + ], + "angle": 0, + "content": "- \\(a = -(13a - 1)\\)" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.403, + 0.246, + 0.407 + ], + "angle": 0, + "content": "Solving the first case:" + }, + { + "type": "list", + "bbox": [ + 0.188, + 0.385, + 0.252, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.282, + 0.416, + 0.394, + 0.429 + ], + "angle": 0, + "content": "\\[\na = 1 3 a - 1 \\Longrightarrow 1 2 a = 1 \\Longrightarrow a = \\frac {1}{1 2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.432, + 0.253, + 0.438 + ], + "angle": 0, + "content": "Solving the second case:" + }, + { + "type": "equation", + "bbox": [ + 0.28, + 0.446, + 0.396, + 0.459 + ], + "angle": 0, + "content": "\\[\na = - 1 3 a + 1 \\Longrightarrow 1 4 a = 1 \\Longrightarrow a = \\frac {1}{1 4}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.462, + 0.486, + 0.472 + ], + "angle": 0, + "content": "Since \\(a\\) must be the same for all roots, we need to check which value is consistent. Given the nature of the problem, we can assume \\(a = \\frac{1}{12}\\) as the correct magnitude." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.473, + 0.256, + 0.478 + ], + "angle": 0, + "content": "9. 
Calculate the final sum:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.479, + 0.276, + 0.484 + ], + "angle": 0, + "content": "Since \\(|r_i| = \\frac{1}{2}\\) for all \\(i\\), we have" + }, + { + "type": "list", + "bbox": [ + 0.187, + 0.462, + 0.486, + 0.484 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.307, + 0.494, + 0.369, + 0.51 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{| r _ {1} | ^ {2}} = \\frac {1}{(\\frac {1}{4}) ^ {2}} = 1 4 4.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.514, + 0.246, + 0.52 + ], + "angle": 0, + "content": "Therefore, the sum is:" + }, + { + "type": "equation", + "bbox": [ + 0.275, + 0.53, + 0.402, + 0.536 + ], + "angle": 0, + "content": "\\[\n1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 = 5 \\times 1 4 4 = 7 2 0.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.541, + 0.233, + 0.546 + ], + "angle": 0, + "content": "10. Final answer:" + }, + { + "type": "equation", + "bbox": [ + 0.245, + 0.546, + 0.328, + 0.552 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{\\frac {1}{n} + \\frac {1}{n}} + \\frac {1}{\\frac {1}{n} + \\frac {1}{n}} + \\frac {1}{\\frac {1}{n} + \\frac {1}{n}} \\text {i s} 7 2 0.\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.612, + 0.114, + 0.697, + 0.121 + ], + "angle": 0, + "content": "Low-Level Agent's Output" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.123, + 0.695, + 0.129 + ], + "angle": 0, + "content": "Let's go through the solution step by step to ensure clarity and correctness." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.134, + 0.588, + 0.139 + ], + "angle": 0, + "content": "1. Understand the given equation:" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.14, + 0.601, + 0.145 + ], + "angle": 0, + "content": "The equation is \\(x^{10} + (13x - 1)^{10} = 0\\)" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.145, + 0.561, + 0.151 + ], + "angle": 0, + "content": "2. 
Rewrite the equation:" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.152, + 0.611, + 0.157 + ], + "angle": 0, + "content": "Notice that the equation can be rewritten as:" + }, + { + "type": "list", + "bbox": [ + 0.497, + 0.134, + 0.611, + 0.157 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.626, + 0.166, + 0.682, + 0.174 + ], + "angle": 0, + "content": "\\[\nx ^ {1 0} = - (1 3 x - 1) ^ {1 0}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.179, + 0.555, + 0.184 + ], + "angle": 0, + "content": "3. Consider the roots:" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.185, + 0.789, + 0.195 + ], + "angle": 0, + "content": "The roots of the equation are given as \\(r_1, \\overline{r}_1, r_2, \\overline{r}_2, r_3, \\overline{r}_3, r_4, \\overline{r}_4, r_5, \\overline{r}_5\\), where the bar denotes complex conjugation. 4. Use the property of complex conjugates:" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.196, + 0.779, + 0.205 + ], + "angle": 0, + "content": "For any complex number \\(z\\), the product of \\(z\\) and its conjugate \\(\\overline{z}\\) is the square of the modulus of \\(z\\), i.e., \\(z\\overline{z} = |z|^2\\). 5. 
Calculate the required sum:" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.206, + 0.575, + 0.212 + ], + "angle": 0, + "content": "We need to find the value of:" + }, + { + "type": "list", + "bbox": [ + 0.497, + 0.185, + 0.789, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.603, + 0.221, + 0.706, + 0.235 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{r _ {1} F _ {1}} + \\frac {1}{r _ {2} F _ {2}} + \\frac {1}{r _ {3} F _ {3}} + \\frac {1}{r _ {4} F _ {4}} + \\frac {1}{r _ {5} F _ {5}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.238, + 0.639, + 0.244 + ], + "angle": 0, + "content": "Using the property from step 4, this can be rewritten as:" + }, + { + "type": "equation", + "bbox": [ + 0.602, + 0.252, + 0.706, + 0.266 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{| r _ {1} | ^ {2}} + \\frac {1}{| r _ {2} | ^ {2}} + \\frac {1}{| r _ {3} | ^ {2}} + \\frac {1}{| r _ {4} | ^ {2}} + \\frac {1}{| r _ {5} | ^ {2}} + \\frac {1}{| r _ {6} | ^ {2}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.271, + 0.567, + 0.276 + ], + "angle": 0, + "content": "6. Simplify the expression:" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.276, + 0.81, + 0.286 + ], + "angle": 0, + "content": "Since \\( r_1, r_2, r_3, r_4, r_5 \\) are roots of the equation \\( x^{10} = -(13x - 1)^{10} \\), the magnitudes \\( |r_i| \\) are the same for each pair \\( r_1 \\) and \\( \\overline{r}_i \\). Let \\( |r_i| = |r_i| \\) for all \\( i, j \\)." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.287, + 0.583, + 0.292 + ], + "angle": 0, + "content": "7. Find the common magnitude:" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.293, + 0.806, + 0.305 + ], + "angle": 0, + "content": "To find \\( |\\boldsymbol{r}_i| \\), consider the determinants of the terms on both sides of the equation \\( x^{10} = -(13x - 1)^{10} \\). 
The magnitude of the left side is \\( |z|^10 \\) and the magnitude of the right side is \\( |13z - 1|^{10} \\). Therefore:" + }, + { + "type": "list", + "bbox": [ + 0.497, + 0.271, + 0.81, + 0.305 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.628, + 0.313, + 0.681, + 0.321 + ], + "angle": 0, + "content": "\\[\n| x | ^ {1 0} = | 1 3 x - 1 | ^ {1 0}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.325, + 0.607, + 0.331 + ], + "angle": 0, + "content": "Taking the 10th root of both sides, we get:" + }, + { + "type": "equation", + "bbox": [ + 0.633, + 0.34, + 0.676, + 0.347 + ], + "angle": 0, + "content": "\\[\n\\left| x \\right| = \\left| 1 3 x - 1 \\right|.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.351, + 0.539, + 0.357 + ], + "angle": 0, + "content": "8. Solve for \\( |x| \\):" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.358, + 0.548, + 0.363 + ], + "angle": 0, + "content": "Let \\( |x| = a \\). Then:" + }, + { + "type": "equation", + "bbox": [ + 0.634, + 0.373, + 0.674, + 0.38 + ], + "angle": 0, + "content": "\\[\na = | 1 3 x - 1 |.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.385, + 0.562, + 0.39 + ], + "angle": 0, + "content": "This gives us two cases:" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.391, + 0.548, + 0.397 + ], + "angle": 0, + "content": "-α=13a-1" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.397, + 0.548, + 0.402 + ], + "angle": 0, + "content": "- \\(a = -(13a - 1)\\)" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.403, + 0.556, + 0.407 + ], + "angle": 0, + "content": "Solving the first case:" + }, + { + "type": "list", + "bbox": [ + 0.497, + 0.385, + 0.562, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.598, + 0.416, + 0.71, + 0.429 + ], + "angle": 0, + "content": "\\[\na = 1 3 a - 1 \\Longrightarrow 1 2 a = 1 \\Longrightarrow a = \\frac {1}{1 2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 
0.432, + 0.564, + 0.437 + ], + "angle": 0, + "content": "Solving the second case:" + }, + { + "type": "equation", + "bbox": [ + 0.597, + 0.446, + 0.712, + 0.459 + ], + "angle": 0, + "content": "\\[\na = - 1 3 a + 1 \\Rightarrow 1 4 a = 1 \\Rightarrow a = \\frac {1}{1 4}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.462, + 0.806, + 0.472 + ], + "angle": 0, + "content": "Since \\( a \\) must be the same for all roots, we need to check which value is consistent. Given the nature of the problem, we can assume \\( a = \\frac{1}{2} \\) as the correct magnitude." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.473, + 0.567, + 0.479 + ], + "angle": 0, + "content": "9. Calculate the final sum:" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.479, + 0.587, + 0.485 + ], + "angle": 0, + "content": "Since \\(|r_i| = \\frac{1}{12}\\) for all i, we have:" + }, + { + "type": "list", + "bbox": [ + 0.497, + 0.462, + 0.806, + 0.485 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.623, + 0.494, + 0.685, + 0.511 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{| r _ {i} | ^ {2}} = \\frac {1}{(\\mathrm {古}) ^ {2}} = 1 4 4.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.514, + 0.557, + 0.519 + ], + "angle": 0, + "content": "Therefore, the sum is:" + }, + { + "type": "equation", + "bbox": [ + 0.591, + 0.529, + 0.718, + 0.536 + ], + "angle": 0, + "content": "\\[\n1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 = 5 \\times 1 4 4 = 7 2 0.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.541, + 0.544, + 0.546 + ], + "angle": 0, + "content": "10. 
Final answer:" + }, + { + "type": "equation", + "bbox": [ + 0.5, + 0.546, + 0.642, + 0.553 + ], + "angle": 0, + "content": "\\[\n\\text {The v a l u e o f} \\frac {1}{r _ {1} r _ {1}} + \\frac {1}{r _ {2} r _ {2}} + \\frac {1}{r _ {3} r _ {3}} + \\frac {1}{r _ {4} r _ {4}} + \\frac {1}{r _ {5} r _ {5}} \\text {i s} \\boxed {7 2 0}.\n\\]" + }, + { + "type": "image_caption", + "bbox": [ + 0.285, + 0.57, + 0.713, + 0.585 + ], + "angle": 0, + "content": "Figure 10: Case Study for consistency reward of high-level agent" + }, + { + "type": "code", + "bbox": [ + 0.186, + 0.606, + 0.784, + 0.617 + ], + "angle": 0, + "content": "Question: The equation \\(x^{10} + (13x - 1)^{10} = 0\\) has 10 complex roots \\(r_1, \\overline{r}_1, r_2, \\overline{r}_2, r_3, \\overline{r}_3, r_4, \\overline{r}_4, r_5, \\overline{r}_5\\), where the bar denotes complex conjugation. Find the value of \\(\\frac{1}{r_1r_4} + \\frac{1}{r_2r_2} + \\frac{1}{r_3r_3} + \\frac{1}{r_4r_4} + \\frac{1}{r_5r_5}\\)." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.619, + 0.28, + 0.624 + ], + "angle": 0, + "content": "High-Level Agent's Output" + }, + { + "type": "text", + "bbox": [ + 0.355, + 0.619, + 0.464, + 0.625 + ], + "angle": 0, + "content": "Low-Level Agent's Output (part I)" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.626, + 0.28, + 0.639 + ], + "angle": 0, + "content": "To solve this problem, we need to follow these steps:" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.643, + 0.28, + 0.655 + ], + "angle": 0, + "content": "1. Understand the given equation: The equation is \\( x^{10} - (13x - 1)^{10} = 0 \\)." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.659, + 0.272, + 0.665 + ], + "angle": 0, + "content": "2. 
Identify the roots: The roots of the" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.665, + 0.233, + 0.67 + ], + "angle": 0, + "content": "equation are given as" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.671, + 0.289, + 0.684 + ], + "angle": 0, + "content": "\\(\\overline{r_1},\\overline{r_2},\\overline{r_3},\\overline{r_4},\\overline{r_5},\\overline{r_6},\\overline{r_7},\\overline{r_8}\\) where the bar denotes complex conjugation." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.689, + 0.289, + 0.695 + ], + "angle": 0, + "content": "3. Use properties of complex conjugates:" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.696, + 0.29, + 0.712 + ], + "angle": 0, + "content": "For any complex number \\(z\\), the product \\(z \\cdot z\\) is a complex function of the modulus of \\(z\\), i.e., \\(|z \\cdot \\overline{z}| = |z|^2\\)." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.718, + 0.29, + 0.724 + ], + "angle": 0, + "content": "4. Simplify the expression: We need to find" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.725, + 0.265, + 0.73 + ], + "angle": 0, + "content": "the value of 1" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.731, + 0.265, + 0.736 + ], + "angle": 0, + "content": "r1+ r2r3+ r4" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.738, + 0.265, + 0.743 + ], + "angle": 0, + "content": "#" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.745, + 0.284, + 0.75 + ], + "angle": 0, + "content": "Apply an property of complex conjugates. 
Since \\( \\pi_1 \\cdot \\overline{\\pi}_1 = |\\pi_1|^2 \\), we have" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.751, + 0.207, + 0.756 + ], + "angle": 0, + "content": "1+" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.758, + 0.207, + 0.763 + ], + "angle": 0, + "content": "A" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.765, + 0.28, + 0.77 + ], + "angle": 0, + "content": "6.Sum the reciprocals: Sum the values" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.771, + 0.266, + 0.777 + ], + "angle": 0, + "content": "1+1 1 1" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.783, + 0.291, + 0.807 + ], + "angle": 0, + "content": "7. Use Vieta's formulas: To find the sum of the reciprocals of the moduli squared, we can use the fact that the sum of the roots of the polynomial and their properties." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.812, + 0.282, + 0.817 + ], + "angle": 0, + "content": "8. Finalize the solution: Combine all the" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.818, + 0.247, + 0.823 + ], + "angle": 0, + "content": "steps to find the final value" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.828, + 0.287, + 0.833 + ], + "angle": 0, + "content": "Now, let's guide the low-level math solving" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.834, + 0.258, + 0.84 + ], + "angle": 0, + "content": "agent to implement these steps." + }, + { + "type": "list", + "bbox": [ + 0.178, + 0.626, + 0.291, + 0.84 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.298, + 0.625, + 0.517, + 0.63 + ], + "angle": 0, + "content": "To solve the given problem, we start by analyzing the equation \\( x^{10} + (13x - 1)^{10} = 0 \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.298, + 0.631, + 0.359, + 0.636 + ], + "angle": 0, + "content": "This can be rewritten as:" + }, + { + "type": "equation", + "bbox": [ + 0.383, + 0.641, + 0.438, + 0.646 + ], + "angle": 0, + "content": "\\[\nx ^ {1 0} = - (1 3 x - 1) ^ {1 0}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.298, + 0.651, + 0.404, + 0.657 + ], + "angle": 0, + "content": "Taking the 10th root on both sides, we get:" + }, + { + "type": "equation", + "bbox": [ + 0.361, + 0.662, + 0.433, + 0.667 + ], + "angle": 0, + "content": "\\[\nx = \\omega (1 3 x - 1),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.298, + 0.672, + 0.507, + 0.684 + ], + "angle": 0, + "content": "where \\(\\omega\\) is a 10th root of \\(-1\\). The 10th roots of \\(-1\\) are given by \\(\\omega = e^{\\pi (2k + 1) / 10}\\) for \\(k = 0, 1, 2, \\ldots, 9\\)." + }, + { + "type": "text", + "bbox": [ + 0.298, + 0.689, + 0.381, + 0.695 + ], + "angle": 0, + "content": "Rewriting the equation, we have:" + }, + { + "type": "text", + "bbox": [ + 0.3, + 0.699, + 0.519, + 0.711 + ], + "angle": 0, + "content": "\\(x = \\omega (13x - 1)\\Rightarrow x - 13\\omega x = -\\omega \\Rightarrow x(1 - 13\\omega) = -\\omega \\Rightarrow x = \\frac{-\\omega}{1 - 13\\omega}\\)" + }, + { + "type": "text", + "bbox": [ + 0.298, + 0.713, + 0.384, + 0.718 + ], + "angle": 0, + "content": "Thus, the roots of the equation are:" + }, + { + "type": "equation", + "bbox": [ + 0.3, + 0.725, + 0.432, + 0.736 + ], + "angle": 0, + "content": "\\[\nr _ {k} = \\frac {- \\omega_ {k}}{1 - 1 3 \\omega_ {k}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.298, + 0.739, + 0.394, + 0.744 + ], + "angle": 0, + "content": "where \\(\\omega_{k} = e^{i\\pi (2k + 1) / 10}\\) for \\(k = 0,1,2\\)" + }, + { + "type": "text", + "bbox": [ + 0.298, + 0.75, + 0.516, + 0.762 + ], + "angle": 0, + "content": "The roots come in conjugate pairs, so the roots are \\( r_1, \\overline{r_1}, r_2, \\overline{r_2}, r_3, \\overline{r_3}, r_4, 
\\overline{r_4}, r_5, \\overline{r_5} \\). We need to find the value of:" + }, + { + "type": "equation", + "bbox": [ + 0.359, + 0.765, + 0.462, + 0.779 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{r _ {1} \\overline {{r _ {1}}}} + \\frac {1}{r _ {2} \\overline {{r _ {2}}}} + \\frac {1}{r _ {3} \\overline {{r _ {3}}}} + \\frac {1}{r _ {4} \\overline {{r _ {4}}}} + \\frac {1}{r _ {5} \\overline {{r _ {5}}}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.298, + 0.781, + 0.371, + 0.787 + ], + "angle": 0, + "content": "Since \\(r_k\\overline{r_k} = |r_k|^2\\) we have:" + }, + { + "type": "equation", + "bbox": [ + 0.392, + 0.791, + 0.43, + 0.805 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{r _ {k} r _ {k}} = \\frac {1}{| r _ {k} | ^ {2}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.298, + 0.809, + 0.378, + 0.814 + ], + "angle": 0, + "content": "We need to find \\( |r_k|^2 \\). Note that:" + }, + { + "type": "equation", + "bbox": [ + 0.388, + 0.819, + 0.431, + 0.83 + ], + "angle": 0, + "content": "\\[\nr _ {k} = \\frac {- \\omega_ {k}}{1 - 1 3 \\omega_ {k}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.62, + 0.73, + 0.626 + ], + "angle": 0, + "content": "Low-Level Agent's Output (part II)" + }, + { + "type": "text", + "bbox": [ + 0.528, + 0.63, + 0.587, + 0.636 + ], + "angle": 0, + "content": "The magnitude of \\(r_k\\) is:" + }, + { + "type": "equation", + "bbox": [ + 0.609, + 0.639, + 0.735, + 0.653 + ], + "angle": 0, + "content": "\\[\n| r _ {k} | = \\left| \\frac {- \\omega_ {k}}{1 - 1 3 \\omega_ {k}} \\right| = \\left| \\frac {| \\omega_ {k} |}{1 - 1 3 \\omega_ {k}} \\right| = \\frac {1}{1 - 1 3 \\omega_ {k}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.528, + 0.657, + 0.543, + 0.662 + ], + "angle": 0, + "content": "Thus," + }, + { + "type": "equation", + "bbox": [ + 0.645, + 0.666, + 0.7, + 0.679 + ], + "angle": 0, + "content": "\\[\n| r _ {k} | ^ {2} = \\frac {1}{| 1 - 1 3 \\omega_ {k} | ^ {2}}\n\\]" + }, + { + "type": "text", + 
"bbox": [ + 0.528, + 0.683, + 0.555, + 0.688 + ], + "angle": 0, + "content": "Therefore," + }, + { + "type": "equation", + "bbox": [ + 0.645, + 0.693, + 0.701, + 0.705 + ], + "angle": 0, + "content": "\\[\n\\frac {1}{r _ {2} r _ {1}} = | 1 - 1 3 \\omega_ {k} | ^ {2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.528, + 0.71, + 0.641, + 0.715 + ], + "angle": 0, + "content": "We need to sum this over the 5 pairs of roots:" + }, + { + "type": "equation", + "bbox": [ + 0.651, + 0.719, + 0.694, + 0.733 + ], + "angle": 0, + "content": "\\[\n\\sum_ {k = 0} ^ {4} | 1 - 1 3 \\omega_ {k} | ^ {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.528, + 0.739, + 0.611, + 0.744 + ], + "angle": 0, + "content": "Since \\(\\omega_{k} = e^{i\\pi (2k + 1) / 10}\\), we have" + }, + { + "type": "text", + "bbox": [ + 0.531, + 0.748, + 0.813, + 0.754 + ], + "angle": 0, + "content": "\\(|1 - 13\\omega_{k}|^{2} = (1 - 13\\omega_{k})(1 - 13\\overline{\\omega_{k}}) = (1 - 13\\omega_{k})(1 - 13e^{-i\\pi (2k + 1) / 10}) = 1 - 13\\omega_{k} - 13e^{-i\\pi (2k + 1) / 10} + 169.\\)" + }, + { + "type": "text", + "bbox": [ + 0.528, + 0.758, + 0.59, + 0.764 + ], + "angle": 0, + "content": "Since \\(\\omega_{k}\\overline{\\omega_{k}} = 1\\) we get:" + }, + { + "type": "list", + "bbox": [ + 0.528, + 0.748, + 0.813, + 0.764 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.528, + 0.769, + 0.817, + 0.782 + ], + "angle": 0, + "content": "\\[\n\\left| 1 - 1 3 \\omega_ {k} \\right| ^ {2} = 1 6 9 - 1 3 (\\omega_ {k} + e ^ {- i \\pi (2 k + 1) \\cdot 1 0}) + 1 = 1 7 0 - 1 3 \\cdot 2 \\cos \\left(\\frac {\\pi (2 k + 1)}{1 0}\\right) = 1 7 0 - 2 6 \\cos \\left(\\frac {\\pi (2 k + 1)}{1 0}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.528, + 0.786, + 0.626, + 0.792 + ], + "angle": 0, + "content": "Summing over \\(k = 0,1,2,3,4\\) we get:" + }, + { + "type": "equation", + "bbox": [ + 0.549, + 0.796, + 0.797, + 0.811 + ], + "angle": 0, + "content": "\\[\n\\sum_ {k = 
0} ^ {n - 1} \\left(1 7 0 - 2 6 \\cos \\left(\\frac {\\pi (2 k + 1)}{1 0}\\right)\\right) = 5 \\cdot 1 7 0 - 2 6 \\sum_ {k = 0} ^ {n - 1} \\cos \\left(\\frac {\\pi (2 k + 1)}{1 0}\\right) = 8 5 0 - 2 6 \\cdot 0 = 8 5 0.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.528, + 0.816, + 0.574, + 0.821 + ], + "angle": 0, + "content": "Thus, the value is:" + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.825, + 0.68, + 0.832 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.269, + 0.852, + 0.729, + 0.867 + ], + "angle": 0, + "content": "Figure 11: Case Study for base correctness reward of high-level agent" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "code", + "bbox": [ + 0.242, + 0.098, + 0.756, + 0.111 + ], + "angle": 0, + "content": "Question: The product of \\(3t^{2} + 5t + a\\) and \\(4t^{2} + bt - 2\\) is \\(12t^{4} + 26t^{3} - 8t^{2} - 16t + 6\\). What is \\(a + b\\)?" 
+ }, + { + "type": "code_caption", + "bbox": [ + 0.177, + 0.118, + 0.359, + 0.13 + ], + "angle": 0, + "content": "Llama3.1-8B-Instruct training Output:" + }, + { + "type": "code", + "bbox": [ + 0.178, + 0.135, + 0.498, + 0.183 + ], + "angle": 0, + "content": "{\n \"action\": \"DECOMPOSE\",\n \"output\": \"Decompose the problem into finding the coefficients of the product of two polynomials and then solving for a and b.\"\n}" + }, + { + "type": "code_caption", + "bbox": [ + 0.506, + 0.118, + 0.689, + 0.13 + ], + "angle": 0, + "content": "Llama3.2-1B-Instruct training Output:" + }, + { + "type": "code", + "bbox": [ + 0.509, + 0.135, + 0.822, + 0.184 + ], + "angle": 0, + "content": "{ \"action\": \"EMPTY\", \"output\".\"\" }" + }, + { + "type": "code", + "bbox": [ + 0.178, + 0.19, + 0.482, + 0.467 + ], + "angle": 0, + "content": "Step 1: To find the product of the two polynomials \\(3t^{2} + 5t + a\\) and \\(4t^{2} + bt - 2\\), we need to multiply each term of the first polynomial by each term of the second polynomial and then combine like terms. \nStep 2: The product of the two polynomials can be written as \\((3t^{2} + 5t + a)(4t^{2} + bt - 2)\\). \nStep 3: Using the distributive property, we multiply each term in the first polynomial by each term in the second polynomial: \\((3t^{2})(4t^{2}) + (3t^{2})(bt) + (3t^{2})(-2) + (5t)(4t^{2}) + (5t)(bt) + (5t)(-2) + (a)(4t^{2}) + (a)(bt) + (a)(-2)\\). \nStep 4: Simplifying the products, we get \\(12t^{4} + 3b^{3} - 6t^{2} + 20t^{3} + 5bt^{2} - 10t + 4at^{2} + abt - 2a\\). \nStep 5: Combining like terms, we have \\(12t^{4} + (3b + 20)t^{3} + (5b - 6 + 4a)t^{2} + (ab - 10)t - 2a\\). \nStep 6: We are given that the product of the two polynomials is \\(12t^{4} + 26t^{3} - 8t^{2} - 16t + 6\\). \nWe can equate the coefficients of the corresponding powers of \\(t\\) in the two expressions. \nStep 7: Equating the coefficients of \\(t^3\\), we get \\(3b + 20 = 26\\). 
\nStep 8: Equating the coefficients of \\(t^2\\), we get \\(5b - 6 + 4a = -8\\). \nStep 9: Equating the constant terms, we get \\(-2a = 6\\). \nStep 10: Solving the equation \\(-2a = 6\\), we find that \\(a = -3\\). \nStep 11: Substituting \\(a = -3\\) into the equation \\(5b - 6 + 4a = -8\\), we get \\(5b - 6 + 4(-3) = -8\\), which simplifies to \\(5b - 18 = -8\\). \nStep 12: Solving for \\(b\\), we find that \\(5b = 10\\), so \\(b = 2\\). \nStep 13: Now that we have found the values of \\(a\\) and \\(b\\), we can calculate their sum: \\(a + b = -3 + 2 = -1\\). \nThe final answer is: \\(\\boxed{-1}\\)" + }, + { + "type": "code", + "bbox": [ + 0.507, + 0.195, + 0.811, + 0.224 + ], + "angle": 0, + "content": "Step 1: The problem asks us to find the values of \\( a \\) and \\( b \\) in the quadratic expressions \\( 3t^2 + 5t + a \\) and \\( 4t^2 + bt - 2 \\), respectively, given that their product is \\( 12t^4 + 26t^3 - 8t^2 - 16t + 6 \\)." + }, + { + "type": "code", + "bbox": [ + 0.508, + 0.233, + 0.819, + 0.262 + ], + "angle": 0, + "content": "Step 2: To solve this, we need to multiply the two quadratic expressions and equate the resulting expression to the given product \\(12t^4 + 26t^3 - 8t^2 - 16t + 6\\)." + }, + { + "type": "code", + "bbox": [ + 0.508, + 0.271, + 0.763, + 0.291 + ], + "angle": 0, + "content": "Step 3: We start by multiplying the two quadratic expressions: \\((3t^{2} + 5t + a)(4t^{2} + bt - 2)\\)." + }, + { + "type": "code", + "bbox": [ + 0.508, + 0.3, + 0.78, + 0.319 + ], + "angle": 0, + "content": "Step 4: This gives us \\(12t^4 + (3b + 5)t^3 + (4a + 5b - 6)t^2 + (-2b + 10t + 2a)t - 2a.\\)" + }, + { + "type": "code", + "bbox": [ + 0.507, + 0.327, + 0.816, + 0.347 + ], + "angle": 0, + "content": "Step 5: We know that the coefficients of the resulting expression must be equal to the coefficients of the given product \\(12t^4 + 26t^3 - 8t^2 - 16t + 6\\)." 
+ }, + { + "type": "code", + "bbox": [ + 0.507, + 0.356, + 0.81, + 0.374 + ], + "angle": 0, + "content": "Step 6: Equating the coefficients of \\( t^3 \\) gives us the equation \\( 3b + 5 = 26 \\), which simplifies to \\( 3b = 21 \\) and thus \\( b = 7 \\)." + }, + { + "type": "code", + "bbox": [ + 0.507, + 0.383, + 0.811, + 0.403 + ], + "angle": 0, + "content": "Step 7: Equating the constant terms gives us \\(-2a = -6\\), which simplifies to \\(a = 3\\)." + }, + { + "type": "code", + "bbox": [ + 0.507, + 0.412, + 0.816, + 0.422 + ], + "angle": 0, + "content": "Step 8: Now that we have the values of \\( b \\) and \\( a \\), we can find the sum \\( a + b \\)." + }, + { + "type": "code", + "bbox": [ + 0.508, + 0.43, + 0.729, + 0.44 + ], + "angle": 0, + "content": "Step 9: Adding \\( a = 3 \\) and \\( b = 7 \\) gives us \\( a + b = 10 \\)." + }, + { + "type": "code", + "bbox": [ + 0.508, + 0.449, + 0.61, + 0.46 + ], + "angle": 0, + "content": "The final answer is: \\(\\boxed{10}\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.266, + 0.477, + 0.731, + 0.492 + ], + "angle": 0, + "content": "Figure 12: Case Study for interpretability experiments in Section 4.2.2" + }, + { + "type": "code_caption", + "bbox": [ + 0.178, + 0.52, + 0.286, + 0.534 + ], + "angle": 0, + "content": "System prompt:" + }, + { + "type": "code", + "bbox": [ + 0.178, + 0.535, + 0.779, + 0.714 + ], + "angle": 0, + "content": "You are a math expert trying to solve mathematical problems. Before answering a question, your task is to decompose the original question to make it clearer. \nProvide your rewritten content in JSON format: \n{\"action\": \"DECOMPOSE\", \"output\": \"{decomposed question content}}\" \n}} \nRespond only with valid JSON. Do not write an introduction or summary. 
\nUser prompt: \nHere is the question: [problem.text]" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.722, + 0.749, + 0.736 + ], + "angle": 0, + "content": "Prompt for generating final answers using on the question and metacognition reasoning:" + }, + { + "type": "code_caption", + "bbox": [ + 0.178, + 0.745, + 0.285, + 0.759 + ], + "angle": 0, + "content": "System prompt:" + }, + { + "type": "code", + "bbox": [ + 0.178, + 0.761, + 0.817, + 0.816 + ], + "angle": 0, + "content": "You are a math expert tasked with solving problems step by step. Follow the provided instructions precisely, showing all reasoning and intermediate steps. Present the final answer within \\boxed{\\{\\}}\\}." + }, + { + "type": "code_caption", + "bbox": [ + 0.179, + 0.816, + 0.268, + 0.828 + ], + "angle": 0, + "content": "User prompt:" + }, + { + "type": "code", + "bbox": [ + 0.178, + 0.829, + 0.551, + 0.897 + ], + "angle": 0, + "content": "Here is the question and instructions: \nQuestion \n[problem_text] \nProvided Instruction \n[instruction_text]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.092, + 0.427, + 0.108 + ], + "angle": 0, + "content": "G.1.2 Prompts for Math problems" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.115, + 0.273, + 0.131 + ], + "angle": 0, + "content": "VRP prompt:" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.132, + 0.806, + 0.219 + ], + "angle": 0, + "content": "System prompt: \nYou are a math expert tasked with solving problems step by step. Present the final answer within \\boxed{}?. 
\nUser prompt: \nHere is the question: \n{Question}" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.225, + 0.277, + 0.24 + ], + "angle": 0, + "content": "MRP prompt:" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.242, + 0.82, + 0.383 + ], + "angle": 0, + "content": "System prompt: \nYou are a math expert tasked with solving problems. When solving a problem, your first task is to provide a high-level solution plan as an instruction. Then you need to follow the provided instructions precisely, showing all reasoning and intermediate steps. Finally, you must present the final answer within boxed}. \nUser prompt: \nHere is the question: {Question}" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.39, + 0.304, + 0.405 + ], + "angle": 0, + "content": "MAMRP prompt:" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.405, + 0.286, + 0.419 + ], + "angle": 0, + "content": "high-level agent:" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.42, + 0.83, + 0.616 + ], + "angle": 0, + "content": "System prompt: \nYou are a math expert specialized in solving mathematical problems, you need to teach a weaker agent with minimal capability in math how to solve a problem step-by-step. \nYour task is to provide a high-level solution plan for the given problem, in order to guide a low-level math solving agent to solve the problem. \nYou can not directly answer the question. You'll be punished if you include any answer in your response. \nYou need to first think deeply in mind and output your final instruction. \nUser prompt: \nHere is the question: \n{Question}" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.618, + 0.281, + 0.632 + ], + "angle": 0, + "content": "low-level agent:" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.633, + 0.82, + 0.828 + ], + "angle": 0, + "content": "System prompt: \nYou are a math expert tasked with solving problems step by step. Follow the provided instructions precisely, showing all reasoning and intermediate steps. 
Present the final answer within \\boxed{}/. User prompt: Here is the question and instructions: [Question] {Question} [End of Question] [Provided Instruction] {instruction} [End of Instruction]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.506, + 0.108 + ], + "angle": 0, + "content": "G.1.3 Prompts for LLM-as-a-Judge problems" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.115, + 0.486, + 0.13 + ], + "angle": 0, + "content": "We adopt the prompts from Saha et al. [2025a]." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.13, + 0.273, + 0.144 + ], + "angle": 0, + "content": "VRP prompt:" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.146, + 0.83, + 0.521 + ], + "angle": 0, + "content": "System prompt: \nPlease act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. You should choose the assistant that follows the user's instructions and answers the user's question better. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of their responses. Begin your evaluation by comparing the two responses and provide a short explanation. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision.. \nDo not allow the length of the responses to influence your evaluation.. \nDo not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[A]\" if assistant A is better, \"[B]\" if assistant B is better.. 
\nUser prompt: \n[User Question] {instruction} [End of User Question] [The Start of Assistant A's Answer] {response_A} [The End of Assistant A's Answer] [The Start of Assistant B's Answer] {response_B} [The End of Assistant B's Answer]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "35" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.092, + 0.276, + 0.107 + ], + "angle": 0, + "content": "MRP prompt:" + }, + { + "type": "title", + "bbox": [ + 0.178, + 0.109, + 0.286, + 0.123 + ], + "angle": 0, + "content": "System prompt:" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.124, + 0.826, + 0.634 + ], + "angle": 0, + "content": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. You should choose the assistant that follows the user's instructions and answers the user's question better. First of your task is to build an evaluation plan that can then be executed to assess the response quality. Whenever appropriate, you can choose to also include a step-by-step reference answer as part of the evaluation plan. Enclose your evaluation plan between the tags \"[Start of Evaluation Plan]\" and \"[End of Evaluation Plan)\". After that, please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. You should choose the assistant that follows the user's instructions and answers the user's question better. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of their responses. Begin your evaluation by comparing the two responses and provide a short explanation. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. 
Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[A]\" if assistant A is better, \"[B]\" if assistant B is better. User prompt: [User Question] {instruction} [End of User Question] [The Start of Assistant A's Answer] {response_A} [The End of Assistant A's Answer] [The Start of Assistant B's Answer] {response_B} [The End of Assistant B's Answer]" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.641, + 0.416, + 0.656 + ], + "angle": 0, + "content": "MAMRP prompt: high-level agent:" + }, + { + "type": "title", + "bbox": [ + 0.178, + 0.658, + 0.286, + 0.673 + ], + "angle": 0, + "content": "System prompt:" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.673, + 0.798, + 0.756 + ], + "angle": 0, + "content": "We want to evaluate the quality of the responses provided by AI assistants to the user question displayed below. For that, your task is to help us build an evaluation plan that can then be executed to assess the response quality. Whenever appropriate, you can choose to also include a step-by-step reference answer as part of the evaluation plan." 
+ }, + { + "type": "title", + "bbox": [ + 0.179, + 0.756, + 0.269, + 0.769 + ], + "angle": 0, + "content": "User prompt:" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.77, + 0.395, + 0.81 + ], + "angle": 0, + "content": "[User Question] \n{Question} \n[End of User Question]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "36" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.093, + 0.281, + 0.106 + ], + "angle": 0, + "content": "low-level agent:" + }, + { + "type": "title", + "bbox": [ + 0.179, + 0.11, + 0.285, + 0.123 + ], + "angle": 0, + "content": "System prompt:" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.124, + 0.829, + 0.19 + ], + "angle": 0, + "content": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. Your evaluation should be performed by following the provided evaluation plan step-by-step. Avoid copying the plan when doing the evaluation." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.192, + 0.81, + 0.22 + ], + "angle": 0, + "content": "Please also only stick to the given plan and provide explanation of how the plan is executed to compare the two responses." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.221, + 0.808, + 0.247 + ], + "angle": 0, + "content": "Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.248, + 0.792, + 0.288 + ], + "angle": 0, + "content": "Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible." 
+ }, + { + "type": "text", + "bbox": [ + 0.178, + 0.289, + 0.825, + 0.329 + ], + "angle": 0, + "content": "After providing your evaluation, output your final verdict by strictly following this format: \"[A]\" if assistant A is better, \"[B]\" if assistant B is better." + }, + { + "type": "title", + "bbox": [ + 0.18, + 0.331, + 0.269, + 0.344 + ], + "angle": 0, + "content": "User prompt:" + }, + { + "type": "code", + "bbox": [ + 0.179, + 0.344, + 0.522, + 0.51 + ], + "angle": 0, + "content": "[User Question] \n{instruction} \n{End of User Question] \n{The Start of Assistant A's Answer} \n{response_A} \n{The End of Assistant A's Answer} \n{The Start of Assistant B's Answer} \n{response_B} \n{The End of Assistant B's Answer} \n{The Start of Evaluation Plan} \n{evaluation計劃} \n{The End of Evaluation Plan}" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "37" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.409, + 0.108 + ], + "angle": 0, + "content": "G.2 Multi-turn ReMA prompts" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.117, + 0.53, + 0.131 + ], + "angle": 0, + "content": "G.2.1 SFT data collection of multi-turn MAMRP" + }, + { + "type": "title", + "bbox": [ + 0.178, + 0.142, + 0.286, + 0.156 + ], + "angle": 0, + "content": "System prompt:" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.156, + 0.814, + 0.224 + ], + "angle": 0, + "content": "You are classifying reasoning process data into two types of thinking. You will be given a question-answer pair from a reasoning dataset. Your task is to split all words into two parts. These words are crucial for analyzing reasoning patterns, so do not skip any details." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.225, + 0.798, + 0.293 + ], + "angle": 0, + "content": "- **Meta-Thinking Agent (MTA):** Responsible for high-level thought processes. 
This includes planning, evaluating steps, expressing uncertainty, making observations, or setting goals. Avoid detailed calculations. The content should be enclosed in `` and ``." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.293, + 0.808, + 0.349 + ], + "angle": 0, + "content": "- \\(\\star \\star\\) Reasoning Agent (RA): \\(\\star \\star\\) Responsible for detailed problem-solving steps, such as calculations, logical deductions, or breaking down a problem into subproblems. The content should be enclosed in `` and ``." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.35, + 0.378, + 0.361 + ], + "angle": 0, + "content": "\\*\\*Rules to follow: \\*\\*" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.363, + 0.807, + 0.417 + ], + "angle": 0, + "content": "1. **Do not assign large chunks of text to a single type of thinking.** The reasoning process consists of small, nonlinear thinking steps, so alternate appropriately between Meta-Thinking and Reasoning steps." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.418, + 0.756, + 0.459 + ], + "angle": 0, + "content": "2. **Keep the words from the original solution unmodified whenever possible.** Words like \"Wait,\" \"Hmm,\" \"But,\" etc., typically indicate Meta-Thinking and should be preserved." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.46, + 0.525, + 0.472 + ], + "angle": 0, + "content": "3. \\(\\star \\star\\) When finalizing the answer: \\(\\star \\star\\)" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.473, + 0.798, + 0.5 + ], + "angle": 0, + "content": "- The \\*\\*Meta-Thinking Agent (MTA) \\*\\* must explicitly confirm the answer before completion and output '[FINISH]'." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.501, + 0.768, + 0.526 + ], + "angle": 0, + "content": "- The \\(\\star \\star\\) Reasoning Agent (RA) \\(\\star \\star\\) should then provide the final answer in the correct format." 
+ }, + { + "type": "text", + "bbox": [ + 0.178, + 0.528, + 0.731, + 0.554 + ], + "angle": 0, + "content": "4. **Do not skip any reasoning steps, even if they seem redundant, incorrect or irrelevant**" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.555, + 0.817, + 0.596 + ], + "angle": 0, + "content": "5. **Do not modify or remove any part of the original reasoning process**, even if it seems redundant or repetitive. The goal is to **preserve the exact flow of thought** as it naturally occurs." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.597, + 0.817, + 0.638 + ], + "angle": 0, + "content": "6. **Retain all expressions such as \"Wait,\" \"Hmm,\" \"But wait,\" etc., exactly as they appear. These indicate important cognitive processes and should not be skipped or altered.**" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.639, + 0.434, + 0.665 + ], + "angle": 0, + "content": "Here are examples for you: [Examples] ..." + }, + { + "type": "title", + "bbox": [ + 0.178, + 0.666, + 0.268, + 0.679 + ], + "angle": 0, + "content": "User prompt:" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.68, + 0.365, + 0.762 + ], + "angle": 0, + "content": "[Begin of Question] \n{question} \n[End of Question] \n[Begin of Solution] \n{solution} \n[End of Solution]" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "38" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.092, + 0.417, + 0.108 + ], + "angle": 0, + "content": "G.2.2 Prompt for math problems" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.115, + 0.385, + 0.13 + ], + "angle": 0, + "content": "Meta-Thinking Agent (MTA):" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.138, + 0.827, + 0.322 + ], + "angle": 0, + "content": "System prompt: \nYou are a meta-think agent that represents human high-level think process, when solving a question, you will have a discussion with human, each time you think about what to do next: e.g. 
\n- Exploring multiple angles and approaches \n- Breaking down the solution into clear steps \n- Continuously reflecting on intermediate results honestly and adapt your strategy as you progress \n- Backtracking when necessary \n- Requesting exploration of multiple solutions individually \n- Finally confirm the answer with the tag [FINISH] \nUser prompt: \n{question}" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.328, + 0.339, + 0.344 + ], + "angle": 0, + "content": "Reasoning Agent (RA):" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.351, + 0.808, + 0.438 + ], + "angle": 0, + "content": "System prompt: Please reason step by step follow the given instruction, when asked to finalize your answer, put your answer within \\boxed{} User prompt: {question} {instruction}" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "39" + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09501/7c196e4e-1362-4974-a470-65c83d863927_origin.pdf b/data/2025/2503_09xxx/2503.09501/7c196e4e-1362-4974-a470-65c83d863927_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f945eb03ea30030c63d54d502929627d6ee64d7e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/7c196e4e-1362-4974-a470-65c83d863927_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d97124752934b219f49f7d517e9e61a977326fdc765a9f6512c2949e106e159 +size 6136590 diff --git a/data/2025/2503_09xxx/2503.09501/full.md b/data/2025/2503_09xxx/2503.09501/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8ee45810363ff40c7c7e47061203309e6b9f69d8 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/full.md @@ -0,0 +1,1466 @@ +# ReMA: Learning to Meta-think for LLMs with Multi-agent Reinforcement Learning + +Ziyu Wan $^{1,2*}$ , Yunxiang Li $^{3*}$ , Xiaoyu Wen $^{1,2}$ , Yan Song $^{4}$ , Hanjing Wang $^{1}$ , Linyi Yang $^{4}$ , Mark Schmidt $^{3}$ , Jun 
Wang $^{4}$ , Weinan Zhang $^{1}$ , Shuyue Hu $^{2\ddagger}$ , Ying Wen $^{1\ddagger}$ + +1 Shanghai Jiao Tong University +$^{2}$ Shanghai Artificial Intelligence Laboratory +3 University of British Columbia +4 University College London + +# Abstract + +Recent research on Reasoning of Large Language Models (LLMs) has sought to further enhance their performance by integrating meta-thinking—enabling models to monitor, evaluate, and control their reasoning processes for more adaptive and effective problem-solving. However, current single-agent work lacks a specialized design for acquiring meta-thinking, resulting in low efficacy. To address this challenge, we introduce Reinforced Meta-thinking Agents (ReMA), a novel framework that leverages Multi-Agent Reinforcement Learning (MARL) to elicit meta-thinking behaviors, encouraging LLMs to think about thinking. ReMA decouples the reasoning process into two hierarchical agents: a high-level meta-thinking agent responsible for generating strategic oversight and plans, and a low-level reasoning agent for detailed executions. Through iterative reinforcement learning with aligned objectives, these agents explore and learn collaboration, leading to improved generalization and robustness. Empirical results from single-turn experiments demonstrate that ReMA outperforms single-agent RL baselines on complex reasoning tasks, including competitive-level mathematical benchmarks and LLM-as-a-Judge benchmarks. Additionally, we further extend ReMA to multi-turn interaction settings, leveraging turn-level ratio and parameter sharing to improve efficiency. Comprehensive ablation studies further illustrate the evolving dynamics of each distinct agent, providing valuable insights into how the meta-thinking reasoning process enhances the reasoning capabilities of LLMs. 
Our code can be found in https://github.com/ziyuwan/ReMA-public + +# 1 Introduction + +Large language models (LLMs) have demonstrated remarkable capabilities in knowledge understanding and complex reasoning tasks [Chowdhery et al., 2023, Achiam et al., 2023, Anil et al., 2023, Dubey et al., 2024]. The paradigm in developing LLM-based reasoning models is shifting from scaling training-time computation towards scaling test-time computation [Snell et al., 2024]. Recent advancements, such as OpenAI-o1 [OpenAI, 2024], Deepseek R1 [DeepSeek-AI et al., 2025], and Gemini 2.0 Flash Thinking [DeepMind, 2025], have demonstrated that allowing LLMs to think before generating answers can significantly enhance performance and lead to the emergence of human-like reasoning patterns. These patterns like "Wait, hold on." or "Let's break this down." + +Question: $T = 9.5$ . If $\log_2 x^T - \log_4 x = \log_8 x^k$ is an identity for all $x > 0$ , compute the value of $k$ . + +![](images/fd5b40d644792636991a20db79d8c4202f072f2e023bbd6106c186898b906286.jpg) +Figure 1: Left: A construction-based method that fine-tunes LLMs using rejection sampling, searching among combinations of pre-defined templates. Middle: R1-like method learns to mix meta-thinking and detailed reasoning steps during training. Right: Our method ReMA separates the meta-thinking and reasoning steps in a multi-agent system and updated by reinforcement learning. + +indicate that LLMs can develop a form of meta-thinking abilities that can generalize well to out-of-distribution (OOD) tasks [Xiang et al., 2025]. Meta-thinking, also known as metacognitive skills [Flavell, 1979], is an ability traditionally considered uniquely human [Didolkar et al., 2024]. + +To cultivate meta-thinking patterns from LLMs themselves, recent construction-based supervised approaches leverage supervised finetuning on structured reasoning trajectories. 
Specifically, these methods sampling reasoning trajectories from predefined meta-thinking templates and then use supervised finetuning (SFT) or direct preference optimization (DPO) [Rafailov et al., 2023] to teach LLMs imitate these patterns [Qi et al., 2024, Yue et al., Xi et al., 2024, Yang et al., 2025, Muenighoff et al., 2025, Ye et al., 2025c]. However, such methods lack sufficient flexibility for LLMs to explore suitable meta-thinking patterns. Thus, they often fail to generalize to out-of-distribution (OOD) problems, leading to unstable performance on unseen data [Kirk et al., Chu et al., 2025]. Besides construction-based methods, R1-like single-agent reinforcement learning (SARL) has also been adopted for meta-thinking in reasoning [DeepSeek-AI et al., 2025, Xie et al., 2025]. However, these SARL attempts typically rely on strong foundational models for easier exploration or extensive task-specific fine-tuning for stable training [Xu et al., 2025, Gandhi et al., 2025]. Furthermore, SARL needs to learn meta-thinking and reasoning within a single forward pass, seeking to capture complex reasoning structures purely in an autoregressive manner [Xie et al., 2025]. This can potentially lead to issues such as inefficient exploration as well as reduced readability and early convergence to local optima [DeepSeek-AI et al., 2025, Xiang et al., 2025]. + +To address these limitations, we introduce Reinforced Meta-thinking Agents (ReMA), a novel framework that leverages multi-agent reinforcement learning (MARL) to encourage LLMs to think about thinking. Our approach employs a multi-agent system (MAS) composed of a high-level meta-thinking agent, responsible for strategic oversight and instruction generation, and a low-level reasoning agent tasked with detailed executing processes based on provided guidance. We compare the inference process among the construction-based method, R1-like method, and ReMA in Fig. 1. 
Since MAS distributes the exploration space of SARL into multiple agents, it enables each agent to explore more structurally and efficiently during training. Then we apply reinforcement learning on each agent with aligned reward functions. In this way, ReMA effectively balances the trade-off between generalization capability and exploration efficiency. As a result, they can learn to play the best of their role (either to meta-think or to follow instructions), at the present of the other agent. + +To our knowledge, we are the first to formally define and optimize a multi-agent meta-thinking reasoning process (MAMRP) through multi-agent reinforcement learning. Our extensive experiments span both math reasoning and LLM-as-a-Judge tasks, where ReMA consistently achieves the highest average performance across three backbone pretrained models. We further extend ReMA to multi-turn interaction settings on math reasoning tasks, implementing turn-level ratio to optimize trajectory returns and stabilize training. Through comprehensive ablation studies, we illustrate the evolving dynamics between agents, revealing unexpected interaction patterns such as role reversals + +under different reward settings. These findings provide valuable insights into how meta-thinking processes enhance the reasoning capabilities of LLMs. + +# 2 Preliminaries + +In this section, we outline the formulation of the vanilla reasoning process (Sec. 2.1) and the representative training methods (Sec. 2.2) along with the notation used throughout the paper. + +# 2.1 Vanilla Reasoning Process (VRP) + +The probability of generating a response $\mathbf{y}$ equals the product of its stepwise probabilities. 
Given a model $\pi_{\theta}$ and a prompt $\mathbf{x} = (x_1, \ldots, x_N)$ , the vanilla reasoning process (VRP) autoregressively produces a response $\mathbf{y} = (y_1, \ldots, y_L)$ with + +$$ +\pi_ {\theta} (\mathbf {y} | \mathbf {x}) = \prod_ {l = 1} ^ {L} \pi_ {\theta} (y _ {l} | x _ {1}, x _ {2}, \dots x _ {N}, y _ {1}, \dots , y _ {l - 1}) = \prod_ {l = 1} ^ {L} \pi_ {\theta} (\mathbf {y} _ {l} | \mathbf {x}, \mathbf {y} _ {< l}) +$$ + +The response usually contains intermediate reasoning steps before arriving at the final answer, this process is also known as chain-of-thought (CoT) [Wei et al., 2022], which can be represented as: + +$$ +\mathbf {x} \xrightarrow {\text {r e a s o n i n g s t e p s}} \mathbf {y} \sim \mathbf {a}, \tag {1} +$$ + +where $\mathbf{a}$ is the extracted final answer, which is included in the answer $\mathbf{y}$ . + +# 2.2 Training VRP via Reinforcement Learning + +RL frames VRP decoding process as a deterministic, token-level Markov Decision process (MDP) [Wang et al., 2024a]. Its objective is + +$$ +\mathcal {J} (\theta) = \mathbb {E} _ {(\mathbf {x}, \mathbf {y} ^ {*}) \sim \mathcal {D}, \mathbf {y} \sim \pi_ {\theta}} \left[ R (\mathbf {y}, \mathbf {y} ^ {*}) \right]. +$$ + +where $R(\cdot, \cdot)$ represents a reward function comparing generated answer $\mathbf{y}$ with the golden answer $\mathbf{y}^*$ for any question $\mathbf{x}$ sampled from dataset $\mathcal{D}$ . + +To compute the gradient $\nabla_{\theta}\mathcal{J}(\theta)$ , computationally efficient algorithms GRPO [Shao et al., 2024] and REINFORCE++ [Hu, 2025] are widely adopted. 
Take GRPO as an example, given a question-answer pair $\mathbf{x},\mathbf{y}^*$ and a group of $G$ generated responses $\mathbf{y}_i$ , denote $\mathbf{y}_{i,j}$ as the $j$ -th token of the $i$ -th response, it optimizes the following token-level objective: + +$$ +\begin{array}{l} \mathcal {J} (\boldsymbol {\theta}) = \mathbb {E} _ {(\mathbf {x}, \mathbf {y} ^ {*}) \sim \mathcal {D}, \{\mathbf {y} _ {i} \} _ {i = 1} ^ {G} \sim \pi_ {\boldsymbol {\theta} _ {\mathrm {o l d}}} (\cdot | \mathbf {x})} \\ \left[ \frac {1}{G} \sum_ {i = 1} ^ {G} \frac {1}{| \mathbf {y} _ {i} |} \sum_ {j = 1} ^ {| \mathbf {y} _ {i} |} \left(\min \left(r _ {i, j} (\theta) \hat {A} _ {i, j}, \operatorname {c l i p} \left(r _ {i, j} (\theta), 1 - \epsilon , 1 + \epsilon\right) \hat {A} _ {i, j}\right) - \beta D _ {\mathrm {K L}} \left(\pi_ {\theta} \| \pi_ {\text {r e f}}\right)\right) \right], \tag {2} \\ \end{array} +$$ + +where the token-level ratio $r_{i,j}(\theta)$ and the group-normalized advantage $\hat{A}_{i,j}$ are defined as: + +$$ +r _ {i, j} (\theta) = \frac {\pi_ {\theta} \left(\mathbf {y} _ {i , j} \mid \mathbf {x} , \mathbf {y} _ {i , < j}\right)}{\pi_ {\theta_ {\mathrm {o l d}}} \left(\mathbf {y} _ {i , j} \mid \mathbf {x} , \mathbf {y} _ {i , < j}\right)}, \hat {A} _ {i, j} = \frac {R _ {i} - \operatorname {m e a n} \left(\left\{R _ {i} \right\} _ {i = 1} ^ {G}\right)}{\operatorname {s t d} \left(\left\{R _ {i} \right\} _ {i = 1} ^ {G}\right)}. +$$ + +However, RL on base LLMs that haven't been well-aligned may suffer from issues like poor readability and language mixing, preventing researchers from verifying, understanding, and further developing their LLMs. And huge searching space makes efficient learning of meta-thinking daunting. + +# 3 Method + +In this section, we present Reinforced Meta-thinking Agents (ReMA), a RL method integrating meta-thinking into the reasoning process of LLM under multi-agent settings (Sec. 
3.1), then describe the learning process enabled by MARL of single- and multi-turn LLM setting (Secs. 3.2.1 and 3.2.2). + +# 3.1 Deploying Meta-Thinking Reasoning Process for LLMs + +Beyond VRP (Sec. 2.1), recent studies [Muennighoff et al., 2025, Ye et al., 2025c] have shown that integrating meta-thinking behaviors in reasoning process can largely improve the accuracy of the final answers. By integrating Meta-thinking, ReMA decomposes problem solving into two sequential phases: a meta-thinking phase that plans, monitors, or revises strategy, followed by a reasoning phase that produces the detailed solution. We analyse Meta-thinking Reasoning Process along two orthogonal axes—single- vs. multi-agent and single- vs. multi-turn. + +In a single-agent setting, such a process calls LLM once and generates meta-thinking and the following reasoning autoregressively. We formulate the meta-thinking reasoning process (MRP) below: + +$$ +\mathbf {y} \sim \pi_ {\theta} (\mathbf {y} \mid \mathbf {x}, \mathbf {m}) \cdot \pi_ {\theta} (\mathbf {m} \mid \mathbf {x}), \tag {3} +$$ + +where $\mathbf{m}$ and $\mathbf{y}$ are the output of meta-thinking and reasoning respectively. We present the procedure as shown below: + +$$ +\mathbf {x} \xrightarrow {\text {m e t a - t h i n k i n g}} \mathbf {m} \xrightarrow {\text {r e a s o n i n g s t e p s}} \mathbf {y} \sim \mathbf {a}. \tag {4} +$$ + +Exploring MRP reasoning through a single-agent approach is often inefficient, as it requires the language model to simultaneously master both meta-thinking and detailed problem-solving within one call. Prior research has demonstrated that activating different model capabilities through specialized agents significantly improves MRP exploration efficiency. To leverage this insight, we decouple meta-thinking and reasoning into two separate LLM agents: a high-level agent dedicated to generating meta-thinking, and a low-level agent focused on executing reasoning steps. 
+ +During a conversation, the high-level and low-level agents (i.e., $\pi_h$ and $\pi_l$ ) act in an interleaving manner. The high-level agent generates and summarizes meta-thoughts from the prompt and interaction history, while the low-level agent executes detailed problem-solving under those instructions. We formulate the multi-agent meta-thinking reasoning process (MAMRP) as follows: + +$$ +\mathbf {y} \sim \pi_ {l} (\mathbf {y} \mid \mathbf {x}, \mathbf {m}) \pi_ {h} (\mathbf {m} \mid \mathbf {x}). \tag {5} +$$ + +While the single-turn MAMRP offers a straightforward approach, it lacks the ability to perform immediate and fine-grained cognitive switching during the reasoning process, which limits its effectiveness on complex, long-horizon planning tasks. Therefore, we extend Eq. (5) and formulate the multi-turn MAMRP as follows: + +$$ +\mathbf {y} _ {T} \sim \prod_ {t = 1} ^ {T} \pi_ {l} \left(\mathbf {y} _ {t} \mid \mathbf {x}, \{\mathbf {m}, \mathbf {y} \} _ {< t}, \mathbf {m} _ {t}\right) \pi_ {h} \left(\mathbf {m} _ {t} \mid \mathbf {x}, \{\mathbf {m}, \mathbf {y} \} _ {< t}\right) \tag {6} +$$ + +where $T$ is the number of turns. Similarly, we present the process with a directed graph: + +$$ +\mathbf {x} \xrightarrow [ \pi_ {h} ]{\text {m e t a - t h i n k i n g}} \mathbf {m} _ {1} \xrightarrow [ \pi_ {l} ]{\text {r e a s o n i n g}} \mathbf {y} _ {1} \xrightarrow [ \pi_ {h} ]{\text {m e t a - t h i n k i n g}} \mathbf {m} _ {2} \xrightarrow [ \pi_ {l} ]{\text {r e a s o n i n g}} \mathbf {y} _ {2} \xrightarrow [ \pi_ {h} ]{\text {m e t a - t h i n k i n g}} \dots \xrightarrow [ \pi_ {l} ]{\text {r e a s o n i n g}} \mathbf {y} _ {T} \sim \mathbf {a}. \tag {7} +$$ + +As a complex reasoning system, MAMRP provides various optimization opportunities in scaling inference-time computation. We leave further discussion of these aspects in Appendix C.1. 
+ +# 3.2 Training MAMRP: A Multi-Agent RL Method + +Multi-agent RL, unlike single-agent RL in a deterministic MDP, must contend with stochastic, nonstationary dynamics and rewards, making optimization more challenging. We start by considering an easier case, the optimization of single-turn MAMRP. + +# 3.2.1 Optimizing Single-turn MAMRP + +To train the system from Sec. 3.1, we embed it as a Markov Game between the two agents. Suppose the two LLM agents are parameterized by $\theta_h$ and $\theta_l$ , respectively. Define a joint hierarchical policy over sequential decisions $\mathbf{m}$ and $\mathbf{y}$ : + +$$ +\mathbf {y} \sim \pi_ {\left(\theta_ {h}, \theta_ {l}\right)} (\mathbf {y} \mid \mathbf {x}) := \pi_ {\theta_ {l}} (\mathbf {y} \mid \mathbf {x}, \mathbf {m}) \cdot \pi_ {\theta_ {h}} (\mathbf {m} \mid \mathbf {x}), \tag {8} +$$ + +Let $R(\mathbf{y}, \mathbf{y}^*)$ denote the final reward serves as the objective function $\mathcal{J}(\theta_h, \theta_l)$ for the joint policy: + +$$ +\mathcal {J} \left(\theta_ {h}, \theta_ {l}\right) = \mathbb {E} _ {\mathbf {x}, \mathbf {y} ^ {*}} \mathbb {E} _ {\mathbf {y} \sim \pi \left(\theta_ {h}, \theta_ {l}\right)} R (\mathbf {y}, \mathbf {y} ^ {*}). \tag {9} +$$ + +![](images/f246502d1a54bf77abbf1b84a3d339ae985d445d9515b42b122262754119fa92.jpg) +RL for VRP & MRP + +![](images/7f3231499e748935cfc4e1cfa6d56049cf9d1cab539e1879a81522875ddbfa66.jpg) +ReMA with Separate Parameters + +![](images/c1b6f6aaf161ccb2d707eb51858e4b49250221c289f22f7d7fd725b003912a00.jpg) +ReMA with Shared Parameters +Figure 2: Comparison of training pipelines. Left: RL training of VRP and MRP, where a single LM agent is updated either with mixed (VRP) or explicit (MRP) meta-thinking. Middle: ReMA with separate parameters for the high-level (meta-thinking) and low-level (reasoning) agents; training alternates between freezing one agent and updating the other. 
Right: ReMA with shared parameters and multi-turn interactions: both agents share the same parameters and are distinguished by their system prompts. Training employs a turn-level ratio for stable multi-turn reinforcement learning and efficient updates, ensuring each turn's contribution is controlled to prevent instability. + +During optimization procedure, the high-level policy $\pi_{\theta_h}$ and low-level policy $\pi_{\theta_l}$ aim to maximize their respective rewards independently. The optimization goals for agents are: + +$$ +\theta_ {h} ^ {*} = \arg \max _ {\theta_ {h}} \mathbb {E} _ {(\mathbf {x}, \mathbf {y} ^ {*}) \sim \mathcal {D}, \mathbf {m} \sim \pi_ {\theta_ {h}}, \mathbf {y} \sim \pi_ {\theta_ {l} ^ {*}}} \left[ R _ {h} (\mathbf {m}, \mathbf {y}, \mathbf {y} ^ {*}) \right], \tag {10} +$$ + +$$ +\theta_ {l} ^ {*} \left(\theta_ {h}\right) = \arg \max _ {\theta_ {l}} \mathbb {E} _ {\left(\mathbf {x}, \mathbf {y} ^ {*}\right) \sim \mathcal {D}, \mathbf {m} \sim \pi_ {\theta_ {h}}, \mathbf {y} \sim \pi_ {\theta_ {l}}} \left[ R _ {l} \left(\mathbf {m}, \mathbf {y}, \mathbf {y} ^ {*}\right) \right], \tag {11} +$$ + +where $R_{h}$ and $R_{l}$ are policies' individual reward functions, including $R$ and regularization according to tasks and models, e.g., different format rewards (refer to Appendix C.2 for details). The detailed algorithm is in the Algorithm 1. We illustrate the MAMRP inference procedure and the proposed training method in Fig. 2. We also provide an analysis of different loss functions in Appendix C.5. + +# 3.2.2 Scaling up to Multi-turn MAMRP + +To scale up to multi-turn MAMRP, we can still adopt the iterative training strategy in Sec. 3.2.1. However, we make some changes to improve the efficiency of rollout and training. + +First, we implement a parameter-sharing strategy where both high-level and low-level agents utilize identical model weights $\theta$ , distinguished only by role-specific system prompts $S_{h}$ and $S_{l}$ . 
Formally, we define $\pi_h = \pi_\theta (\cdot |S_h,\cdot)$ and $\pi_l = \pi_\theta (\cdot |S_l,\cdot)$ , sharing the same underlying parameters rather than maintaining separate model instances. This approach eliminates the need for frequent model swapping on GPU during rollout, avoiding inefficient wait times, while enabling larger batch sizes during training to simultaneously optimize policies for both meta-thinking and reasoning roles. + +Second, we propose a multi-turn GRPO with turn-level ratio to address the challenges of multi-turn MAMRP. The trajectory-level averaged objective with turn-level ratio of $\pi_{l}$ is defined as (The objective of $\pi_h$ is the similar but with different system prompt): + +$$ +\mathcal {J} (\boldsymbol {\theta}) = \mathbb {E} _ {(\mathbf {x}, \mathbf {y} ^ {*}) \sim \mathcal {D}, \{(\mathbf {m} _ {i}, \mathbf {y} _ {i}) \} _ {i = 1} ^ {G} \sim \pi_ {\theta_ {\mathrm {o l d}}} (\cdot | \mathbf {x}) +$$ + +$$ +\left. \left[ \frac {1}{G} \sum_ {i = 1} ^ {G} \frac {1}{T _ {i}} \sum_ {t = 1} ^ {T _ {i}} \frac {1}{| \mathbf {y} _ {i , t} |} \sum_ {j = 1} ^ {| \mathbf {y} _ {i, t} |} \left(\min \left(r _ {i, t} (\theta) \hat {A} _ {i, t, j}, \operatorname {c l i p} \left(r _ {i, t} (\theta), 1 - \epsilon , 1 + \epsilon\right) \hat {A} _ {i, t, j}\right) - \beta D _ {\mathrm {K L}} \left(\pi_ {\theta} \| \pi_ {\text {r e f}}\right)\right) \right] \right. \tag {12} +$$ + +where $\mathbf{y}_{i,t,j}$ is the $j$ -th token at turn $t$ of the reasoning agent of the $i$ -th trajectory. 
And the turn-level ratio for clipping is defined as: + +$$ +r _ {i, t} (\theta) = \frac {1}{| \mathbf {y} _ {i , t} |} \sum_ {j = 1} ^ {| \mathbf {y} _ {i, t} |} r _ {i, t, j} (\theta) = \frac {1}{| \mathbf {y} _ {i , t} |} \sum_ {j = 1} ^ {| \mathbf {y} _ {i, t} |} \frac {\pi_ {\theta} \left(\mathbf {y} _ {i , t , j} \mid \mathbf {x} , \left\{\mathbf {m} _ {i , ,} , \mathbf {y} _ {i ,} \right\} _ {< t} , \mathbf {m} _ {i , t} , \mathbf {y} _ {i , t , < j}\right)}{\pi_ {\theta_ {\mathrm {o l d}}} \left(\mathbf {y} _ {i , t , j} \mid \mathbf {x} , \left\{\mathbf {m} _ {i , ,}, \mathbf {y} _ {i ,} \right\} _ {< t} , \mathbf {m} _ {i , t} , \mathbf {y} _ {i , t , < j}\right)}. \tag {13} +$$ + +The introduction of the turn-level ratio serves two key purposes. First, using a token-level ratio (Eq. (2)) in the objective introduces bias for multi-turn training, as it averages over all tokens in a trajectory. This means that tokens within longer turns (those containing more tokens) can disproportionately influence the overall loss, and averaging at the token level may encourage excessively long single-turn responses. Second, clipping each token independently risks instability during training. + +In contrast, the turn-level ratio aligns more closely with the underlying MDP formulation by treating all tokens within a turn as a single action and applying clipping at the turn level. Intuitively, this approach stabilizes training by preventing the LLM from making unstable updates that could result in extreme outputs, such as overly long repetitions or incoherent text. We conduct experimental verification in subsequent empirical results (Sec. 4.3). + +# 4 Experiments + +To evaluate the effectiveness and efficiency of ReMA, we conduct experiments on challenging benchmarks for two types of tasks: mathematical reasoning and LLM-as-a-Judge with three different LLMs. Then, we investigate the models' performance in both single- & multi-turn settings. 
Finally, we provide ablation studies and qualitative analyses of our method. + +# 4.1 Experiment Settings + +We first analyze the single-turn case of ReMA, i.e., $T = 1$ . The high-level agent generates a complete meta-thinking trace in one shot, and the low-level agent follows the instructions and outputs the final results. Single-turn ReMA reduces stochasticity and training cost while our experiments show that it still provides meaningful performance gains. + +Benchmarks We conduct experiments on two types of tasks: mathematical reasoning and LLM-as-a-Judge. For mathematical reasoning experiments, we train models on 7.5k training samples in MATH [Hendrycks et al., 2021] and use MATH500 [Lightman et al., 2023] as the in-distribution test dataset. Additionally, we test the optimized models on out-of-distribution datasets: GSM8K [Cobbe et al., 2021], AIME24 $^{4}$ , AMC23 $^{5}$ , GaoKao2023En [Zhang et al., 2023], Minerva Math [Lewkowycz et al., 2022], and Olympiad Bench [He et al., 2024]. + +For LLM-as-a-Judge benchmarks, we train models on RewardBench [Lambert et al., 2024]. Specifically, we convert the original data into a pair-ranking format and split it into a training set of 5k items and a test set of 970 items, denoted as RewardBench970. The models are also tested on JudgeBench [Tan et al., 2024] to assess out-of-distribution performance. We refer to Appendix D.1.2 for detailed comparisons and results. + +Baselines, Models, Training Settings We compare pass@1 performance across the following methods: (1) VRP (CoT, step-by-step prompting, Sec. 3.1); (2) $\mathbf{VRP}_{\mathbf{RL}}$ (RL under VRP); (3) $\mathbf{MRP}_{\mathbf{RL}}$ (RL under MRP with high-level task analysis, Eq. (4)), and (4) ReMA (ours, RL under MAMRP, Eq. (7)). + +We train and test Llama-3-8B-Instruct, Llama-3.1-8B-Instruct [Dubey et al., 2024], and Qwen2.5-7B-Instruct [Team, 2024] on mathematical reasoning benchmarks. 
For LLM-as-a-judge benchmarks, we train and test Llama-3.1-8B-Instruct and Qwen2.5-7B-Instruct. We use instruct-tuned LLMs to prompt them to perform VRP, MRP, and MAMRP directly during training. Unless specified, we use two separate copies of the same model for high- and low-level agents in ReMA. We use the base reward setting in Appendix C.2 by default. And for the underlying RL algorithm, we use REINFORCE++ [Hu, 2025]. We refer to Appendix D for detailed training settings. + +# 4.2 Results of Single-turn ReMA + +Question 1. Does single-turn ReMA outperforms baselines on both in-distribution and out-of-distribution test sets? + +Table 1: Performance on in-distribution test sets and out-of-distribution test sets. We also report the improvement/degradation w.r.t. basic CoT performance(VRP). On average, ReMA outperforms all baselines. Particularly on out-of-distribution datasets, ReMA achieves the highest performance on most of the benchmarks. + +(a) Performance on math benchmarks + +
ModelBenchmarkVRP(CoT)\( \mathbf{V R P_{R L}} \)\( \mathbf{M R P_{R L}} \)ReMA(Ours)
Llama3-8B-InstructMATH50030.8033.40 (+2.60)32.80 (+2.00)33.80 (+3.00)
GSM8K67.4881.80 (+14.32)79.68 (+12.20)79.38 (+11.90)
AIME240.000.00 (+0.00)3.33 (+3.33)0.00 (+0.00)
AMC232.5010.00 (+7.50)12.50 (+10.00)22.50 (+20.00)
Gaokao2023en22.3427.53 (+5.19)23.38 (+1.04)28.57 (+6.23)
Minerva Math8.8216.54 (+7.72)18.01 (+9.19)13.97 (+5.15)
Olympiad Bench8.448.89 (+0.45)9.33 (+0.89)8.89 (+0.45)
Average20.0525.45 (+5.40)25.58 (+5.53)26.73 (+6.68)
Llama3.1-8B-InstructMATH50050.8050.20 (-0.60)48.60 (-2.20)53.20 (+2.40)
GSM8K86.0584.53 (-1.52)85.37 (-0.68)87.26 (+1.21)
AIME2410.003.33 (-6.67)6.67 (-3.33)13.33 (+3.33)
AMC2327.5012.50 (-15.00)30.00 (+2.50)20.00 (-7.50)
Gaokao2023en38.9636.10 (-2.86)37.14 (-1.82)37.14 (-1.82)
Minerva Math22.7926.84 (+4.05)25.37 (+2.58)28.31 (+5.52)
Olympiad Bench15.1119.70 (+4.59)15.70 (+0.59)19.56 (+4.45)
Average35.8933.32 (-2.57)35.55 (-0.34)36.97 (+1.08)
Qwen2.5-7B-InstructMATH50075.0077.20 (+2.20)76.40 (+1.40)74.40 (-0.60)
GSM8K92.0491.36 (-0.68)91.81 (-0.23)90.60 (-1.44)
AIME246.676.67 (+0.00)10.00 (+3.33)20.00 (+13.33)
AMC2347.5050.00 (+2.50)52.50 (+5.00)57.50 (+10.00)
Gaokao2023en56.6254.81 (-1.81)55.06 (-1.56)57.92 (+1.30)
Minerva Math35.6634.93 (-0.73)32.35 (-3.31)34.93 (-0.73)
Olympiad Bench38.2238.37 (+0.15)37.78 (-0.44)36.30 (-1.92)
Average50.2450.48 (+0.24)50.84 (+0.60)53.09 (+2.85)
+ +(b) Performance on LLM-as-a-Judge benchmarks + +
ModelBenchmarkVRP(CoT)\( \mathbf{V R P_{R L}} \)\( \mathbf{M R P_{R L}} \)ReMA(Ours)
Llama3.1-8B-InstructRewardBench97069.4882.89 (+13.41)81.13 (+11.65)83.71 (+14.23)
JudgeBench51.2951.94 (+0.65)52.90 (+1.61)52.90 (+1.61)
Average60.3967.41 (+7.02)67.02 (+6.63)68.31 (+7.92)
Qwen2.5-7B-InstructRewardBench97078.5685.36 (+6.80)86.49 (+7.93)83.51 (+4.95)
JudgeBench58.3956.94 (-1.45)58.39 (+0.00)56.94 (-1.45)
Average68.4771.15 (+2.68)72.44 (+3.97)70.22 (+1.75)
+ +Table 1 compares the greedy decoding performance of ReMA against various RL baselines across mathematical benchmarks (Table 1a) and LLM-as-a-Judge benchmarks (Table 1b). Results across different LLMs indicate that, on average, ReMA outperforms all baselines, achieving a maximum improvement of $6.68\%$ on mathematical benchmarks and $8.49\%$ on LLM-as-a-Judge benchmarks. + +Notably, ReMA achieves the highest performance on most benchmarks, particularly on out-of-distribution datasets, with a maximum improvement of $20\%$ on AMC23 for Llama3-8B-Instruct, $13.33\%$ on AIME24 for Qwen2.5-7B-Instruct, $14.23\%$ on RewardBench970 for Llama3.1-8B-Instruct. These results demonstrate the superior out-of-distribution generalization ability conferred by the meta-thinking mechanism in ReMA. However, we observe that the accuracy gains from RL training on instruction-tuned LMs are smaller than from base models (Sec. 4.2.1). This may be due to the higher initial performance and the relatively fixed output distribution of instruction-tuned models, which limits the improvement and peak performance in RL. + +![](images/37ed86a4d76064b8e7dc589771be62484945d0a0fbaf36c2bdeedac73830355d.jpg) +Figure 3: An RL experiment with 3 training schemes. While RL from SFT excels on easier problems, RL under Meta-thinking shows superior generalization to harder problems like AIME24. + +![](images/7cef31214b1e22f8feceb774c7c2fda0d8822a64668c93c676a445aa329e03c2.jpg) +Figure 4: Average problem difficulty by action type during training. Left: 1B LM collapses to the EMPTY action. Right: 8B LM adapts to a more complex meta-thinking strategy for harder problems. + +![](images/8067de4a34648160295745db52239f10161f855ef4f748f78627686ff344515e.jpg) + +# 4.2.1 Meta-thoughts boost low-level generalization + +# Question 2. Can Reasoning benefit from Meta-thinking? + +Here we provide a tiny but motivating example of how ReMA gives better learning dynamics. 
We use Qwen2.5-Math-7B [Yang et al., 2024] as the starting base model, MATH (level 3-5, about $5.5\mathrm{K}$ instances) as the training dataset, and we compare three reinforcement learning training schemes, in particular: (1) RL from Base: train the base model directly on MATH with binary outcome reward; (2) RL from SFT: SFT the base model with GPT-4o's CoT answers; then RL on train dataset with binary outcome reward; (3) RL under Meta-thinking: SFT the base model with GPT-4o's meta-thinking plans; then RL on train dataset with binary outcome reward. + 
+The models are evaluated on 3 benchmarks (Fig. 3). SFT brings the best initial accuracy on in-distribution and easier sets, but fails to improve on harder ones. RL from Base yields limited gains. In contrast, RL under Meta-thinking achieves the best learning dynamics and generalizes better to challenging problems (AIME24). See Appendix F.1 for case studies. + 
+# 4.2.2 Diverse meta-thinking characteristics of LLMs + 
+# Question 3. How well can LLMs learn to meta-think? + 
+To further analyze meta-thinking behaviors, we train models with structured JSON-format actions inspired by Yue et al. The meta-thinking agent generates two entries in one LM call: it first selects from three actions—DECOMPOSE (breaking into subproblems), REWRITE (simplifying the problem), or EMPTY (direct solving)—then generates the corresponding text. We compare Llama-3.1-8B-Instruct and Llama-3.2-1B-Instruct to study scale effects (two 1B models vs two 8B models) on the meta-thinking agent's training. We use vLLM guided JSON decoding [Dong et al., 2024] for valid formatting and base reward (reasoning agent's solution accuracy with format constraints). + 
+We observe that smaller LMs produce simpler outputs, likely due to limited capacity to maintain valid JSON formatting while exploring diverse reasoning strategies. As Fig. 
4 shows, smaller LMs like Llama-3.2-1B-Instruct quickly converge to the simplest EMPTY action to avoid formatting penalties, while larger LMs like Llama-3.1-8B-Instruct can adapt meta-thinking strategies based on problem difficulty. See Appendix F.3 for detailed case studies. + 
+![](images/7be77f52a452cae631a3999a5c1a37f14186bd400761aa6b2200170fc2818d62.jpg) 
+Figure 5: Training results of multi-turn ReMA on MATH-Level3-5-8K under different training and rollout configurations. Figure 6: Ablations of multi-turn ReMA on a tiny subset of MATH; we only show here the training curves of different configurations. + 
+![](images/0a12a930e142fe2cdce1144a9d1bcf93a65808b7f66fd19e01bc8a6c088ffc04.jpg) + 
+# 4.3 Extending ReMA to Multi-turn MAMRP + 
+We further extend ReMA to multi-turn MAMRP settings, enabling multiple rounds of interaction between the meta-thinking agent and the reasoning agent as defined in Eq. (7). + 
+Unlike the inherent VRP capabilities of most LLMs, multi-turn ReMA requires initial bootstrapping. Thus, we constructed a supervised fine-tuning dataset (about 0.8K samples) from LIMO [Ye et al., 2025c] using GPT-4o to establish the starting point for multi-turn interaction capabilities. Then we finetune Qwen2.5-7B before RL training. + 
+As described in Sec. 3.2.2, we deploy the proposed GRPO with turn-level ratio clipping and trajectory-level averaging loss during training. And we remove the KL-divergence term to allow more flexible exploration. By default, the agents share the same parameters and are simultaneously updated using their trajectories. We refer to details in Appendix D.2. + 
+# 4.3.1 Results and Ablations + 
+# Question 4. Can ReMA be scaled to multi-turn settings? + 
+There are two key points revealed by our multi-turn ReMA experiments, as shown in Fig. 5. On one hand, the algorithm can demonstrate effective convergence on the training set, with accuracy steadily increasing from approximately $55\%$ to $70\%$ during training. 
It also achieves an average performance gain of about $5\%$ across all seven test benchmarks, indicating stable improvements on out-of-distribution data. (Experiment with the rollout config of turn30_token512, see Appendix D.2.2 and Fig. 8 for more details.) + +On the other hand, we observe that the performance of multi-turn ReMA is highly sensitive to hyperparameters such as the maximum response length per turn and the maximum number of turns. For certain configurations, the model either collapses into producing massive repetitions within a single turn or generates empty responses after only a few turns. Similar phenomena have been reported in concurrent works such as RAGEN [Wang et al., 2025], where these issues are attributed to the lack of fine-grained, reasoning-aware guidance. As a result, multi-turn RL becomes susceptible to long-horizon credit assignment challenges and state drift, often leading to reduced exploration diversity—a phenomenon referred to as the "Echo Trap". To address this challenge, it is essential to comprehensively explore the training recipe w.r.t. model, data, and algorithm. + +# Question 5. How does parameter sharing and turn-level ratio affect multi-turn ReMA? + +As shown in Fig. 6, we compare different configurations on a smaller dataset consisting of 133 samples—19 from each of the 7 MATH problem types—to evaluate sample efficiency and convergence speed. First, all configurations eventually achieve nearly $100\%$ accuracy on the training dataset. Notably, the trajectory-level loss with turn-level ratio (Turn-Ratio, Eq. (13)) demonstrates substantially better sample efficiency than its token-level variants (Eq. (2)), reaching higher training rewards with fewer steps. We also present the training curve of separate weight setting, the empirical results show that shared parameters with simultaneous updates converge noticeably faster. 
+ +# 5 Conclusion + +In this paper, we introduced ReMA, a novel framework that leverages multi-agent reinforcement learning to elicit meta-thinking in large language models. By explicitly separating meta-thinking and reasoning processes into distinct agents, our approach enhances both exploration during training and the interpretability of model outputs. We tailored RL algorithms and reward functions to ensure reliable performance. Through comprehensive experiments on mathematical reasoning and LLM-as-a-Judge benchmarks, ReMA consistently achieved superior results, particularly on out-of-distribution datasets. We further extend ReMA to multi-turn settings, enabling the framework to handle more complex reasoning scenarios that require more communication between agents. Our ablations demonstrate how effective coordination between agents evolves, highlighting the promise of reinforcement learning and structured agents' collaboration for advancing the capabilities of language models in complex reasoning tasks. + +# References + +Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. +Elif Akata, Lion Schulz, Julian Coda-Forno, Seong Joon Oh, Matthias Bethge, and Eric Schulz. Playing repeated games with large language models. arXiv preprint arXiv:2305.16867, 2023. +Cem Anil, Guodong Zhang, Yuhuai Wu, and Roger B. Grosse. Learning to give checkable answers with prover-verifier games. CoRR, abs/2108.12099, 2021. URL https://arxiv.org/abs/2108.12099. +Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: A family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 1, 2023. 
+Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. +Jiaqi Chen, Yuxian Jiang, Jiachen Lu, and Li Zhang. S-agents: Self-organizing agents in open-ended environments. arXiv preprint arXiv:2402.04578, 2024a. +Qiguang Chen, Libo Qin, Jiaqi WANG, Jingxuan Zhou, and Wanxiang Che. Unlocking the capabilities of thought: A reasoning boundary framework to quantify and optimize chain-of-thought. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024b. URL https://openreview.net/forum?id=pC44UMwy2v. +Shuhao Chen, Weisen Jiang, Baijiong Lin, James T Kwok, and Yu Zhang. Routersc: Query-based router by dual contrastive learning for assembling large language models. arXiv preprint arXiv:2409.19886, 2024c. +Weize Chen, Yusheng Su, Jingwei Zuo, Cheng Yang, Chenfei Yuan, Chi-Min Chan, Heyang Yu, Yaxi Lu, Yi-Hsin Hung, Chen Qian, et al. Agentverse: Facilitating multi-agent collaboration and exploring emergent behaviors. In The Twelfth International Conference on Learning Representations, 2023. +Yongchao Chen, Jacob Arkin, Charles Dawson, Yang Zhang, Nicholas Roy, and Chuchu Fan. Autotamp: Autoregressive task and motion planning with llms as translators and checkers. In 2024 IEEE International conference on robotics and automation (ICRA), pages 6695-6702. IEEE, 2024d. +Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. Palm: Scaling language modeling with pathways. Journal of Machine Learning Research, 24(240): 1-113, 2023. +Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V Le, Sergey Levine, and Yi Ma. 
Sft memorizes, rl generalizes: A comparative study of foundation model post-training. arXiv preprint arXiv:2501.17161, 2025. +Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021. +Google DeepMind. Gemini flash thinking, 2025. URL https://deepmind.google/technologies/gemini/flash-thinking/. Accessed: 2025-01-29. +DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, + +Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun, T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W. L. 
Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. X. Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Yishi Piao, Yisong Wang, Yixuan Tan, Yiyang Ma, Yiyuan Liu, Yongqiang Guo, Yuan Ou, Yuduan Wang, Yue Gong, Yuheng Zou, Yujia He, Yunfan Xiong, Yuxiang Luo, Yuxiang You, Yuxuan Liu, Yuyang Zhou, Y. X. Zhu, Yanhong Xu, Yanping Huang, Yaohui Li, Yi Zheng, Yuchen Zhu, Yunxian Ma, Ying Tang, Yukun Zha, Yuting Yan, Z. Z. Ren, Zehui Ren, Zhangli Sha, Zhe Fu, Zhean Xu, Zhenda Xie, Zhengyan Zhang, Zhewen Hao, Zhicheng Ma, Zhigang Yan, Zhiyu Wu, Zihui Gu, Zijia Zhu, Zijun Liu, ZiLin Li, Ziwei Xie, Ziyang Song, Zizheng Pan, Zhen Huang, Zhipeng Xu, Zhongyu Zhang and Zhen Zhang. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. 2025. URL https://arxiv.org/abs/2501.12948. +Aniket Didolkar, Anirudh Goyal, Nan Rosemary Ke, Siyuan Guo, Michal Valko, Timothy Lillicrap, Danilo Rezende, Yoshua Bengio, Michael Mozer, and Sanjeev Arora. Metacognitive capabilities of llms: An exploration in mathematical problem solving. arXiv preprint arXiv:2405.12205, 2024. +Dujian Ding, Ankur Mallick, Chi Wang, Robert Sim, Subhabrata Mukherjee, Victor Ruhle, Laks VS Lakshmanan, and Ahmed Hassan Awadallah. Hybrid llm: Cost-efficient and quality-aware query routing. arXiv preprint arXiv:2404.14618, 2024. +Kefan Dong and Tengyu Ma. Stp: Self-play llm theorem provers with iterative conjecturing and proving, 2025. URL https://arxiv.org/abs/2502.00212. +Yixin Dong, Charlie F Ruan, Yaxing Cai, Ruihang Lai, Ziyi Xu, Yilong Zhao, and Tianqi Chen. Xgrammar: Flexible and efficient structured generation engine for large language models. 
arXiv preprint arXiv:2411.15100, 2024. +Yilun Du, Shuang Li, Antonio Torralba, Joshua B Tenenbaum, and Igor Mordatch. Improving factuality and reasoning in language models through multiagent debate. In *Forty-first International Conference on Machine Learning*, 2023. +Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. +Andrew Estornell, Jean-Francois Ton, Yuanshun Yao, and Yang Liu. Acc-debate: An actor-critic approach to multi-agent debate, 2024. URL https://arxiv.org/abs/2411.00053. +John H Flavell. Metacognition and cognitive monitoring: A new area of cognitive-developmental inquiry. American psychologist, 34(10):906, 1979. +Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D. Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars. 2025. URL https://arxiv.org/abs/2503.01307. +Peizhong Gao, Ao Xie, Shaoguang Mao, Wenshan Wu, Yan Xia, Haipeng Mi, and Furu Wei. Meta reasoning for large language models. arXiv preprint arXiv:2406.11698, 2024. + +Alex Graves. Sequence transduction with recurrent neural networks. arXiv preprint arXiv:1211.3711, 2012. +Fatemeh Haji, Mazal Bethany, Maryam Tabar, Jason Chiang, Anthony Rios, and Peyman Najafirad. Improving llm reasoning with multi-agent tree-of-thought validator agent, 2024. URL https://arxiv.org/abs/2409.11527. +Rui Hao, Linmei Hu, Weijian Qi, Qingliu Wu, Yirui Zhang, and Liqiang Nie. Chatlm network: More brains, more intelligence. AI Open, 2025. +Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, et al. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008, 2024. 
+Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874, 2021. +Sirui Hong, Xiawu Zheng, Jonathan Chen, Yuheng Cheng, Jinlin Wang, Ceyao Zhang, Zili Wang, Steven Ka Shing Yau, Zijuan Lin, Liyang Zhou, et al. Metagpt: Meta programming for multiagent collaborative framework. arXiv preprint arXiv:2308.00352, 3(4):6, 2023. +Jian Hu. Reinforce++: A simple and efficient approach for aligning large language models. arXiv preprint arXiv:2501.03262, 2025. +Jian Hu, Xibin Wu, Zilin Zhu, Xianyu, Weixun Wang, Dehao Zhang, and Yu Cao. Openrlhf: An easy-to-use, scalable and high-performance rlhf framework. arXiv preprint arXiv:2405.11143, 2024a. +Qitian Jason Hu, Jacob Bieker, Xiuyu Li, Nan Jiang, Benjamin Keigwin, Gaurav Ranganath, Kurt Keutzer, and Shriyash Kaustubh Upadhyay. Routerbench: A benchmark for multi-llm routing system. arXiv preprint arXiv:2403.12031, 2024b. +Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024. +Fangkai Jiao, Geyang Guo, Xingxing Zhang, Nancy F Chen, Shafiq Joty, and Furu Wei. Preference optimization for reasoning with pseudo feedback. arXiv preprint arXiv:2411.16345, 2024. +Bowen Jin, Hansi Zeng, Zhenrui Yue, Jinsung Yoon, Sercan Arik, Dong Wang, Hamed Zamani, and Jiawei Han. Search-r1: Training llms to reason and leverage search engines with reinforcement learning, 2025. +Jan Hendrik Kirchner, Yining Chen, Harri Edwards, Jan Leike, Nat McAleese, and Yuri Burda. Prover-verifier games improve legibility of llm outputs. arXiv preprint arXiv:2407.13692, 2024. +Robert Kirk, Ishita Mediratta, Christoforos Nalmpantis, Jelena Luketina, Eric Hambro, Edward Grefenstette, and Roberta Raileanu. Understanding the effects of rlhf on llm generalisation and diversity. 
In The Twelfth International Conference on Learning Representations. +Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, et al. Training language models to self-correct via reinforcement learning. arXiv preprint arXiv:2409.12917, 2024. +Nathan Lambert, Valentina Pyatkin, Jacob Morrison, LJ Miranda, Bill Yuchen Lin, Khyathi Chandu, Nouha Dziri, Sachin Kumar, Tom Zick, Yejin Choi, et al. Rewardbench: Evaluating reward models for language modeling. arXiv preprint arXiv:2403.13787, 2024. +Pat Langley, Kirstin Cummings, and Daniel Shapiro. Hierarchical skills and cognitive architectures. In Proceedings of the annual meeting of the cognitive science society, volume 26, 2004. + +Aitor Lewkowycz, Anders Andreassen, David Dohan, Ethan Dyer, Henryk Michalewski, Vinay Ramasesh, Ambrose Slone, Cem Anil, Imanol Schlag, Theo Gutman-Solo, et al. Solving quantitative reasoning problems with language models. Advances in Neural Information Processing Systems, 35:3843-3857, 2022. +Ming Li, Jiuhai Chen, Lichang Chen, and Tianyi Zhou. Can llms speak for diverse people? tuning llms via debate to generate controllable controversial statements. arXiv preprint arXiv:2402.10614, 2024. +Tian Liang, Zhiwei He, Wenxiang Jiao, Xing Wang, Yan Wang, Rui Wang, Yujiu Yang, Shuming Shi, and Zhaopeng Tu. Encouraging divergent thinking in large language models through multiagent debate. arXiv preprint arXiv:2305.19118, 2023. +Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. arXiv preprint arXiv:2305.20050, 2023. +Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024. 
+Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective. CoRR, abs/2503.20783, 2025. doi: 10.48550/ARXIV.2503.20783. URL https://doi.org/10.48550/arXiv.2503.20783. +Chengdong Ma, Ziran Yang, Minquan Gao, Hai Ci, Jun Gao, Xuehai Pan, and Yaodong Yang. Red teaming game: A game-theoretic framework for red teaming language models. arXiv preprint arXiv:2310.00322, 2023. +Hao Ma, Tianyi Hu, Zhiqiang Pu, Boyin Liu, Xiaolin Ai, Yanyan Liang, and Min Chen. Coevolving with the other you: Fine-tuning LLM with sequential cooperative multi-agent reinforcement learning. CoRR, abs/2410.06101, 2024. doi: 10.48550/ARXIV.2410.06101. URL https://doi.org/10.48550/arXiv.2410.06101. +Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegrefe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, et al. Self-refine: Iterative refinement with self-feedback. Advances in Neural Information Processing Systems, 36:46534-46594, 2023. +Dakota Mahan, Duy Van Phung, Rafael Rafailov, Chase Blagden, Nathan Lile, Louis Castricato, Jan-Philipp Franken, Chelsea Finn, and Alon Albalak. Generative reward models. arXiv preprint arXiv:2410.12832, 2024. +Sumeet Ramesh Motwani, Chandler Smith, Rocktim Jyoti Das, Markian Rybchuk, Philip H. S. Torr, Ivan Laptev, Fabio Pizzati, Ronald Clark, and Christian Schroeder de Witt. Malt: Improving reasoning with multi-agent llm training, 2024. URL https://arxiv.org/abs/2412.01928. +Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025. +OpenAI. Openai o1 system card, 2024. URL https://openai.com/ol/. +Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. 
Training language models to follow instructions with human feedback. Advances in neural information processing systems, 35: 27730-27744, 2022. +Chanwoo Park, Seungju Han, Xingzhi Guo, Asuman Ozdaglar, Kaiqing Zhang, and Joo-Kyung Kim. Maporl: Multi-agent post-co-training for collaborative large language models with reinforcement learning. 2025. URL https://arxiv.org/abs/2502.18439. + +Ethan Perez, Saffron Huang, Francis Song, Trevor Cai, Roman Ring, John Aslanides, Amelia Glaese, Nat McAleese, and Geoffrey Irving. Red teaming language models with language models. arXiv preprint arXiv:2202.03286, 2022. +Israel Puerta-Merino, Carlos Núñez-Molina, Pablo Mesejo, and Juan Fernández-Olivares. A roadmap to guide the integration of llms in hierarchical planning. arXiv preprint arXiv:2501.08068, 2025. +Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual reasoning makes smaller llms stronger problem-solvers. arXiv preprint arXiv:2408.06195, 2024. +Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, and Pengfei Liu. O1 replication journey: A strategic progress report - part 1, 2024. URL https://arxiv.org/abs/2410.18982. +Lv Qingsong, Yangning Li, Zihua Lan, Zishan Xu, Jiwei Tang, Yinghui Li, Wenhao Jiang, Hai-Tao Zheng, and Philip S. Yu. Raise: Reinforenced adaptive instruction selection for large language models, 2025. +Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. +Krishan Rana, Jesse Haviland, Sourav Garg, Jad Abou-Chakra, Ian Reid, and Niko Suenderhauf. Sayplan: Grounding large language models using 3d scene graphs for scalable robot task planning. arXiv preprint arXiv:2307.06135, 2023. +Swarnadeep Saha, Xian Li, Marjan Ghazvininejad, Jason Weston, and Tianlu Wang. 
Learning to plan & reason for evaluation with thinking-llm-as-a-judge. arXiv preprint arXiv:2501.18099, 2025a. +Swarnadeep Saha, Xian Li, Marjan Ghazvininejad, Jason Weston, and Tianlu Wang. Learning to plan & reason for evaluation with thinking-llm-as-a-judge, 2025b. URL https://arxiv.org/abs/2501.18099. +John Schulman, Sergey Levine, Pieter Abbeel, Michael I. Jordan, and Philipp Moritz. Trust region policy optimization. In Francis R. Bach and David M. Blei, editors, Proceedings of the 32nd International Conference on Machine Learning, ICML 2015, Lille, France, 6-11 July 2015, volume 37 of JMLR Workshop and Conference Proceedings, pages 1889-1897. JMLR.org, 2015. URL http://proceedings.mlr.press/v37/schulman15.html. +John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. CoRR, abs/1707.06347, 2017. URL http://arxiv.org/abs/1707.06347. +Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. +Maohao Shen, Guangtao Zeng, Zhenting Qi, Zhang-Wei Hong, Zhenfang Chen, Wei Lu, Gregory Wornell, Subhro Das, David Cox, and Chuang Gan. Satori: Reinforcement learning with chain-of-action-thought enhances llm reasoning via autoregressive search, 2025. URL https:// arxiv.org/abs/2502.02508. +Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv: 2409.19256, 2024. +Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024. +Chan Hee Song, Jiaman Wu, Clayton Washington, Brian M Sadler, Wei-Lun Chao, and Yu Su. 
Llm-planner: Few-shot grounded planning for embodied agents with large language models. In Proceedings of the IEEE/CVF international conference on computer vision, pages 2998-3009, 2023. + +Dimitris Stripelis, Zijian Hu, Jipeng Zhang, Zhaozhuo Xu, Alay Dilipbhai Shah, Han Jin, Yuhang Yao, Salman Avestimehr, and Chaoyang He. Tensoropera router: A multi-model router for efficient llm inference. arXiv preprint arXiv:2408.12320, 2024. +Vighnesh Subramaniam, Yilun Du, Joshua B. Tenenbaum, Antonio Torralba, Shuang Li, and Igor Mordatch. Multiagent finetuning: Self improvement with diverse reasoning chains, 2025. URL https://arxiv.org/abs/2501.05707. +Chuanneng Sun, Songjun Huang, and Dario Pompili. Retrieval-augmented hierarchical in-context reinforcement learning and hindsight modular reflections for task planning with llms. arXiv preprint arXiv:2408.06520, 2024. +Richard Sutton. The bitter lesson. Incomplete Ideas (blog), 13(1):38, 2019. +Sijun Tan, Siyuan Zhuang, Kyle Montgomery, William Y Tang, Alejandro Cuadron, Chenguang Wang, Raluca Ada Popa, and Ion Stoica. Judgebench: A benchmark for evaluating llm-based judges. arXiv preprint arXiv:2410.12784, 2024. +Xiangru Tang, Anni Zou, Zhuosheng Zhang, Ziming Li, Yilun Zhao, Xingyao Zhang, Arman Cohan, and Mark Gerstein. Medagents: Large language models as collaborators for zero-shot medical reasoning. arXiv preprint arXiv:2311.10537, 2023. +Qwen Team. Qwen2.5: A party of foundation models, September 2024. URL https://qwenlm.github.io/blog/qwen2.5/. +Jun Wang, Meng Fang, Ziyu Wan, Muning Wen, Jiachen Zhu, Anjie Liu, Ziqin Gong, Yan Song, Lei Chen, Lionel M Ni, et al. Openr: An open source framework for advanced reasoning with large language models. arXiv preprint arXiv:2410.09671, 2024a. +Tianlu Wang, Ilia Kulikov, Olga Golovneva, Ping Yu, Weizhe Yuan, Jane Dwivedi-Yu, Richard Yuanzhe Pang, Maryam Fazel-Zarandi, Jason Weston, and Xian Li. Self-taught evaluators. arXiv preprint arXiv:2408.02666, 2024b. 
+Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. arXiv preprint arXiv:2203.11171, 2022. +Yuqing Wang and Yun Zhao. Metacognitive prompting improves understanding in large language models. arXiv preprint arXiv:2308.05342, 2023. +Zhenhailong Wang, Shaoguang Mao, Wenshan Wu, Tao Ge, Furu Wei, and Heng Ji. Unleashing the emergent cognitive synergy in large language models: A task-solving agent through multi-personal self-collaboration. arXiv preprint arXiv:2307.05300, 2023. +Zihan Wang, Kangrui Wang, Qineng Wang, Pingyue Zhang, Linjie Li, Zhengyuan Yang, Kefan Yu, Minh Nhat Nguyen, Licheng Liu, Eli Gottlieb, Monica Lam, Yiping Lu, Kyunghyun Cho, Jiajun Wu, Li Fei-Fei, Lijuan Wang, Yejin Choi, and Manling Li. Ragen: Understanding self-evolution in llm agents via multi-turn reinforcement learning, 2025. +Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022. +Sean Welleck, Ximing Lu, Peter West, Faeze Brahman, Tianxiao Shen, Daniel Khashabi, and Yejin Choi. Generating sequences by learning to self-correct. arXiv preprint arXiv:2211.00053, 2022. +Muning Wen, Ziyu Wan, Weinan Zhang, Jun Wang, and Ying Wen. Reinforcing language agents via policy optimization with action decomposition. CoRR, abs/2405.15821, 2024. doi: 10.48550/ ARXIV.2405.15821. URL https://doi.org/10.48550/arXiv.2405.15821. +Zhiheng Xi, Dingwen Yang, Jixuan Huang, Jiafu Tang, Guanyu Li, Yiwen Ding, Wei He, Boyang Hong, Shihan Do, Wenyu Zhan, et al. Enhancing llm reasoning via critique models with test-time and training-time supervision. arXiv preprint arXiv:2411.16579, 2024. 
+ +Violet Xiang, Charlie Snell, Kanishk Gandhi, Alon Albalak, Anikait Singh, Chase Blagden, Duy Phung, Rafael Rafailov, Nathan Lile, Dakota Mahan, et al. Towards system 2 reasoning in llms: Learning how to think with meta chain-of-though. arXiv preprint arXiv:2501.04682, 2025. +Yihang Xiao, Jinyi Liu, Yan Zheng, Xiaohan Xie, Jianye Hao, Mingzhi Li, Ruitao Wang, Fei Ni, Yuxiao Li, Jintian Luo, et al. Cellagent: An llm-driven multi-agent framework for automated single-cell data analysis. BioRxiv, pages 2024-05, 2024. +Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768, 2025. +Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, et al. Towards large reasoning models: A survey of reinforced reasoning with large language models. arXiv preprint arXiv:2501.09686, 2025. +Prateek Yadav, Tu Vu, Jonathan Lai, Alexandra Chronopoulou, Manaal Faruqui, Mohit Bansal, and Tsendsuren Munkhdalai. What matters for model merging at scale? arXiv preprint arXiv:2410.03617, 2024. +Xue Yan, Yan Song, Xinyu Cui, Filippos Christianos, Haifeng Zhang, David Henry Mguni, and Jun Wang. Ask more, know better: Reinforce-learned prompt questions for decision making with large language models. arXiv preprint arXiv:2310.18127, 2023. +An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, Keming Lu, Mingfeng Xue, Runji Lin, Tianyu Liu, Xingzhang Ren, and Zhenru Zhang. Qwen2.5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024. +Ling Yang, Zhaochen Yu, Bin Cui, and Mengdi Wang. Reasonflux: Hierarchical llm reasoning via scaling thought templates. arXiv preprint arXiv:2502.06772, 2025. 
+Guanghao Ye, Khiem Duc Pham, Xinzhi Zhang, Sivakanth Gopi, Baolin Peng, Beibin Li, Janardhan Kulkarni, and Huseyin A Inan. On the emergence of thinking in llms i: Searching for the right intuition. arXiv preprint arXiv:2502.06773, 2025a. +Peijun Ye, Tao Wang, and Fei-Yue Wang. A survey of cognitive architectures in the past 20 years. IEEE transactions on cybernetics, 48(12):3280-3290, 2018. +Yaowen Ye, Cassidy Laidlaw, and Jacob Steinhardt. Iterative label refinement matters more than preference optimization under weak supervision. arXiv preprint arXiv:2501.07886, 2025b. +Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning. arXiv preprint arXiv:2502.03387, 2025c. +Le Yu, Bowen Yu, Haiyang Yu, Fei Huang, and Yongbin Li. Language models are super mario: Absorbing abilities from homologous models as a free lunch. In *Forty-first International Conference on Machine Learning*, 2024. +Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Guangming Sheng, Yuxuan Tong, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Jinhua Zhu, Jiaze Chen, Jiangjie Chen, Chengyi Wang, Hongli Yu, Weinan Dai, Yuxuan Song, Xiangpeng Wei, Hao Zhou, Jingjing Liu, Wei-Ying Ma, Ya-Qin Zhang, Lin Yan, Mu Qiao, Yonghui Wu, and Mingxuan Wang. Dapo: An open-source llm reinforcement learning system at scale, 2025. +Murong Yue, Wenlin Yao, Haitao Mi, Dian Yu, Ziyu Yao, and Dong Yu. Dots: Learning to reason dynamically in llms via optimal reasoning trajectories search. In The Thirteenth International Conference on Learning Representations. +Murong Yue, Wenlin Yao, Haitao Mi, Dian Yu, Ziyu Yao, and Dong Yu. DOTS: learning to reason dynamically in llms via optimal reasoning trajectories search. CoRR, abs/2410.03864, 2024. doi: 10.48550/ARXIV.2410.03864. URL https://doi.org/10.48550/arXiv.2410.03864. 
+ +Yanwei Yue, Guibin Zhang, Boyang Liu, Guancheng Wan, Kun Wang, Dawei Cheng, and Yiyan Qi. Masrouter: Learning to route llms for multi-agent systems. arXiv preprint arXiv:2502.11133, 2025a. +Yu Yue, Yufeng Yuan, Qiying Yu, Xiaochen Zuo, Ruofei Zhu, Wenyuan Xu, Jiaze Chen, Chengyi Wang, TianTian Fan, Zhengyin Du, Xiangpeng Wei, Xiangyu Yu, Gaohong Liu, Juncai Liu, Lingjun Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Ru Zhang, Xin Liu, Mingxuan Wang, Yonghui Wu, and Lin Yan. Vapo: Efficient and reliable reinforcement learning for advanced reasoning tasks, 2025b. +Weihao Zeng, Yuzhen Huang, Qian Liu, Wei Liu, Keqing He, Zejun Ma, and Junxian He. Simplerl-zoo: Investigating and taming zero reinforcement learning for open base models in the wild, 2025. +Di Zhang, Jianbo Wu, Jingdi Lei, Tong Che, Jiatong Li, Tong Xie, Xiaoshui Huang, Shufei Zhang, Marco Pavone, Yuqiang Li, Wanli Ouyang, and Dongzhan Zhou. Llama-berry: Pairwise optimization for o1-like olympiad-level mathematical reasoning, 2024a. URL https://arxiv.org/abs/2410.02884. +Hangfan Zhang, Zhiyao Cui, Xinrun Wang, Qiaosheng Zhang, Zhen Wang, Dinghao Wu, and Shuyue Hu. If multi-agent debate is the answer, what is the question? arXiv preprint arXiv:2502.08788, 2025a. +Jiayi Zhang, Jinyu Xiang, Zhaoyang Yu, Fengwei Teng, Xionghui Chen, Jiaqi Chen, Mingchen Zhuge, Xin Cheng, Sirui Hong, Jinlin Wang, et al. Aflow: Automating agentic workflow generation. arXiv preprint arXiv:2410.10762, 2024b. +Xiaotian Zhang, Chunyang Li, Yi Zong, Zhengyu Ying, Liang He, and Xipeng Qiu. Evaluating the performance of large language models on gaokao benchmark. arXiv preprint arXiv:2305.12474, 2023. +Yiqun Zhang, Peng Ye, Xiaocui Yang, Shi Feng, Shufei Zhang, Lei Bai, Wanli Ouyang, and Shuyue Hu. Nature-inspired population-based evolution of large language models. arXiv preprint arXiv:2503.01155, 2025b. +Rosie Zhao, Alexandru Meterez, Sham Kakade, Cengiz Pehlevan, Samy Jelassi, and Eran Malach. 
Echo chamber: Rl post-training amplifies behaviors learned in pretraining, 2025. +Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. Marco-ol: Towards open reasoning models for open-ended solutions, 2024. URL https://arxiv.org/abs/2411.14405. +Yifei Zhou, Andrea Zanette, Jiayi Pan, Sergey Levine, and Aviral Kumar. Archer: Training language model agents via hierarchical multi-turn rl, 2024. +Mingchen Zhuge, Haozhe Liu, Francesco Faccio, Dylan R Ashley, Róbert Csordás, Anand Gopalakrishnan, Abdullah Hamdi, Hasan Abed Al Kader Hammoud, Vincent Herrmann, Kazuki Irie, et al. Mindstorms in natural language-based societies of mind. arXiv preprint arXiv:2305.17066, 2023. + +# Appendix Table of Contents + +- A Related work 19 + +-A.1 Single LLM Reasoning 19 +-A.2MultipleLLMReasoning 20 +-A.3 Hierarchical Reasoning 20 +-A.4RL in LLM 21 + +B Limitation and Future Work 21 + +C Supplementary Materials for Method in Section 3 21 + +- C.1 Inference-time Scaling For ReMA 21 +- C.2 Detailed reward design 22 +- C.3 Pseudocode of ReMA 23 +- C.4 Brief convergence analysis 23 +- C.5 Learning to reason from the perspective of Leader Follower Game 24 + +D Training Details 26 + +- D.1 Single-turn ReMA 26 + +\* D.1.1 Supervised fine-tuning data collection 27 +\* D.1.2 Dataset Curation of RewardBench970 27 +\*D.1.3 Training on MATH 28 +\* D.1.4 Training on Reward Bench 28 + +- D.2 Multi-turn ReMA 28 + +\* D.2.1 SFT data collection of multi-turn MAMRP 29 +\* D.2.2 Training on MATH 29 + +E Other Experiments 29 + +-E.1 Reward functions shape cross-agent behaviors 29 +- E.2 Detailed Training Curves on Different Datasets of Multi-turn ReMA 30 + +F Qualitative results 30 + +- F.1 High-level policy finds better plans 30 +- F.2 Case study for Experiments of Different Reward Functions in Appendix E.1 .30 +- F.3 Case study for Adaptive Meta-thinking in Single-Turn ReMA in Section 4.2.2 30 + +G Prompts 31 + +# A Related work + +Drawing from 
the bitter lesson [Sutton, 2019], two methods that appear to scale effectively are searching and learning, aligning with current trends in large language models [Xu et al., 2025]. At present, researchers are leveraging these methods to maximize the capabilities of individual transformers, while other efforts are exploring architectures that involve multiple interacting entities. In this paper, we examine this divergence within the context of LLM reasoning, a capability that allows large language models to solve problems through logical reasoning, step-by-step analysis, and inference [Wang et al., 2024a]. + +# A.1 Single LLM Reasoning + +Main research works in reasoning involving a single LLM utilize search-based and post-training methods. The fundamental elements of searching methods are text generation and evaluation. Generation schemes include In-Context Learning [Brown et al., 2020], Beam Search [Graves, 2012], and various tree-based searching [Snell et al., 2024]; Evaluation approaches often use outcome accuracy, self-consistency [Wang et al., 2022], or process reward signal [Lightman et al., 2023] as the criteria to select high-quality responses from the generated texts. Post-training method is another research line in opposition to pre-training. Popular training pipelines often involve specific data construction followed by Supervised Fine-tuning [Qin et al., 2024, Ouyang et al., 2022, Hui et al., 2024, Liu et al., 2024], or reinforcement learning to interactively explore learning patterns [Wang et al., 2024a, Zhang et al., 2024a, DeepSeek-AI et al., 2025, Xu et al., 2025]. + +# A.2 Multiple LLM Reasoning + +Integrating multiple entities can potentially surpass the intelligence of the individual model [Chen et al., 2023]. 
With the rapid emergence of large language models showing a varying level of abilities, some studies have explored facilitating discussions among multiple off-the-shelf LLMs [Zhang et al., 2025a, Chen et al., 2024a, Wang et al., 2023, Du et al., 2023, Zhuge et al., 2023, Tang et al., 2023, Hao et al., 2025, Akata et al., 2023, Hong et al., 2023, Zhang et al., 2024b], taking the form of free discussion [Du et al., 2023, Liang et al., 2023] or structured role assignments [Hong et al., 2023, Zhang et al., 2024b]. Some have applied routing mechanisms to assign tasks to the most suitable expert models [Hu et al., 2024b, Stripelis et al., 2024, Ding et al., 2024, Yue et al., 2025a, Chen et al., 2024c] or merging mechanisms to develop more versatile models [Yadav et al., 2024, Yu et al., 2024, Zhang et al., 2025b]. Beyond aggregating static knowledge from multiple agents, multi-agent LLM training can also enhance reasoning capabilities. For example, multi-agent debates can generate diverse synthetic data, which can subsequently be used for supervised fine-tuning [Estornell et al., 2024, Li et al., 2024, Motwani et al., 2024, Dong and Ma, 2025, Perez et al., 2022, Ye et al., 2025a, Subramaniam et al., 2025]. Reinforcement learning (RL) methods have also been adopted to improve LLM reasoning in areas such as alignment [Perez et al., 2022, Ma et al., 2023] and legibility [Kirchner et al., 2024]. Motwani et al. [2024] utilize a three-agent system for generation and fine-tune the models using Direct Preference Optimization (DPO). Reinforcement Learning with Generative Reward Models (GenRM) [Mahan et al., 2024, Ye et al., 2025b, Jiao et al., 2024, Wang et al., 2024b] represents another common approach of multi-agent training, where the reward signal is derived from the token probabilities of another LLM, coupled with the reasoning process. 
While our work aligns with these efforts, it diverges by using an additional tunable LLM to provide metacognitive instructions, guiding the low-level LLM during learning, rather than relying on a static GenRM. The most closely related works to ours are MAPoRL [Park et al., 2025] and COPYR [Ma et al., 2024]. MAPoRL is a multi-agent debating framework that uses multi-agent reinforcement learning (MARL) with a learned verifier to fine-tune each LLM agent. COPYR duplicates an LLM into two agents, training them simultaneously in the roles of pioneer and observer using RL. Shen et al. [2025] trained with a novel Chain-of-Action-Thought (COAT) framework that embeds meta-action tokens for self-reflection and exploration into an autoregressive search process. However, unlike our approach, which explicitly separates metacognition from plan execution, these methods do not decompose the reasoning process but instead focus on improving direct chain-of-thought generation. Furthermore, our experiments are conducted on a larger scale and include more challenging problems. + +# A.3 Hierarchical Reasoning + +Partitioning reasoning into hierarchical processes has been explored in prior research to make biological sense [Ye et al., 2018, Langley et al., 2004]. In the context of language models, a hierarchical structure has been used to facilitate diverse reasoning patterns, including planning [Puerta-Merino et al., 2025, Sun et al., 2024, Song et al., 2023, Rana et al., 2023, Chen et al., 2024d, Yan et al., 2023, Xiao et al., 2024], validation [Haji et al., 2024, Xi et al., 2024] and self-refinement [Madaan et al., 2023, Kumar et al., 2024, Welleck et al., 2022]. For instance, EvalPlanner [Saha et al., 2025b] is a framework that conducts reasoning through plan generation and execution. DOTS [Yue et al., 2024] extends decomposition by integrating a tree-based searching method with Analysis, Solution, and Verification layers. 
Marco-o1 [Zhao et al., 2024] focuses on open-ended problem-solving and abstract thinking, dynamically adjusting reasoning granularity and incorporating reflection mechanisms to enhance reasoning performance. Beyond these approaches, metacognition [Flavell, 1979] has been identified as another critical component of reasoning, referring to the intuitive understanding of one's own cognitive and reasoning processes [Gao et al., 2024, Wang and Zhao, 2023]. Wang and Zhao [2023] proposed a metacognitive prompting strategy to improve large language model (LLM) capabilities. Didolkar et al. [2024] further developed a prompt-guided method that enables models to label math problems with the required skills and subsequently use these labels to solve new problems. Gao et al. [2024] introduce meta-reasoner which use contextual multi-arm bandit to learn a high-level "advisor" over low-level reasoning process. Xiang et al. [2025] provides a Meta-CoT framework to think about its own thinking. They use construction-based methods as well as reinforcement learning to develop meta-cognitive skills. Qingsong et al. [2025] introduces a RL framework for dynamic instruction selection during fine-tuning. In our work, we also value reflect- + +ing on reasoning processes, and we enhance metacognitive abilities through two-agent interaction and reinforcement learning at both end. + +# A.4 RL in LLM + +Recent advancements in applying RL to LLMs have enhanced their reasoning and decision-making capabilities. Liu et al. [2025] examines token-level optimization biases by introducing Dr. GRPO to stabilize policy gradients. VAPO [Yue et al., 2025b] enhances PPO with value-aware perturbations and adaptive reward shaping to improve robustness in sparse-reward reasoning tasks. DAPO [Yu et al., 2025] provides a scalable, modular RL framework that integrates distributed rollout collection and dynamic replay buffers for reproducible training at scale. 
SimpleRL-Zoo [Zeng et al., 2025] conducts zero-shot RL experiments across open-base LLMs to uncover emergent cognitive behaviors under minimal reward signals. Echo Chamber [Zhao et al., 2025] investigates how RL fine-tuning algorithms can amplify pretrained model biases and proposes regularization to mitigate over-amplification. Wen et al. [2024] decomposes high-level language actions into token-level operations to achieve finer-grained credit assignment. Some works push RL training for single-turn to multi-turn. Search-R1 [Jin et al., 2025] trains LLMs to orchestrate multi-turn search strategies with RL-optimized decision policies to improve question-answering accuracy. ArCHer [Zhou et al., 2024] employs a hierarchical, multi-turn RL architecture with manager and worker policies to efficiently handle long-horizon dialogue tasks. RAGEN [Wang et al., 2025] introduces trajectory filtering and critic modules within a multi-turn RL framework to stabilize learning and reduce shallow policy behaviors. + +# B Limitation and Future Work + +In this work, we only test ReMA on math and LLM-as-a-Judge benchmarks. Though the results show the effectiveness of ReMA, adopting ReMA to tasks where naturally needs multi-turn interaction between several interleaved agents has great potential. Moreover, a comprehensive understanding of the learning dynamics of multi-turn RL and multi-turn MARL for LLMs is needed. Finally, there's still sufficient space to further improve the procedure of multi-turn multi-agent rollout through modern LLM speed up techniques, e.g. prefetch-decode disaggregation and asynchronous rollout. + +# C Supplementary Materials for Method in Section 3 + +# C.1 Inference-time Scaling of ReMA + +In this section, we discuss how to enhance the inference-time computation of our hierarchical system, specifically focusing on the interaction between the high-level and low-level agents. 
The total number of model samples required for inference is determined by the product of the sampling budget allocated to each agent. + +For instance, in a simple single-turn setting, if the high-level agent samples $k_{1}$ responses and each of these responses leads to $k_{2}$ samples from the low-level agent, the total number of model calls required is: + +$$ +\text {T o t a l s a m p l e s} = k _ {1} \times k _ {2}. +$$ + +Given a fixed computational budget, an important question arises: how should the sampling budget be distributed between the high-level and low-level agents to maximize performance? Allocating more samples to the high-level agent may increase diversity in reasoning strategies while allocating more to the low-level agent may yield more refined solutions for a given metacognitive plan. + +Another crucial consideration is how to perform reranking on the final outputs. Two potential strategies include: + +- Hierarchical reranking: First, for each high-level response, rank and aggregate the low-level responses under it. Then, rank the aggregated results across different high-level responses. + +- Flat reranking: Directly rank all sampled responses together, regardless of the hierarchy of high-level reasoning steps. + +Balancing sampling allocation and designing an effective reranking strategy are key challenges in efficiently scaling our multi-agent reasoning system. In the next section, we explore empirical results comparing different allocation strategies and ranking methods. + +# C.2 Detailed reward design + +As described in Sec. 3.2, we update both high-level and low-level agents by assigning rewards based on the low-level policy output. Below, we outline several potential reward designs: + +- Correctness reward: For tasks with explicit ground truth, we assign rewards based on the correctness of the low-level agent's output. 
+- Format reward: For tasks that require a specific output format, we enforce adherence to the prescribed structure by providing a format reward. +- To encourage the high-level agent to generate informative and unambiguous meta-thinking, and to stabilize the low-level outputs, we reward the high-level agent when the low-level agent produces consistent responses. Specifically, the consistency reward is defined as + +$$ +R _ {h} = \frac {\text {m a x o c c u r r e n c e o f a n a n s w e r}}{\text {t o t a l n u m b e r o f r e s p o n s e s}}. +$$ + +To examine multi-agent metacognition-integrated reasoning with different reward designs, we experiment with different reward function designs to encourage effective collaboration and structured reasoning. Below, we introduce and justify several reward schemes. + +1. Correctness and Format-Aware Reward (Base Setting) In our primary reward setting, the system's overall correctness is used as the primary reward signal, supplemented by format-based rewards for both the high-level and low-level agents. Using mathematical problem-solving as an example: + +- Low-level agent $(\pi_{\theta_l})$ : Receives a reward of $+1.0$ for a correct answer. If the answer is incorrect, the agent is further penalized based on format compliance. Specifically: + +- If the output contains the designated answer-indicating format (e.g., boxed in Latex), it receives $-0.5$ . +- Otherwise, it receives $-1.0$ , as a missing format often suggests an incomplete or unstructured response. + +- High-level agent $(\pi_{\theta_h})$ : Receives the average correctness of the low-level agent's sampled responses as its reward. Additionally, to prevent the high-level agent from directly generating explicit answers instead of guiding reasoning, a strong penalty of $-1.0$ is applied if it includes an explicit answer format (e.g., boxed). + +2. 
Consistency-Based Reward Instead of using correctness as the high-level reward signal, this approach rewards the high-level agent for promoting consistent responses from the low-level agent, regardless of actual correctness. The consistency reward is defined as the proportion of the most frequently occurring answer among all sampled responses: + +$$ +R _ {h} = \frac {\text {m a x o c c u r r e n c e o f a n a n s w e r}}{\text {t o t a l n u m b e r o f r e s p o n s e s}} \tag {14} +$$ + +If the majority of responses do not contain a definitive answer, the reward is set to zero. We also add the format penalty to the high-level agent if its output contains the designated answer-indicating format. This incentivizes the high-level agent to guide the low-level agent toward more stable, detailed, reproducible outputs rather than erratic reasoning paths. + +These different reward formulations allow us to investigate various dimensions of metacognitive reasoning: correctness, consistency, etc. We empirically compare their effects on learned metacognitive reasoning patterns in Sec. E.1. + +![](images/315cafbb1d2055dded17eaabf88749be03cfea772911a86fa7b194d2357e1c64.jpg) +Figure 7: Our method can be viewed as a combination of practical TRPO and block coordinate ascent, with the high and low-level models treated as distinct components within a larger neural network. Note that the figure does not represent the exact gradient back-propagation flow but rather highlights the key idea that we separate the high- and low-level models. This separation allows for the independent computation of gradients and the independent training of each model. + +# C.3 Pseudocode of ReMA + +The pseudocode is shown in Algorithm 1. + +Algorithm 1 Single turn MAMRP +Require: High-level policy $\pi_h$ , Low-level policy $\pi_l$ , Dataset $\mathcal{D}$ , Optimizers for $\pi_h$ and $\pi_l$ . 
$\varepsilon_{\mathrm{min}}, \varepsilon_{\mathrm{max}}$ to filter training dataset +1: Initialize $\pi_h$ and $\pi_l$ +2: while not converged do +3: build training dataset $\mathcal{D}_l$ with $\pi_h, \pi_l, \varepsilon_{\mathrm{min}}, \varepsilon_{\mathrm{max}}$ +4: for Sample $(\mathbf{x}, \mathbf{m}, \mathbf{y}^*) \sim \mathcal{D}_l$ do +5: Generate $\mathbf{y} \sim \pi_l(\mathbf{x}, \mathbf{m})$ +6: Compute low-level reward $R_l(\mathbf{y}, \mathbf{y}^*)$ +7: Update $\pi_l$ using $\nabla_{\theta_l} \mathbb{E}[R_l]$ +8: end for +9: build training dataset $\mathcal{D}_h$ with $\pi_h, \pi_l, \varepsilon_{\mathrm{min}}, \varepsilon_{\mathrm{max}}$ +10: for Sample $(\mathbf{x}, \mathbf{y}^*) \sim \mathcal{D}_h$ do +11: Generate $\mathbf{m} \sim \pi_h(\mathbf{x})$ and $\mathbf{y} \sim \pi_l(\mathbf{x}, \mathbf{m})$ +12: Compute high-level reward $R_h(\mathbf{m}, \mathbf{y}, \mathbf{y}^*)$ +13: Update $\pi_h$ using $\nabla_{\theta_h} \mathbb{E}[R_h]$ +14: end for +15: end while + +# C.4 Brief convergence analysis + +We reuse the notations from Sec. 3.2, where $\mathbf{x}$ is task prompt, $\mathbf{y}$ is generated answer, $\mathbf{y}^*$ is groundtruth, $\mathbf{m}$ is metacognition on task solving, $\pi_{\theta_h}$ and $\pi_{\theta_l}$ are high- and low-level agents with parameters $\theta_h$ and $\theta_l$ . We consider the joint hierarchical policy defined in Eq. (8) and update the objective as in Eq. (9). + +To leverage existing RL and optimization convergence analysis methods, we treat the two models as components of a larger model, as illustrated in Fig. 7. When updating one model, we treat the other + +model as part of a stationary environment. 
The gradients with respect to $\theta_h$ and $\theta_l$ are: + +$$ +\nabla_ {\theta_ {h}} J (\theta_ {h}, \theta_ {l}) = \mathbb {E} _ {\mathbf {x}, \mathbf {y} ^ {*}} \sum_ {\mathbf {m} \sim \pi_ {h} (\mathbf {m} | \mathbf {x}; \theta_ {h})} \nabla_ {\theta_ {h}} \pi_ {h} (\mathbf {m} | \mathbf {x}; \theta_ {h}) \left[ \mathbb {E} _ {\mathbf {y} \sim \pi_ {l} (\mathbf {y} | \mathbf {x}, \mathbf {m})} R (\mathbf {y}, \mathbf {y} ^ {*}) \right], +$$ + +$$ +\nabla_ {\theta_ {l}} J (\theta_ {h}, \theta_ {l}) = \mathbb {E} _ {\mathbf {x}, \mathbf {y} ^ {*}} \sum_ {\mathbf {y} \sim \pi (\theta_ {h}, \theta_ {l})} \nabla_ {\theta_ {l}} \pi_ {l} (\mathbf {y} \mid \mathbf {x}, \mathbf {m}; \theta_ {h}); \theta_ {l}) R (\mathbf {y}, \mathbf {y} ^ {*}). +$$ + +We can compute the gradients with log trick and estimate $\mathbb{E}_{\mathbf{y}\sim \pi_l(\mathbf{y}|\mathbf{x},\mathbf{m})}R(\mathbf{y},\mathbf{y}^*)$ with Monte Carlo method. + +Equipped with the objective function and gradient computation, we update the models iteratively. Without loss of generality, we analyze the case where the high-level policy is updated first: + +$$ +\theta_ {h} ^ {(t + 1)} = \arg \max _ {\theta_ {h}} J (\theta_ {h}, \theta_ {l} ^ {(t)}), +$$ + +$$ +\theta_ {l} ^ {(t + 1)} = \arg \max _ {\theta_ {l}} J \left(\theta_ {h} ^ {(t + 1)}, \theta_ {l}\right). +$$ + +Regarding the different regularizations $R_{h}$ and $R_{l}$ in Eqs. (10) and (11) for the different policies, instead of directly integrating them into the loss function, we treat them as constraints, as done in Trust Region Policy Optimization (TRPO) [Schulman et al., 2015]. Note that when one policy is fixed, the other policy operates in a stationary decision process. + +Based on the defined objective and update method, we apply TRPO and block coordinate ascent. First, recall that when updating a single policy, TRPO guarantees monotonic improvement by optimizing a lower bound. 
Specifically, let $\pi_{\mathrm{old}}$ and $\pi$ represent the old and current policies, respectively. We define a surrogate objective as: + +$$ +L _ {\pi_ {\mathrm {o l d}}} (\pi) = \mathbb {E} _ {s \sim \pi_ {\mathrm {o l d}}, a \sim \pi_ {\mathrm {o l d}}} \left[ \frac {\pi (a | s)}{\pi_ {\mathrm {o l d}} (a | s)} A ^ {\pi_ {\mathrm {o l d}}} (s, a) \right], +$$ + +As shown by Schulman et al. [2015], the true objective of $\pi$ is lower-bounded by: + +$$ +J (\pi) \geq L _ {\pi_ {\mathrm {o l d}}} (\pi) - C \cdot \max _ {s} \mathrm {K L} [ \pi_ {\mathrm {o l d}} (\cdot | s), \pi (\cdot | s) ], +$$ + +for some constant $C$ . By optimizing the right-hand side of the above inequality, we are guaranteed to improve the performance of $\pi$ . Therefore, for policies $\pi^t$ and $\pi^{t + 1}$ obtained from iterations $t$ and $t + 1$ using the TRPO method, we have: + +$$ +J (\pi^ {t + 1}) \geq J (\pi^ {t}). +$$ + +Now, returning to our updating method, we treat the high- and low-level policies as two blocks of a single agent. The iterative update process can thus be viewed as a cyclic block coordinate ascent, where the two policies are updated in a fixed order. By updating each block using the TRPO method, and improving the surrogate objective within the KL constraint, each block update does not decrease $J$ : + +$$ +J \left(\theta_ {h} ^ {t + 1}, \theta_ {l} ^ {t}\right) \geq J \left(\theta_ {h} ^ {t}, \theta_ {l} ^ {t}\right), +$$ + +$$ +J \left(\theta_ {h} ^ {t + 1}, \theta_ {l} ^ {t + 1}\right) \geq J \left(\theta_ {h} ^ {t + 1}, \theta_ {l} ^ {t}\right). +$$ + +Thus $J(\theta_h^{t + 1},\theta_l^{t + 1})\geq J(\theta_h^t,\theta_l^t)$ . This repeated coordinate maximization converges to a fixed point, where no single coordinate update can further improve $J(\theta_h,\theta_l)$ . 
+ +Given the theoretical monotonic improvement with TRPO and block coordinate ascent, we adopt a practical version of TRPO in our experiments, specifically Proximal Policy Optimization (PPO) [Schulman et al., 2017] or GRPO [Shao et al., 2024]. + +# C.5 Learning to reason from the perspective of Leader Follower Game + +Besides the loss function in the main part, we also propose to frame the problem as a leader-follower game. By analyzing the equilibria of the leader-follower game, we demonstrate that our framework inherently identifies the optimal sub-tasks aligned with the capabilities of the low-level model. This ensures that the high-level decisions are guided by the low-level model's strengths, leading to more efficient and targeted task decomposition. + +# C.5.1 Leader-follower game + +The leader-follower game, also known as the Stackelberg game, models interaction between two agents with parametrized strategies $\pmb{\theta} = (\pmb{\theta}_1, \pmb{\theta}_2)$ and differentiable objective functions $(\mathcal{L}_1, \mathcal{L}_2): \mathbb{R}^d \to \mathbb{R}$ . In this framework, the leader announces its strategy first, and the follower observes this decision to respond optimally. This sequential structure enables the leader to anticipate the follower's reaction and adjust its strategy accordingly. A Stackelberg equilibrium occurs when neither agent can unilaterally improve its objective. 
Denoting $\pmb{\theta}_1$ as the leader's strategy and $\pmb{\theta}_2$ as the follower's, the loss functions $\mathcal{L}_1$ and $\mathcal{L}_2$ are optimized with the following bi-level structure: + +$$ +\boldsymbol {\theta} _ {1} ^ {*} = \operatorname {a r g m i n} _ {\boldsymbol {\theta} _ {1}} \mathcal {L} _ {1} (\boldsymbol {\theta}, \boldsymbol {\theta} _ {2} ^ {*} (\boldsymbol {\theta} _ {1})), \quad \boldsymbol {w} _ {2} ^ {*} (\boldsymbol {\theta} _ {1}) = \operatorname {a r g m i n} _ {\boldsymbol {\theta} _ {2}} \mathcal {L} _ {2} (\boldsymbol {\theta} _ {1}, \boldsymbol {\theta} _ {2}). +$$ + +Anil et al. [2021] apply the leader-follower game to ensure checkable answers in a prover-verifier game (PVG). The objective is a verifier that is both complete (accepts all correct proofs from a verifier) and sound (rejects all incorrect proofs from a verifier). They analyze different scenarios where the verifier acts as the leader, the prover as the follower, and both announce strategies simultaneously, forming a Nash equilibrium. The study concludes that in verifier-led SVG, a Stackelberg equilibrium is both necessary and sufficient for achieving a sound and complete verifier, whereas in other configurations, a Stackelberg equilibrium is not necessary or sufficient for this outcome. + +# C.5.2 Efficacy of LLM + +Because the high-level policy possesses strong generalization capabilities, it is impractical for it to exhaustively explore every potential sub-task for each question. Instead, it naturally focuses on tasks within a feasible range of difficulty, leveraging only a limited set of coarse planning actions. Rather than pinpointing perfectly tailored sub-tasks, the policy searches for general tasks of particular computational complexity, i.e., difficulty, that it can handle reliably. Motivated by this perspective, we incorporate the concept of a reasoning boundary for large language models (LLMs) [Chen et al., 2024b]. 
Intuitively, the reasoning boundary circumscribes the maximum difficulty of problems a model can solve at a desired accuracy level. Formally, for a model $\theta$ , a task $t$ , and a predefined threshold $A$ , the reasoning boundary of $\theta$ represents the maximum problem difficulty $d$ that satisfies: + +$$ +\mathcal {B} _ {A c c = A} (t | \theta) = \sup _ {d} \{d | A c c (t | d, \theta) = A \}. +$$ + +where $d$ denotes the problem difficulty. By quantifying the difficulty level a model can reliably handle, the reasoning boundary provides a systematic way to align the high-level policy's focus with the model's actual capabilities, gauge the efficacy of the low-level policy, and determine the optimal strategy for solving the question. + +# C.5.3 Leader-follower Game for LLM Reasoning + +Our goal is to find the high-level policy that searches for the sub-task sequence based on the efficacy of the low-level policy to solve the question. We design the loss functions as follows: + +$$ +\mathcal {L} _ {h} = \mathbb {E} _ {(x, y) \sim p _ {D}, t _ {1: K}} \left[ - \log \pi_ {l} \left(y _ {K} \mid x, t _ {1: K}, y _ {1: K - 1}\right) \right], +$$ + +$$ +\mathcal {L} _ {l} = \mathbb {E} _ {x \sim p _ {D}, t _ {1: k} \sim \pi_ {h}, \hat {y} _ {k} \sim \pi_ {l}} \left[ - r \left(y _ {k}, \hat {y} _ {k} \mid x, t _ {1: k}, y _ {1: k - 1}\right) \right], +$$ + +where $r(y_k, \hat{y}_k \mid x, t_{1:k}, y_{1:k-1})$ represents the step reward for the correctness of $\hat{y}_k$ derived from the question $x$ , the sub-task sequence $t_{1:k}$ from the high policy and prior intermediate answer $y_{1:k-1}$ . The loss functions can be interpreted as follows: the high-level policy is incentivized to find subtasks that lead to the correct answer based on the capabilities of the low-level policy, while the low-level policy is incentivized to enhance its instruction-following ability. 
+ +How to minimize the loss functions and whether such minimization leads to the desired results remain questions. To explore this, we consider a simplified case of our method, where the high-level policy plans the complete sub-task sequence at the beginning and the low-level executes the instruction in a single interaction. The corresponding parameterized policies are defined as $\pi_h((t_1,\ldots ,t_K)\mid x)$ and $\pi_l((\hat{y}_1,\dots ,\hat{y}_K)\mid x,(t_1,\dots ,t_K))$ . The corresponding loss functions are: + +$$ +\mathcal {L} _ {h} = \mathbb {E} _ {(x, y) \sim p _ {D}, t _ {1: K}} \left[ - \log \pi_ {l} \left(y _ {K} \mid x, t _ {1: K}\right) \right], \tag {15} +$$ + +$$ +\mathcal {L} _ {l} = \mathbb {E} _ {x \sim p _ {D}, t _ {1: k} \sim \pi_ {h}, \hat {y} _ {k} \sim \pi_ {l}} \left[ - r \left(y _ {k}, \hat {y} _ {k} \mid x, t _ {1: k}, y _ {1: k - 1}\right) \right]. \tag {16} +$$ + +In this step, the high-level policy generates the entire sub-task sequence without relying on intermediate answers, while the low-level policy follows the sequence to produce answers for the sub-tasks. The low-level policy can still leverage prior intermediate answers to sequentially refine its responses. + +To analyze the result agents by minimizing the loss functions, we adopt the completeness and soundness properties from the PVG framework for LLM reasoning. Specifically, if the high-level policy generates a sub-task sequence that is executable within the low-level policy's capabilities, the problem must be solved (completeness). Conversely, if the sub-task sequence is incorrect or beyond the low-level policy's capacity, the problem cannot be solved (soundness). To achieve this, we utilize the conclusion from Anil et al. [2021], which positions the low-level policy as the leader and the high-level policy as the follower, equilibria guarantee the complete and sound low-level policy. 
+ +When the high-level policy takes the lead, the low-level policy is forced to adapt to the specific strategy defined by the high-level policy, which can result in neither complete nor sound low-level policy. For example, if the high-level policy dictates that it will only generate sub-tasks involving addition and subtraction, the low-level policy is constrained to optimize only for these tasks. While they may reach an equilibrium, the low-level policy remains incomplete, and this limitation impacts both policies. In the case of the simultaneous PVG game, convergence to a Nash equilibrium is possible, but it is not sufficient for completeness and soundness. For instance, the low-level policy might disregard the high-level policy entirely (e.g., if the high-level provides incorrect instructions, but the low-level still performs correctly). This approach, however, is challenging to implement due to the significantly larger search space involved. + +Furthermore, the loss functions we design ensure that, at a Stackelberg equilibrium, the high-level policy identifies sub-task sequences that the low-level policy can execute to solve the problem with the highest probability. With the low-level policy acting as the leader, it establishes its reasoning boundary for tasks. Based on the reasoning boundary, let $\theta_h$ and $\theta_l$ represent the policy parameters for the high-level and low-level policies, respectively. The probability that the low-level policy correctly solves the question is defined as: + +$$ +\pi_ {l} \left(y _ {K} \mid x, t _ {1: K}\right) = \prod_ {k = 1} ^ {K} \operatorname {A c c} \left(t _ {k} \mid x, \theta_ {l}\right), +$$ + +where we can compute the difficulty $d_{k}$ from $t_k$ and $x$ . where the difficulty $d_{k}$ can be derived from $t_k$ and $x$ . The loss function in Eq. (15) ensures that the selected sub-tasks are optimal for the low-level policy. 
Here we provide a theoretical condition under which the most efficient solution strategy can be identified, according to the efficacy of the LLM. + +This approach can be viewed as a game between a high-level "prover" and a low-level "verifier". The verifier, representing the low-level policy, adheres the high-level policy's instructions to validate its reasoning. Unlike the classic PVG setting, where the prover has ground-truth labels, the label of our high-level policy depends on the tunable low-level policy. This distinction, where the low-level policy (leader) is inherently more complex, contrasts with traditional PVG setups and adds complexity due to the interdependence between the high- and low-level policies. + +By framing the problem-solving process as a leader-follower game, with the low-level policy designated as the leader, we can construct a bi-level optimization problem to identify an equilibrium. Following the formulation in Sec. C.5.1, the problem is expressed as: + +$$ +\theta_ {l} ^ {*} = \underset {\theta_ {l}} {\arg \min } \mathcal {L} _ {l} (\theta_ {h} ^ {*} (\theta_ {l}), \theta_ {l}) \quad \theta_ {h} ^ {*} (\theta_ {l}) = \underset {\theta_ {l}} {\arg \min } \mathcal {L} _ {h} (\theta_ {h}, \theta_ {l}). +$$ + +Then we can apply bi-level optimization techniques. + +# D Training Details + +# D.1 Single-turn ReMA + +We refer to Appendix G for prompts we use during training. We implement the training pipeline with OpenRLHF [Hu et al., 2024a] which is a highly efficient codebase and is easy to scale up. We select REINFORCE++ to save resources and for efficient training. All experiments are conducted in a node of 8 NVIDIA A100 GPUs. We use bf16, Zero2, Flash-Attention and gradient checkpointing to run our experiments. + +During rollout, we set temperature $= 1.0$ , top_p $= 1.0$ , top_k $= -1$ , and use vLLM for inference acceleration. We set the max generation length to be 2048 and, the rollout batch size to be 1000. 
The number of samples per prompt is 4. During training, we use Adam Optimizer with a learning rate of 5e-7. We set the mini-batch size to be 500, and the clip ratio to be 0.2. Other hyperparameters, such as KL coefficients and the number of training episodes, were carefully tuned based on validation set performance to ensure robust and reliable results. To align with the hyperparameter in OpenRLHF, we use #Training Episode as the number of reinforcement learning epoch on the entire dataset. + +In ReMA, during prompt filtering of the high-level model, the high-level agent first samples 10 candidates for each question with $t = 1.0$ , and for each output the low-level agents sample 1 solution with $t = 0.0$ , then we select questions of success rate between $[\varepsilon_{\mathrm{min}}, \varepsilon_{\mathrm{max}}]$ . And for the low-level agent's prompt filtering, the high-level agent first samples 1 candidate for each question with $t = 0.0$ and for each output the low-level agents sample 10 solutions with $t = 1.0$ , then we select questions of success rate between $[\varepsilon_{\mathrm{min}}, \varepsilon_{\mathrm{max}}]$ and use the high-level agent to sample 4 meta-thoughts with $t = 1.0$ as the input. + +# D.1.1 Supervised fine-tuning data collection + +For experiments in Sec. 4.2.1, we collect expert data to enhance the reasoning pattern, i.e. $RL$ from SFT. Specifically, we collect demonstration data from GPT-4o Mini on MATH training dataset (7.5k problems) Hendrycks et al. [2021] and use it to fine-tune the LLMs. The data generation follows these steps: First, we prompt GPT-4o Mini to produce metacognitive reasoning for high-level model training. Specifically, we use different prompts to instruct it to rewrite and decompose a given question without providing a final answer. We collect metacognitive reasoning using two predefined actions, "rewrite" and "decompose", which align with human approaches to complex problem-solving while preserving answer diversity. 
Next, we use the generated instructions to prompt GPT-4o Mini to follow the metacognitive steps and solve the question, obtaining SFT data for low-level policy training. Below, we present the prompts used for both high-level and low-level models. Prompts can be found in Appendix G.1.1. + +# D.1.2 Dataset Curation of RewardBench970 + +Table 2: Performance on LLM-as-a-Judge benchmarks, trained on dataset under the loose setting. The two-agent workflow in ReMA + +
ModelBenchmarkVRP(CoT)\( \mathbf{V R P_{R L}} \)\( \mathbf{M R P_{R L}} \)ReMA(Ours)
Llama3.1-8B-InstructRewardBench97071.2481.86 (+10.62)80.41 (+9.17)86.29 (+15.05)
JudgeBench51.7751.45 (-0.32)50.65 (-1.12)53.71 (+1.94)
Average61.5166.65 (+5.14)65.53 (+4.02)70.00 (+8.49)
Qwen2.5-7B-InstructRewardBench97086.4987.22 (+0.73)80.31 (-6.18)90.72 (+4.23)
JudgeBench58.3954.84 (-3.55)55.81 (-2.58)58.71 (+0.32)
Average72.4471.03 (-1.41)68.06 (-4.38)74.72 (+2.28)
+ +We process the original dataset in RewardBench by splitting it into a training set containing 5,000 tuples of (instruction, response A, response B) and a test set with the remaining 970 tuples. + +To ensure a meaningful dataset split, we validate two separation strategies: + +- Loose setting: We only ensure that there is no direct overlap of tuples between the training and test sets. +- Strict setting: We further enforce that no instruction appears in both the training and test sets. The results for this setting are presented in the main results (Table 1b). + +Additionally, since the original RewardBench data originates from different subsets, we ensure that all original subsets are evenly represented in both the training and test sets. + +Table 2 reports the learning performance of various methods under the loose dataset split setting. Compared to the results in Table 1b, ReMA significantly outperforms other RL tuning baselines + +across all models, particularly on out-of-distribution (OOD) benchmarks. The consistent improvements on OOD datasets of these two settings suggest that ReMA enhances meta-thinking ability, resulting in better generalization across diverse task distributions. + +# D.1.3 Training on MATH + +VRP For Llama3-8B-Instruct, Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 12,6,6 for these 3 models respectively. For Llama3-8B-Instruct, we set the learning rate of 2e-7 for stable training. + +MRP For Llama3-8B-Instruct, Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 10,6,6 for these 3 models respectively. + +MAMRP We use $\varepsilon_{\mathrm{min}} = 0.2, \varepsilon_{\mathrm{max}} = 0.8$ for prompt filtering. We use the same #Training Episode=4 for all models, and for #Update Iteration, we use 3 for Llama3-8B-Instruct and Llama3.1-8B-Instruct, 10 for Qwen2.5-7B-Instruct. 
And we set the KL coefficient to be 1e-2 for all the 3 models. + +# D.1.4 Training on Reward Bench + +VRP For Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 4,6 for these 2 models respectively. + +MRP For Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 4,6 for these 2 models respectively. + +MAMRP We set #Update Iteration=1 for all models. We set the KL coefficient to be 1e-2 for Llama3.1-8B-Instruct and 1e-2 for Qwen2.5-7B-Instruct all models. For Llama3.1-8B-Instruct, we use $\varepsilon_{\mathrm{min}} = 0.2$ , $\varepsilon_{\mathrm{max}} = 0.8$ for prompt filtering and we use #Training Episode of 2 during training. For Llama3.1-8B-Instruct, we use $\varepsilon_{\mathrm{min}} = 0.1$ , $\varepsilon_{\mathrm{max}} = 0.9$ for prompt filtering and we use #Training Episode of 1 during training. + +# D.2 Multi-turn ReMA + +We refer to Appendix G for prompts we use during training. We implement a multi-turn ReMA training pipeline with VeRL [Sheng et al., 2024] since it's easier to implement complex training pipeline with a single centralized controller. Similar to OpenRLHF, VeRL is also a highly efficient and scalable codebase for further development. + +For the multi-turn ReMA rollout, we use parameter sharing and simultaneous update by default. In details, we maintain two message lists with the system prompt of meta-thinking agent and reasoning agent respectively. During rollout, each agent acts as 'assistant' in its own message list and the other agent acts as 'user'. We use three hyperparameters to control the rollout length: (1) 'max_num_turns': the maximum number of turns for each trajectory. (2) 'max_response_length': the maximum number of tokens for each turn's response. (3) 'max_prompt_length': the maximum number of tokens for each trajectory. 
+ +During training, we apply the collected message list to Qwen2.5-7B's chat template and build loss masks in order to compute the loss for all turns of one trajectory (message list). + +Moreover, for multi-turn ReMA rollout, unlike single agent single turn rollout, we need to carefully design the termination logic. Basically, we let the meta-thinking agent automatically decide when to finish the solving procedure, we use a special tag '[FINISH]' to indicate the end of the solving procedure. After we detect this tag, we will terminate trajectory after the reasoning agent generates its output. + +We also design other termination conditions to ensure the quality of the generated trajectories. If the last agent's response is too long, we will terminate the whole trajectory and setting the reward to 0. We also introduce a different version of format reward: we give a reward of 1.0 only if the reasoning agent's last turn response is correct and the meta-thinking agent's last turn response include '[FINISH]'. We use math_check as the default verifier. + +# D.2.1 SFT data collection of multi-turn MAMRP + +We use GPT-4o to translate 817 samples in LIMO [Ye et al., 2025c] by prompting it to wrap each sentence with meta-thinking and reasoning tags. We use a temperature of 0. After filtering, we get 800 conversations for training. The prompt can be found in Appendix G.2.1. For supervised finetuning, we use LlamaFactory as the codebase and train the model for 3 epochs with a learning rate of 1e-5, consine learning rate scheduler, and batch size of 8. Use DeepSpeed Zero2 for distributed training. + +# D.2.2 Training on MATH + +For training of multi-turn ReMA on MATH, we use GRPO [Shao et al., 2024] as the default learning algorithm. We refer to Appendix G.2.2 for prompts. For experiment in Sec 4.3, we use sample 128 prompts, each with 16 trajectories. During training, we drop the KL loss term to improve the numerical stability. 
We use a learning rate of 1e-6, bfloat16 precision, FSDP backend for distributed training. We split the rollout data into 4 mini-batches for update. For the sake of numerical stability, we do pre-clip before computing the exponential of log-prob for a upperbound of 3.0. + +For the main result in Fig 5, we test different rollout configurations with a max_prompt_length of 4096, training for 500 steps. We use 32 NVIDIA A800 GPUs, the longest training cost about 40 hours due to large scale validation per 10 steps. + +For the ablation results in Fig 6, we use a tiny subset of MATH Level 3-5, training for 300 steps. Specifically, we sample 19 questions for every single type (133 instances in total). We use 8 NVIDIA A800 GPUs, the training cost about 30 hours + +We test different rollout configurations: + +(1) max_num_turns=30, max_response_length=256, max_prompt_length=4096 (2) +max_num_turns=30, max_response_length=1024, max_prompt_length=3072 + +And for the experiment of separate parameter in multi-turn ReMA, we iteratively train each agent with the same configuration as above, but with a switch interval of 10 steps, starting from the metathinking agent. + +# E Other Experiments + +# E.1 Reward functions shape cross-agent behaviors + +We also investigate the impact of different reward function designs on ReMA's behavior. In addition to the base reward setting described in Appendix C.2, we evaluate a consistency-based reward function using Qwen2.5-7B-Instruct. This reward function is designed to encourage the high-level agent to generate more detailed guidance. Indeed, we observe that the high-level agent trained in this manner produces more detailed solution steps compared to the one trained with the basic correctness format reward. However, we also find that this approach often leads to jailbreak behavior, where the high-level agent tends to include the final answer within its output, compromising the intended hierarchical reasoning process. 
+ +Furthermore, we discover an interesting evolution of a pattern during training: although our experimental setup is designed for the high-level agent to provide a solution plan while the lower-level agent executes it, we find that under the consistency-based reward, the lower-level agent significantly increases its attempt of verification rather than straightforward execution. We observed a certain sentence commonly appearing in the low-level agent's responses: "Let's go through the solution step by step to ensure clarity and correctness." To quantify this effect, we track the frequency of it. We analyze this pattern across all mathematical test sets, sampling eight completions per question at a temperature of 0.7. Our empirical results have identified a $30\mathrm{x}$ increase of such self-verifying patterns in the model trained with the consistency-based reward compared to the one trained with the base reward. Moreover, we also observe additional variations of this pattern, e.g. "Let's carefully re-evaluate the problem and solution to ensure accuracy and clarity." These phrases indicate that the low-level agent is actively exploring to verify the detailed response provided by the high-level agent. + +This suggests that (1) meta-thinking can not only emerge and be reinforced in the high-level agent but also in the low-level agent. During reinforcement learning (RL) training, the two agents develop a novel problem-solving pattern characterized by a role reversal. (2) Consistency-based rewards promote a more self-corrective approach at the lower level, potentially disrupting the intended separation of roles between planning and execution. For a detailed case study, refer to Appendix F.2. + +# E.2 Detailed Training Curves on Different Datasets of Multi-turn ReMA + +We show the detailed training curves of the multi-turn ReMA on different datasets in Fig. 8. 
+ +![](images/3a07830c095bd1b0e7d492fba662d270532fb4263b681ea523bd6daaeb0902da.jpg) +Figure 8: Detailed Training Curves on Different Datasets of Multi-turn ReMA + +# F Qualitative Results + +# F.1 High-level policy finds better plans + +Here is an example of how a high-level policy alters the solving method of an LLM, increasing the likelihood of providing correct answers. As we can see from the following example, without the high-level policy, the LLM counts all integer coordinates, including those on the boundary, and then subtracts the boundary coordinates. In contrast, the high-level policy identifies a better approach, directly instructing the LLM to count only the coordinates strictly inside the boundary. + +# F.2 Case study for Experiments in Section E.1 + +Fig. 10 and Fig. 11 show an case study of experiments in Sec. E.1. + +Although both agents are prompted with the same instructions as in our main results, the consistency reward of the high-level agent significantly alters the learning dynamics. As illustrated in Fig. 10, the high-level agent generates detailed solution attempts rather than a strategic plan. Consequently, the low-level agent evolves to verify the high-level agent's solutions. This suggests that, during reinforcement learning (RL) training, the two agents develop a novel problem-solving pattern characterized by a role reversal, where one agent generates an answer while the other verifies it. + +# F.3 Case study for Experiments in Section 4.2.2 + +Fig. 12 shows the detailed outputs of the two groups of LLMs trained in JSON action. The outputs finally converge to relative easier and shorter sentences, compared to our main experiments. This may due to insufficient capacity of a small LM to produce a valid JSON-format response while simultaneously exploring diverse reasoning strategies. + +Consider the rectangular region with the following points as vertices: $(5,4),(-5,4),(-5, - 4),(5, - 4)$ . 
How many points with integer coordinates will be strictly in the interior of this rectangular region? + +# Output without high-level planning solves this question in a less optimal way, leading to a wrong answer: + +To determine how many points with integer coordinates will be strictly in the interior of the rectangular region with vertices at $(5,4)$ , $(-5,4)$ , $(-5, - 4)$ , and $(5, - 4)$ , we will follow these steps: + +1. Identify the dimensions of the rectangle: + +- The width of the rectangle can be calculated as the distance between the x-coordinates of the vertices. The x-coordinates are $-5$ and 5. + +$$ +\mathrm {W i d t h} = 5 - (- 5) = 1 0 +$$ + +- The height of the rectangle can be calculated as the distance between the y-coordinates of the vertices. The y-coordinates are 4 and -4. + +$$ +\mathrm {H e i g h t} = 4 - (- 4) = 8 +$$ + +2. Calculate the total number of integer points: - The total number of integer points in the rectangle (including the boundary) can be calculated by considering the range of x and y values: - The x-values range from $-5$ to 5, giving us 11 integer values $(-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)$ . - The y-values range from $-4$ to 4, giving us 9 integer values ( + +$-4, -3, -2, -1, 0, 1, 2, 3, 4)$ . The total number of points in the rectangle (including the boundary) is: + +$$ +\text {T o t a l p o i n t s} = 1 1 \times 9 = 9 9 +$$ + +3. Calculate the number of points on the boundary: - The boundary consists of the four sides of the rectangle. - The top and bottom sides (y = 4 and y = -4) each have 11 integer points (from -5 to 5). - The left and right sides (x = 5 and x = -5) each have 9 integer points (from -4 to 4). - Therefore, the total number of points on the boundary is: + +$$ +\text {B o u n d a r y p o i n t s} = 2 \times 1 1 + 2 \times 9 = 2 2 + 1 8 = 4 0 +$$ + +4. 
Calculate the number of strictly interior points: - To find the number of points strictly in the interior, we subtract the points on the boundary from the total number of points: + +$$ +I n t e r i o r \text {p o i n t s} = \text {T o t a l p o i n t s - B o u n d a r y p o i n t s} = 9 9 - 4 0 = 5 9 +$$ + +Thus, the number of points with integer coordinates that will be strictly in the interior of this rectangular region is 59. + +Answer with high-level planning identifies the optimal sub-tasks for the LLM to solve, increasing the chance of answering the question correctly + +High-level policy planning: First, identify the boundaries of the rectangular region. Then, determine the integer coordinates that lie strictly within these boundaries. Finally, count the number of such points. + +Low-level policy reasoning following the high-policy plan: To solve the problem, we will follow the steps outlined in the instructions. + +Step 1: Identify the boundaries of the rectangular region. The vertices of the rectangular region are: $(-5,4)$ $(-5,4)$ $(-5, - 4)$ $(5, - 4)$ From these vertices, we can determine the boundaries:- The left boundary is $x = -5$ The right boundary is $x = 5$ .- The bottom boundary is $y = -4$ .- The top boundary is $y = 4$ + +Step 2: Determine the integer coordinates that lie strictly within these boundaries. For a point $(x,y)$ to be strictly inside the rectangle, it must satisfy: $-5 < x < 5 - 4 < y < 4$ + +Step 3: Count the integer coordinates that satisfy these conditions. Now, we will find the integer values for $x$ and $y$ that fall within these boundaries. For $x$ : The integer values that satisfy $-5 < x < 5$ are: + +$-4, -3, -2, -1, 0, 1, 2, 3, 4$ This gives us 9 possible integer values. For $y$ The integer values that satisfy $-4 < y < 4$ are: $-3, -2, -1, 0, 1, 2, 3$ . This gives us 7 possible integer values. + +Step 4: Calculate the total number of points. 
Now, we multiply the number of possible integer values for $x$ by the number of possible integer values for $y$ . + +$$ +\text {T o t a l p o i n t s} = 9 \times 7 = 6 3 +$$ + +Finally, the number of points with integer coordinates that lie strictly in the interior of the rectangular region is $\boxed{63}$ . + +Figure 9: Case Study comparing with and without high-level metacognition results. + +# G Prompts + +# G.1 Single-turn ReMA prompts + +# G.1.1 Prompts for JSON data collection + +Prompt for metacognition reasoning to rewrite: + +```txt +System prompt: +You are a math expert trying to solve mathematical problems. +Before answering a question, your task is to rewrite the original question to make it clearer. +Provide your rewritten content in JSON format: +{ "action": "REWRITE", "output": "{clearer question content}" +} +Respond only with valid JSON. Do not write an introduction or summary. +User prompt: +Here is the question: [problem_text] +``` + +Prompt for metacognition reasoning to decompose: + +```txt +Question: The equation $x^{10} + (13x - 1)^{10} = 0$ has 10 complex roots $r_1,\overline{r}_1,r_2,\overline{r}_2,r_3,\overline{r}_3,r_4,\overline{r}_4,r_5,\overline{r}_5$ , where the bar denotes complex conjugation. Find the value of $\frac{1}{r_1\overline{r}_1} +\frac{1}{r_2\overline{r}_2} +\frac{1}{r_3\overline{r}_3} +\frac{1}{r_4\overline{r}_4} +\frac{1}{r_5\overline{r}_5}$ +``` + +# High-Level Agent's Output + +To solve this problem, let's break it down into steps that a low-level math-solving agent can follow: + +1. Understand the given equation: + +The equation is $x^{10} + (13x - 1)^{10} = 0$ + +2. Rewrite the equation: + +Notice that the equation can be rewritten as: + +$$ +x ^ {1 0} = - (1 3 x - 1) ^ {1 0}. +$$ + +3. Consider the roots: + +The roots of the equation are given as $r_1, \overline{r}_1, r_2, \overline{r}_2, r_3, \overline{r}_3, r_4, \overline{r}_4, r_5, \overline{r}_5$ , where the bar denotes complex conjugation. 4. 
Use the property of complex conjugates: +For any complex number $z$ , the product of $z$ and its conjugate $\overline{z}$ is the square of the modulus of $z$ i.e., $z\overline{z} = |z|^2$ 5. Calculate the required sum: +3. Calculate the required sum $\mathbf{W} = \mathbf{w}_{1} + \mathbf{w}_{2} + \dots + \mathbf{w}_{n}$ + +$$ +\frac {1}{r _ {1} \overline {{F _ {1}}}} + \frac {1}{r _ {2} \overline {{F _ {2}}}} + \frac {1}{r _ {3} \overline {{F _ {3}}}} + \frac {1}{r _ {4} \overline {{F _ {4}}}} + \frac {1}{r _ {5} \overline {{F _ {5}}}} +$$ + +Using the property from step 4, this can be rewritten as: + +$$ +\frac {1}{| r _ {1} | ^ {2}} + \frac {1}{| r _ {2} | ^ {2}} + \frac {1}{| r _ {3} | ^ {2}} + \frac {1}{| r _ {4} | ^ {2}} + \frac {1}{| r _ {5} | ^ {2}}, +$$ + +6. Simplify the expression: +Since $r_1, r_2, r_3, r_4, r_5$ are roots of the equation $x^{10} = -(13x - 1)^{10}$ , the magnitudes $|r_i|$ are the same for each pair $r_i$ and $\overline{r}_i$ . Let $|r_i| = |r_i|$ for all $i, j$ . +7. Find the common magnitude: +To find $|r_{1}|$ , consider the magnitudes of the terms on both sides of the equation $x^{10} = -(13x - 1)^{10}$ . The magnitude of the left side is $|x|^{10}$ and the magnitude of the right side is $|13x - 1|^{10}$ . Therefore: + +$$ +\left| x \right| ^ {1 0} = \left| 1 3 x - 1 \right| ^ {1 0}. +$$ + +Taking the 10th root of both sides, we get: + +$$ +| x | = | 1 3 x - 1 |. +$$ + +8. Solve for $|x|$ : +Let $|x| = a$ . Then: + +$\therefore m - 1 \neq 0$ ; + +$$ +a = \left| 1 3 a - 1 \right|. +$$ + +This gives us two cases: +- $a = 13$ +- $a = -(13a - 1)$ +Solving the first case: + +$$ +a = 1 3 a - 1 \Longrightarrow 1 2 a = 1 \Longrightarrow a = \frac {1}{1 2}. +$$ + +Solving the second case: + +$$ +a = - 1 3 a + 1 \Longrightarrow 1 4 a = 1 \Longrightarrow a = \frac {1}{1 4}. +$$ + +Since $a$ must be the same for all roots, we need to check which value is consistent. 
Given the nature of the problem, we can assume $a = \frac{1}{2}$ as the correct magnitude. +9. Calculate the final sum: +Since $|r_i| = \frac{1}{2}$ for all $i$ , we have + +$$ +\frac {1}{| r _ {1} | ^ {2}} = \frac {1}{(\frac {1}{4}) ^ {2}} = 1 4 4. +$$ + +Therefore, the sum is: + +$$ +1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 = 5 \times 1 4 4 = 7 2 0. +$$ + +10. Final answer: + +$$ +\frac {1}{\frac {1}{n} + \frac {1}{n}} + \frac {1}{\frac {1}{n} + \frac {1}{n}} + \frac {1}{\frac {1}{n} + \frac {1}{n}} \text {i s} 7 2 0. +$$ + +# Low-Level Agent's Output + +Let's go through the solution step by step to ensure clarity and correctness. + +1. Understand the given equation: +The equation is $x^{10} + (13x - 1)^{10} = 0$ +2. Rewrite the equation: +Notice that the equation can be rewritten as: + +$$ +x ^ {1 0} = - (1 3 x - 1) ^ {1 0}. +$$ + +3. Consider the roots: + +The roots of the equation are given as $r_1, \overline{r}_1, r_2, \overline{r}_2, r_3, \overline{r}_3, r_4, \overline{r}_4, r_5, \overline{r}_5$ , where the bar denotes complex conjugation. 4. Use the property of complex conjugates: +For any complex number $z$ , the product of $z$ and its conjugate $\overline{z}$ is the square of the modulus of $z$ , i.e., $z\overline{z} = |z|^2$ . 5. Calculate the required sum: +We need to find the value of: + +$$ +\frac {1}{r _ {1} F _ {1}} + \frac {1}{r _ {2} F _ {2}} + \frac {1}{r _ {3} F _ {3}} + \frac {1}{r _ {4} F _ {4}} + \frac {1}{r _ {5} F _ {5}} +$$ + +Using the property from step 4, this can be rewritten as: + +$$ +\frac {1}{| r _ {1} | ^ {2}} + \frac {1}{| r _ {2} | ^ {2}} + \frac {1}{| r _ {3} | ^ {2}} + \frac {1}{| r _ {4} | ^ {2}} + \frac {1}{| r _ {5} | ^ {2}} + \frac {1}{| r _ {6} | ^ {2}} +$$ + +6. Simplify the expression: +Since $r_1, r_2, r_3, r_4, r_5$ are roots of the equation $x^{10} = -(13x - 1)^{10}$ , the magnitudes $|r_i|$ are the same for each pair $r_1$ and $\overline{r}_i$ . Let $|r_i| = |r_i|$ for all $i, j$ . +7. 
Find the common magnitude: +To find $|\boldsymbol{r}_i|$ , consider the determinants of the terms on both sides of the equation $x^{10} = -(13x - 1)^{10}$ . The magnitude of the left side is $|z|^10$ and the magnitude of the right side is $|13z - 1|^{10}$ . Therefore: + +$$ +| x | ^ {1 0} = | 1 3 x - 1 | ^ {1 0}. +$$ + +Taking the 10th root of both sides, we get: + +$$ +\left| x \right| = \left| 1 3 x - 1 \right|. +$$ + +8. Solve for $|x|$ : + +Let $|x| = a$ . Then: + +$$ +a = | 1 3 x - 1 |. +$$ + +This gives us two cases: +-α=13a-1 +- $a = -(13a - 1)$ +Solving the first case: + +$$ +a = 1 3 a - 1 \Longrightarrow 1 2 a = 1 \Longrightarrow a = \frac {1}{1 2}. +$$ + +Solving the second case: + +$$ +a = - 1 3 a + 1 \Rightarrow 1 4 a = 1 \Rightarrow a = \frac {1}{1 4}. +$$ + +Since $a$ must be the same for all roots, we need to check which value is consistent. Given the nature of the problem, we can assume $a = \frac{1}{2}$ as the correct magnitude. +9. Calculate the final sum: +Since $|r_i| = \frac{1}{12}$ for all i, we have: + +$$ +\frac {1}{| r _ {i} | ^ {2}} = \frac {1}{(\mathrm {古}) ^ {2}} = 1 4 4. +$$ + +Therefore, the sum is: + +$$ +1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 = 5 \times 1 4 4 = 7 2 0. +$$ + +10. Final answer: + +$$ +\text {The v a l u e o f} \frac {1}{r _ {1} r _ {1}} + \frac {1}{r _ {2} r _ {2}} + \frac {1}{r _ {3} r _ {3}} + \frac {1}{r _ {4} r _ {4}} + \frac {1}{r _ {5} r _ {5}} \text {i s} \boxed {7 2 0}. +$$ + +```latex +Question: The equation $x^{10} + (13x - 1)^{10} = 0$ has 10 complex roots $r_1, \overline{r}_1, r_2, \overline{r}_2, r_3, \overline{r}_3, r_4, \overline{r}_4, r_5, \overline{r}_5$ , where the bar denotes complex conjugation. Find the value of $\frac{1}{r_1r_4} + \frac{1}{r_2r_2} + \frac{1}{r_3r_3} + \frac{1}{r_4r_4} + \frac{1}{r_5r_5}$ . +``` + +High-Level Agent's Output + +Low-Level Agent's Output (part I) + +To solve this problem, we need to follow these steps: +1. 
Understand the given equation: The equation is $x^{10} - (13x - 1)^{10} = 0$ . +2. Identify the roots: The roots of the +equation are given as +$\overline{r_1},\overline{r_2},\overline{r_3},\overline{r_4},\overline{r_5},\overline{r_6},\overline{r_7},\overline{r_8}$ where the bar denotes complex conjugation. +3. Use properties of complex conjugates: +For any complex number $z$ , the product $z \cdot z$ is a complex function of the modulus of $z$ , i.e., $|z \cdot \overline{z}| = |z|^2$ . +4. Simplify the expression: We need to find +the value of 1 +r1+ r2r3+ r4 +# +Apply an property of complex conjugates. Since $\pi_1 \cdot \overline{\pi}_1 = |\pi_1|^2$ , we have +1+ +A +6.Sum the reciprocals: Sum the values +1+1 1 1 +7. Use Vieta's formulas: To find the sum of the reciprocals of the moduli squared, we can use the fact that the sum of the roots of the polynomial and their properties. +8. Finalize the solution: Combine all the +steps to find the final value +Now, let's guide the low-level math solving +agent to implement these steps. + +To solve the given problem, we start by analyzing the equation $x^{10} + (13x - 1)^{10} = 0$ . + +This can be rewritten as: + +$$ +x ^ {1 0} = - (1 3 x - 1) ^ {1 0}. +$$ + +Taking the 10th root on both sides, we get: + +$$ +x = \omega (1 3 x - 1), +$$ + +where $\omega$ is a 10th root of $-1$ . The 10th roots of $-1$ are given by $\omega = e^{\pi (2k + 1) / 10}$ for $k = 0, 1, 2, \ldots, 9$ . + +Rewriting the equation, we have: + +$x = \omega (13x - 1)\Rightarrow x - 13\omega x = -\omega \Rightarrow x(1 - 13\omega) = -\omega \Rightarrow x = \frac{-\omega}{1 - 13\omega}$ + +Thus, the roots of the equation are: + +$$ +r _ {k} = \frac {- \omega_ {k}}{1 - 1 3 \omega_ {k}} +$$ + +where $\omega_{k} = e^{i\pi (2k + 1) / 10}$ for $k = 0,1,2$ + +The roots come in conjugate pairs, so the roots are $r_1, \overline{r_1}, r_2, \overline{r_2}, r_3, \overline{r_3}, r_4, \overline{r_4}, r_5, \overline{r_5}$ . 
We need to find the value of: + +$$ +\frac {1}{r _ {1} \overline {{r _ {1}}}} + \frac {1}{r _ {2} \overline {{r _ {2}}}} + \frac {1}{r _ {3} \overline {{r _ {3}}}} + \frac {1}{r _ {4} \overline {{r _ {4}}}} + \frac {1}{r _ {5} \overline {{r _ {5}}}} +$$ + +Since $r_k\overline{r_k} = |r_k|^2$ we have: + +$$ +\frac {1}{r _ {k} r _ {k}} = \frac {1}{| r _ {k} | ^ {2}} +$$ + +We need to find $|r_k|^2$ . Note that: + +$$ +r _ {k} = \frac {- \omega_ {k}}{1 - 1 3 \omega_ {k}} +$$ + +Low-Level Agent's Output (part II) + +The magnitude of $r_k$ is: + +$$ +| r _ {k} | = \left| \frac {- \omega_ {k}}{1 - 1 3 \omega_ {k}} \right| = \left| \frac {| \omega_ {k} |}{1 - 1 3 \omega_ {k}} \right| = \frac {1}{1 - 1 3 \omega_ {k}} +$$ + +Thus, + +$$ +| r _ {k} | ^ {2} = \frac {1}{| 1 - 1 3 \omega_ {k} | ^ {2}} +$$ + +Therefore, + +$$ +\frac {1}{r _ {2} r _ {1}} = | 1 - 1 3 \omega_ {k} | ^ {2}. +$$ + +We need to sum this over the 5 pairs of roots: + +$$ +\sum_ {k = 0} ^ {4} | 1 - 1 3 \omega_ {k} | ^ {2} +$$ + +Since $\omega_{k} = e^{i\pi (2k + 1) / 10}$ , we have + +$|1 - 13\omega_{k}|^{2} = (1 - 13\omega_{k})(1 - 13\overline{\omega_{k}}) = (1 - 13\omega_{k})(1 - 13e^{-i\pi (2k + 1) / 10}) = 1 - 13\omega_{k} - 13e^{-i\pi (2k + 1) / 10} + 169.$ +Since $\omega_{k}\overline{\omega_{k}} = 1$ we get: + +$$ +\left| 1 - 1 3 \omega_ {k} \right| ^ {2} = 1 6 9 - 1 3 (\omega_ {k} + e ^ {- i \pi (2 k + 1) \cdot 1 0}) + 1 = 1 7 0 - 1 3 \cdot 2 \cos \left(\frac {\pi (2 k + 1)}{1 0}\right) = 1 7 0 - 2 6 \cos \left(\frac {\pi (2 k + 1)}{1 0}\right) +$$ + +Summing over $k = 0,1,2,3,4$ we get: + +$$ +\sum_ {k = 0} ^ {n - 1} \left(1 7 0 - 2 6 \cos \left(\frac {\pi (2 k + 1)}{1 0}\right)\right) = 5 \cdot 1 7 0 - 2 6 \sum_ {k = 0} ^ {n - 1} \cos \left(\frac {\pi (2 k + 1)}{1 0}\right) = 8 5 0 - 2 6 \cdot 0 = 8 5 0. 
+$$ + +Thus, the value is: + +![](images/6cce98a8906c58ccf3aa6cedf13fe8485572fcdb57dd27e9b6cd696eb48335fd.jpg) +Figure 10: Case Study for consistency reward of high-level agent +Figure 11: Case Study for base correctness reward of high-level agent + +Llama3.2-1B-Instruct training Output: +```txt +Question: The product of $3t^{2} + 5t + a$ and $4t^{2} + bt - 2$ is $12t^{4} + 26t^{3} - 8t^{2} - 16t + 6$ . What is $a + b$ ? +``` + +Llama3.1-8B-Instruct training Output: +```json +{ + "action": "DECOMPOSE", + "output": "Decompose the problem into finding the coefficients of the product of two polynomials and then solving for a and b." +} +``` + +```txt +{ "action": "EMPTY", "output"."" } +``` + +```txt +Step 1: To find the product of the two polynomials $3t^{2} + 5t + a$ and $4t^{2} + bt - 2$ , we need to multiply each term of the first polynomial by each term of the second polynomial and then combine like terms. +Step 2: The product of the two polynomials can be written as $(3t^{2} + 5t + a)(4t^{2} + bt - 2)$ . +Step 3: Using the distributive property, we multiply each term in the first polynomial by each term in the second polynomial: $(3t^{2})(4t^{2}) + (3t^{2})(bt) + (3t^{2})(-2) + (5t)(4t^{2}) + (5t)(bt) + (5t)(-2) + (a)(4t^{2}) + (a)(bt) + (a)(-2)$ . +Step 4: Simplifying the products, we get $12t^{4} + 3b^{3} - 6t^{2} + 20t^{3} + 5bt^{2} - 10t + 4at^{2} + abt - 2a$ . +Step 5: Combining like terms, we have $12t^{4} + (3b + 20)t^{3} + (5b - 6 + 4a)t^{2} + (ab - 10)t - 2a$ . +Step 6: We are given that the product of the two polynomials is $12t^{4} + 26t^{3} - 8t^{2} - 16t + 6$ . +We can equate the coefficients of the corresponding powers of $t$ in the two expressions. +Step 7: Equating the coefficients of $t^3$ , we get $3b + 20 = 26$ . +Step 8: Equating the coefficients of $t^2$ , we get $5b - 6 + 4a = -8$ . +Step 9: Equating the constant terms, we get $-2a = 6$ . +Step 10: Solving the equation $-2a = 6$ , we find that $a = -3$ . 
+Step 11: Substituting $a = -3$ into the equation $5b - 6 + 4a = -8$ , we get $5b - 6 + 4(-3) = -8$ , which simplifies to $5b - 18 = -8$ . +Step 12: Solving for $b$ , we find that $5b = 10$ , so $b = 2$ . +Step 13: Now that we have found the values of $a$ and $b$ , we can calculate their sum: $a + b = -3 + 2 = -1$ . +The final answer is: $\boxed{-1}$ +``` + +```txt +Step 1: The problem asks us to find the values of $a$ and $b$ in the quadratic expressions $3t^2 + 5t + a$ and $4t^2 + bt - 2$ , respectively, given that their product is $12t^4 + 26t^3 - 8t^2 - 16t + 6$ . +``` + +```txt +Step 2: To solve this, we need to multiply the two quadratic expressions and equate the resulting expression to the given product $12t^4 + 26t^3 - 8t^2 - 16t + 6$ . +``` + +```txt +Step 3: We start by multiplying the two quadratic expressions: $(3t^{2} + 5t + a)(4t^{2} + bt - 2)$ . +``` + +```txt +Step 4: This gives us $12t^4 + (3b + 5)t^3 + (4a + 5b - 6)t^2 + (-2b + 10t + 2a)t - 2a.$ +``` + +```txt +Step 5: We know that the coefficients of the resulting expression must be equal to the coefficients of the given product $12t^4 + 26t^3 - 8t^2 - 16t + 6$ . +``` + +```txt +Step 6: Equating the coefficients of $t^3$ gives us the equation $3b + 5 = 26$ , which simplifies to $3b = 21$ and thus $b = 7$ . +``` + +```txt +Step 7: Equating the constant terms gives us $-2a = -6$ , which simplifies to $a = 3$ . +``` + +```txt +Step 8: Now that we have the values of $b$ and $a$ , we can find the sum $a + b$ . +``` + +```txt +Step 9: Adding $a = 3$ and $b = 7$ gives us $a + b = 10$ . +``` + +```txt +The final answer is: $\boxed{10}$ +``` + +Figure 12: Case Study for interpretability experiments in Section 4.2.2 + +System prompt: +```txt +You are a math expert trying to solve mathematical problems. Before answering a question, your task is to decompose the original question to make it clearer. 
+Provide your rewritten content in JSON format: +{"action": "DECOMPOSE", "output": "{decomposed question content}}" +}} +Respond only with valid JSON. Do not write an introduction or summary. +User prompt: +Here is the question: [problem.text] +``` + +Prompt for generating final answers using on the question and metacognition reasoning: + +System prompt: +User prompt: +```txt +You are a math expert tasked with solving problems step by step. Follow the provided instructions precisely, showing all reasoning and intermediate steps. Present the final answer within \boxed{\{\}}\}. +``` + +```txt +Here is the question and instructions: +Question +[problem_text] +Provided Instruction +[instruction_text] +``` + +# G.1.2 Prompts for Math problems + +# VRP prompt: + +```txt +System prompt: +You are a math expert tasked with solving problems step by step. Present the final answer within \boxed{}?. +User prompt: +Here is the question: +{Question} +``` + +# MRP prompt: + +```txt +System prompt: +You are a math expert tasked with solving problems. When solving a problem, your first task is to provide a high-level solution plan as an instruction. Then you need to follow the provided instructions precisely, showing all reasoning and intermediate steps. Finally, you must present the final answer within boxed}. +User prompt: +Here is the question: {Question} +``` + +# MAMRP prompt: + +high-level agent: + +```txt +System prompt: +You are a math expert specialized in solving mathematical problems, you need to teach a weaker agent with minimal capability in math how to solve a problem step-by-step. +Your task is to provide a high-level solution plan for the given problem, in order to guide a low-level math solving agent to solve the problem. +You can not directly answer the question. You'll be punished if you include any answer in your response. +You need to first think deeply in mind and output your final instruction. 
+User prompt: +Here is the question: +{Question} +``` + +low-level agent: + +```txt +System prompt: +You are a math expert tasked with solving problems step by step. Follow the provided instructions precisely, showing all reasoning and intermediate steps. Present the final answer within \boxed{}/. User prompt: Here is the question and instructions: [Question] {Question} [End of Question] [Provided Instruction] {instruction} [End of Instruction] +``` + +# G.1.3 Prompts for LLM-as-a-Judge problems + +We adopt the prompts from Saha et al. [2025a]. + +# VRP prompt: + +```txt +System prompt: +Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. You should choose the assistant that follows the user's instructions and answers the user's question better. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of their responses. Begin your evaluation by comparing the two responses and provide a short explanation. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision.. +Do not allow the length of the responses to influence your evaluation.. +Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: "[A]" if assistant A is better, "[B]" if assistant B is better.. +User prompt: +[User Question] {instruction} [End of User Question] [The Start of Assistant A's Answer] {response_A} [The End of Assistant A's Answer] [The Start of Assistant B's Answer] {response_B} [The End of Assistant B's Answer] +``` + +# MRP prompt: + +# System prompt: + +Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. 
You should choose the assistant that follows the user's instructions and answers the user's question better. First of your task is to build an evaluation plan that can then be executed to assess the response quality. Whenever appropriate, you can choose to also include a step-by-step reference answer as part of the evaluation plan. Enclose your evaluation plan between the tags "[Start of Evaluation Plan]" and "[End of Evaluation Plan)". After that, please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. You should choose the assistant that follows the user's instructions and answers the user's question better. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of their responses. Begin your evaluation by comparing the two responses and provide a short explanation. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: "[A]" if assistant A is better, "[B]" if assistant B is better. User prompt: [User Question] {instruction} [End of User Question] [The Start of Assistant A's Answer] {response_A} [The End of Assistant A's Answer] [The Start of Assistant B's Answer] {response_B} [The End of Assistant B's Answer] + +# MAMRP prompt: high-level agent: + +# System prompt: + +We want to evaluate the quality of the responses provided by AI assistants to the user question displayed below. For that, your task is to help us build an evaluation plan that can then be executed to assess the response quality. 
Whenever appropriate, you can choose to also include a step-by-step reference answer as part of the evaluation plan. + +# User prompt: + +[User Question] +{Question} +[End of User Question] + +# low-level agent: + +# System prompt: + +Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. Your evaluation should be performed by following the provided evaluation plan step-by-step. Avoid copying the plan when doing the evaluation. + +Please also only stick to the given plan and provide explanation of how the plan is executed to compare the two responses. + +Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. + +Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. + +After providing your evaluation, output your final verdict by strictly following this format: "[A]" if assistant A is better, "[B]" if assistant B is better. + +# User prompt: + +```txt +[User Question] +{instruction} +{End of User Question] +{The Start of Assistant A's Answer} +{response_A} +{The End of Assistant A's Answer} +{The Start of Assistant B's Answer} +{response_B} +{The End of Assistant B's Answer} +{The Start of Evaluation Plan} +{evaluation計劃} +{The End of Evaluation Plan} +``` + +# G.2 Multi-turn ReMA prompts + +# G.2.1 SFT data collection of multi-turn MAMRP + +# System prompt: + +You are classifying reasoning process data into two types of thinking. You will be given a question-answer pair from a reasoning dataset. Your task is to split all words into two parts. These words are crucial for analyzing reasoning patterns, so do not skip any details. + +- **Meta-Thinking Agent (MTA):** Responsible for high-level thought processes. This includes planning, evaluating steps, expressing uncertainty, making observations, or setting goals. 
Avoid detailed calculations. The content should be enclosed in `` and ``. + +- $\star \star$ Reasoning Agent (RA): $\star \star$ Responsible for detailed problem-solving steps, such as calculations, logical deductions, or breaking down a problem into subproblems. The content should be enclosed in `` and ``. + +\*\*Rules to follow: \*\* + +1. **Do not assign large chunks of text to a single type of thinking.** The reasoning process consists of small, nonlinear thinking steps, so alternate appropriately between Meta-Thinking and Reasoning steps. + +2. **Keep the words from the original solution unmodified whenever possible.** Words like "Wait," "Hmm," "But," etc., typically indicate Meta-Thinking and should be preserved. + +3. $\star \star$ When finalizing the answer: $\star \star$ + +- The \*\*Meta-Thinking Agent (MTA) \*\* must explicitly confirm the answer before completion and output '[FINISH]'. + +- The $\star \star$ Reasoning Agent (RA) $\star \star$ should then provide the final answer in the correct format. + +4. **Do not skip any reasoning steps, even if they seem redundant, incorrect or irrelevant** + +5. **Do not modify or remove any part of the original reasoning process**, even if it seems redundant or repetitive. The goal is to **preserve the exact flow of thought** as it naturally occurs. + +6. **Retain all expressions such as "Wait," "Hmm," "But wait," etc., exactly as they appear. These indicate important cognitive processes and should not be skipped or altered.** + +Here are examples for you: [Examples] ... + +# User prompt: + +[Begin of Question] +{question} +[End of Question] +[Begin of Solution] +{solution} +[End of Solution] + +# G.2.2 Prompt for math problems + +# Meta-Thinking Agent (MTA): + +```txt +System prompt: +You are a meta-think agent that represents human high-level think process, when solving a question, you will have a discussion with human, each time you think about what to do next: e.g. 
+- Exploring multiple angles and approaches +- Breaking down the solution into clear steps +- Continuously reflecting on intermediate results honestly and adapt your strategy as you progress +- Backtracking when necessary +- Requesting exploration of multiple solutions individually +- Finally confirm the answer with the tag [FINISH] +User prompt: +{question} +``` + +# Reasoning Agent (RA): + +```txt +System prompt: Please reason step by step follow the given instruction, when asked to finalize your answer, put your answer within \boxed{} User prompt: {question} {instruction} +``` \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09501/images/0237a522e20431ebc766f44dcd6ea2566b93869ecb82778c21cee24038e74754.jpg b/data/2025/2503_09xxx/2503.09501/images/0237a522e20431ebc766f44dcd6ea2566b93869ecb82778c21cee24038e74754.jpg new file mode 100644 index 0000000000000000000000000000000000000000..60c8f451a40f3064d85d401a7b58e336c4ee846a --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/0237a522e20431ebc766f44dcd6ea2566b93869ecb82778c21cee24038e74754.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e75499d4cce715ae07852d1345dfa28d06b6c3eb011e82cb21b7ac980af3a7b4 +size 1841 diff --git a/data/2025/2503_09xxx/2503.09501/images/032a64c3b322e4687ee8458b7f6835cd1e8b820c05079fa11e7c0ad3ebd2c1ec.jpg b/data/2025/2503_09xxx/2503.09501/images/032a64c3b322e4687ee8458b7f6835cd1e8b820c05079fa11e7c0ad3ebd2c1ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eecff28324edce152fbc5af466be62d2a0e38d23 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/032a64c3b322e4687ee8458b7f6835cd1e8b820c05079fa11e7c0ad3ebd2c1ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbd019f6f21d16b93d47d82c9166f97fbc6dfeb701cbdad655e2c9310710ad18 +size 5462 diff --git a/data/2025/2503_09xxx/2503.09501/images/032ea026c484b25fb0540e17ead1ad673917359410de718a424734cd30d558a2.jpg 
b/data/2025/2503_09xxx/2503.09501/images/032ea026c484b25fb0540e17ead1ad673917359410de718a424734cd30d558a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..adbcec65f5ccc563ca807cd9f4a76015f8154da1 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/032ea026c484b25fb0540e17ead1ad673917359410de718a424734cd30d558a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d866489d738befec9bd289291bcc1e3c95828906901e0b87fb575bde8f68a44f +size 186730 diff --git a/data/2025/2503_09xxx/2503.09501/images/03990842fdfe887957593c9afc250ca8d106f43dd9a3ebffb41479bc04e5ab4b.jpg b/data/2025/2503_09xxx/2503.09501/images/03990842fdfe887957593c9afc250ca8d106f43dd9a3ebffb41479bc04e5ab4b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..75f159fefe581fdd22c34833834d0a42dfdb6ca7 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/03990842fdfe887957593c9afc250ca8d106f43dd9a3ebffb41479bc04e5ab4b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:275048389aa7880e98f351b635a33768f87c289904e6cd22d044fc39f5540842 +size 2504 diff --git a/data/2025/2503_09xxx/2503.09501/images/05170e814c4da8f834eb3b09fcc2a7641bdce982966a108ae9e495f6791f6dd7.jpg b/data/2025/2503_09xxx/2503.09501/images/05170e814c4da8f834eb3b09fcc2a7641bdce982966a108ae9e495f6791f6dd7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d60e016098a68b2c5b325ce8444e60dbc90b5444 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/05170e814c4da8f834eb3b09fcc2a7641bdce982966a108ae9e495f6791f6dd7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c11c31c3c045aafc91c385aad4e45819220c78592a7aecf0bef4573cf6f10a7 +size 1518 diff --git a/data/2025/2503_09xxx/2503.09501/images/05dc13f8590fd0e48920b97ac09cc7f97085627fc9eb5b18c41f8b40a2814e97.jpg b/data/2025/2503_09xxx/2503.09501/images/05dc13f8590fd0e48920b97ac09cc7f97085627fc9eb5b18c41f8b40a2814e97.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..83d6da793ef6fc50f56cd77b9d96501a7c8919ac --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/05dc13f8590fd0e48920b97ac09cc7f97085627fc9eb5b18c41f8b40a2814e97.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a228e806c299b5f18f4ca763f839437d7dad731e9ce011508f7fcf23f3fd94dd +size 5787 diff --git a/data/2025/2503_09xxx/2503.09501/images/07fc65b6c33535685fdd786bf88fd2c3719bf9a2836ea2cfdd87b86c9fbb436d.jpg b/data/2025/2503_09xxx/2503.09501/images/07fc65b6c33535685fdd786bf88fd2c3719bf9a2836ea2cfdd87b86c9fbb436d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7368bf95db50af3068ae968258156a8a6102ecc2 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/07fc65b6c33535685fdd786bf88fd2c3719bf9a2836ea2cfdd87b86c9fbb436d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:273ce2126091aa9278d563b29d0d3ef566860631484c4e9204eb73928d5bab06 +size 956 diff --git a/data/2025/2503_09xxx/2503.09501/images/0848d561d3813403322d9efeb80547bb46375f51bc273a205eae3fdb7377b0aa.jpg b/data/2025/2503_09xxx/2503.09501/images/0848d561d3813403322d9efeb80547bb46375f51bc273a205eae3fdb7377b0aa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..baca6e908bf9b25758c84cedd255a33dfbaa1c0f --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/0848d561d3813403322d9efeb80547bb46375f51bc273a205eae3fdb7377b0aa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6100a1cad368069031351b095c9d71793497307ec10419ce21e0711ee2b8a16 +size 4277 diff --git a/data/2025/2503_09xxx/2503.09501/images/0a12a930e142fe2cdce1144a9d1bcf93a65808b7f66fd19e01bc8a6c088ffc04.jpg b/data/2025/2503_09xxx/2503.09501/images/0a12a930e142fe2cdce1144a9d1bcf93a65808b7f66fd19e01bc8a6c088ffc04.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2710d7b8a29cf99702c65bcf7b3ab84ca1566ff5 --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09501/images/0a12a930e142fe2cdce1144a9d1bcf93a65808b7f66fd19e01bc8a6c088ffc04.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d751b6d2113aabe6b4815dfe33abdc0a3ea5da41a328965b1edf726615a3293 +size 37759 diff --git a/data/2025/2503_09xxx/2503.09501/images/13ee9b755412f0ddda27dfb3a4338a0ead1c5a78cddf2ef644b3f886d89c5815.jpg b/data/2025/2503_09xxx/2503.09501/images/13ee9b755412f0ddda27dfb3a4338a0ead1c5a78cddf2ef644b3f886d89c5815.jpg new file mode 100644 index 0000000000000000000000000000000000000000..808e5022439b50f56fa1fddf9ac0e4583000f788 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/13ee9b755412f0ddda27dfb3a4338a0ead1c5a78cddf2ef644b3f886d89c5815.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b36f7c0ba3ad6fdb527163f97bba984e60c50b1ca5415ac51aaba3b42c607ef +size 76258 diff --git a/data/2025/2503_09xxx/2503.09501/images/14f1085c1bd170ec01d7d192516fafed37c24536c8291207e594c8f5bd827dd5.jpg b/data/2025/2503_09xxx/2503.09501/images/14f1085c1bd170ec01d7d192516fafed37c24536c8291207e594c8f5bd827dd5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..96aa13a32a0d9eb7afd96c0d3810e3301513baa2 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/14f1085c1bd170ec01d7d192516fafed37c24536c8291207e594c8f5bd827dd5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e627f027f8a6f2e438edc7a83062a4e2f4db0ad0b6048867f85c26187ed586f0 +size 963 diff --git a/data/2025/2503_09xxx/2503.09501/images/15bca33a252a7d7747e1b41007c900efc23110c386a855fa01292b21327669cf.jpg b/data/2025/2503_09xxx/2503.09501/images/15bca33a252a7d7747e1b41007c900efc23110c386a855fa01292b21327669cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8260e768044bc8565cb90dcbb983494a60f3bdb8 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/15bca33a252a7d7747e1b41007c900efc23110c386a855fa01292b21327669cf.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:6c3e411f1aca0614371d5524ee0ddd156a8f2144326168377f342cb500cfc2eb +size 1785 diff --git a/data/2025/2503_09xxx/2503.09501/images/19d913b0dca111c638beefd731775a3856eed9a49ce53f07143ec8a8447cb2c4.jpg b/data/2025/2503_09xxx/2503.09501/images/19d913b0dca111c638beefd731775a3856eed9a49ce53f07143ec8a8447cb2c4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce4bd7289b5a7d26c99a05b37094128c42bb1d0f --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/19d913b0dca111c638beefd731775a3856eed9a49ce53f07143ec8a8447cb2c4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b0f7e17b205739cf1ef3d05955033c142cecc84894f1b94435c6dc0cc7ea551 +size 7795 diff --git a/data/2025/2503_09xxx/2503.09501/images/1a3854038f19d195ed185616c3f2b9e4fa543e0ca650c9a6ea7d1989a49d00de.jpg b/data/2025/2503_09xxx/2503.09501/images/1a3854038f19d195ed185616c3f2b9e4fa543e0ca650c9a6ea7d1989a49d00de.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4956ddf75a6edf8c6960c163a8f2a45d860bef86 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/1a3854038f19d195ed185616c3f2b9e4fa543e0ca650c9a6ea7d1989a49d00de.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9eaa61c5094c0df666d5e66ab75c06e9c73d43b7088c0ae13b0dd942c1d80ecb +size 1987 diff --git a/data/2025/2503_09xxx/2503.09501/images/1bc2c9d3ff1fdd6b7268585f52555fec4647ff246738072f074f516e6a129d24.jpg b/data/2025/2503_09xxx/2503.09501/images/1bc2c9d3ff1fdd6b7268585f52555fec4647ff246738072f074f516e6a129d24.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a3a5db2b4a4c03995773358edb330d2880b9a068 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/1bc2c9d3ff1fdd6b7268585f52555fec4647ff246738072f074f516e6a129d24.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:edddaa2a5c2db357107a3be149e670cea6180ec1a2ef8ebf56ee8f67365aa642 +size 14968 diff --git 
a/data/2025/2503_09xxx/2503.09501/images/1c302497f77a8108527d48c628148c9c17719bb8a4d6440b23231a5f2df60a61.jpg b/data/2025/2503_09xxx/2503.09501/images/1c302497f77a8108527d48c628148c9c17719bb8a4d6440b23231a5f2df60a61.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4831c78aa9526f1ef9a70012700252b0c0216e29 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/1c302497f77a8108527d48c628148c9c17719bb8a4d6440b23231a5f2df60a61.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9587700e17d63b60e9389509699405a261c60b2a1ab03fda40c0aae9c8a1fc8b +size 1431 diff --git a/data/2025/2503_09xxx/2503.09501/images/1d56081a4c756eb34fc910f55c821d9ab5537407188b813453634d13845d2f6a.jpg b/data/2025/2503_09xxx/2503.09501/images/1d56081a4c756eb34fc910f55c821d9ab5537407188b813453634d13845d2f6a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6cf926e69643ec82b50c72016952b1334fe69b68 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/1d56081a4c756eb34fc910f55c821d9ab5537407188b813453634d13845d2f6a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b92bc6ad61159d819b04b7c0cb6c64f364278a16e058e33691fba00f02c87b4 +size 12111 diff --git a/data/2025/2503_09xxx/2503.09501/images/28eba59bcd1627af7ee1dbdd7fe9a9861b579d0aebd07602fe6fed78017ca843.jpg b/data/2025/2503_09xxx/2503.09501/images/28eba59bcd1627af7ee1dbdd7fe9a9861b579d0aebd07602fe6fed78017ca843.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a0820608296a2f858c9c936f405a0fb8f2ce20d --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/28eba59bcd1627af7ee1dbdd7fe9a9861b579d0aebd07602fe6fed78017ca843.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:018a95305ab4552ec421216e10e550de18f9fe4fa4bbdf3a214084b6d813d6b8 +size 2186 diff --git a/data/2025/2503_09xxx/2503.09501/images/2a12f2772fbb774a2c76affff4a8b8ba81eda3fe2d809a080d5e4968463bed50.jpg 
b/data/2025/2503_09xxx/2503.09501/images/2a12f2772fbb774a2c76affff4a8b8ba81eda3fe2d809a080d5e4968463bed50.jpg new file mode 100644 index 0000000000000000000000000000000000000000..03342775b8258bee7e5fec4ba7bfd9d4d1dcc31b --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/2a12f2772fbb774a2c76affff4a8b8ba81eda3fe2d809a080d5e4968463bed50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:036a103eacfa678ac1eee4917e445a3f9de06ea941513c65e438074b3a9584aa +size 5377 diff --git a/data/2025/2503_09xxx/2503.09501/images/30a64c5f505bf97092d0e646564746319a53da606cabba7e7be05eb736d380ee.jpg b/data/2025/2503_09xxx/2503.09501/images/30a64c5f505bf97092d0e646564746319a53da606cabba7e7be05eb736d380ee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..25a8a2764d897852c707be4f6c2f3981841b83f9 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/30a64c5f505bf97092d0e646564746319a53da606cabba7e7be05eb736d380ee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73767a55d8d0ea58e03301d1d846d84b3ff3663c35a02f6f092845cbb2a40ecd +size 4282 diff --git a/data/2025/2503_09xxx/2503.09501/images/315cafbb1d2055dded17eaabf88749be03cfea772911a86fa7b194d2357e1c64.jpg b/data/2025/2503_09xxx/2503.09501/images/315cafbb1d2055dded17eaabf88749be03cfea772911a86fa7b194d2357e1c64.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0071bcf0da159b53c6e3c3ed2265cb3e0ae16307 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/315cafbb1d2055dded17eaabf88749be03cfea772911a86fa7b194d2357e1c64.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8a50789134f13e0ff75b303e116417002329d0a5c593013927e7f7443c0aaf5 +size 51755 diff --git a/data/2025/2503_09xxx/2503.09501/images/36e0a413692113a7e783656a749d6c396724839d2dbe2992be4b5dfc8d667660.jpg b/data/2025/2503_09xxx/2503.09501/images/36e0a413692113a7e783656a749d6c396724839d2dbe2992be4b5dfc8d667660.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..4f684674493aaedecf1887a1d59f830df645163d --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/36e0a413692113a7e783656a749d6c396724839d2dbe2992be4b5dfc8d667660.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dadef5e181a713873bd4e2d95561e55fbd0452689085f38853b5f4a9c4925b4 +size 1074 diff --git a/data/2025/2503_09xxx/2503.09501/images/37ed86a4d76064b8e7dc589771be62484945d0a0fbaf36c2bdeedac73830355d.jpg b/data/2025/2503_09xxx/2503.09501/images/37ed86a4d76064b8e7dc589771be62484945d0a0fbaf36c2bdeedac73830355d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..27999d007c0e0e8a551d49b7371544eb4b35a2ed --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/37ed86a4d76064b8e7dc589771be62484945d0a0fbaf36c2bdeedac73830355d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b1ae0078bd159b2b01b3bd2de4026ecf825f0ca0293af73115e78358c8dca5d +size 67194 diff --git a/data/2025/2503_09xxx/2503.09501/images/39510965f995cb6f30887a0e480073cade592bfa898215baa6d1f2f71e71f3c1.jpg b/data/2025/2503_09xxx/2503.09501/images/39510965f995cb6f30887a0e480073cade592bfa898215baa6d1f2f71e71f3c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e538a9b2913f760ab61accdcfe86c7716f0f318a --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/39510965f995cb6f30887a0e480073cade592bfa898215baa6d1f2f71e71f3c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5414e260f8f18cc6b7e394d8826c4f7848be91945d478023874b23ffa84c35f4 +size 8963 diff --git a/data/2025/2503_09xxx/2503.09501/images/3a07830c095bd1b0e7d492fba662d270532fb4263b681ea523bd6daaeb0902da.jpg b/data/2025/2503_09xxx/2503.09501/images/3a07830c095bd1b0e7d492fba662d270532fb4263b681ea523bd6daaeb0902da.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a9a24bf01efd7c9c9c874a08ad5cbb66681bd7e --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09501/images/3a07830c095bd1b0e7d492fba662d270532fb4263b681ea523bd6daaeb0902da.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fb96962549a35e336fdde3ca35795cd0d8f5ca7e029f0900c1373526a9d8202 +size 113778 diff --git a/data/2025/2503_09xxx/2503.09501/images/3c25888f2b698f2efd7b4ef136864d3430e01e3a7ee444879308ad71c36e297e.jpg b/data/2025/2503_09xxx/2503.09501/images/3c25888f2b698f2efd7b4ef136864d3430e01e3a7ee444879308ad71c36e297e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..38b8a8504850c51ace7cd589f5d62a9423fc5d05 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/3c25888f2b698f2efd7b4ef136864d3430e01e3a7ee444879308ad71c36e297e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bd865af949f72619637f4d25201c563ef2593b87cf178718490680a10c439a3 +size 1792 diff --git a/data/2025/2503_09xxx/2503.09501/images/3e06b9d59ab4e557586a3566be6437c22310f94ff0f6d0828c903eee7d02ad79.jpg b/data/2025/2503_09xxx/2503.09501/images/3e06b9d59ab4e557586a3566be6437c22310f94ff0f6d0828c903eee7d02ad79.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea9326cd5a05d7501f693901274703c2726a2589 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/3e06b9d59ab4e557586a3566be6437c22310f94ff0f6d0828c903eee7d02ad79.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5297a8c4d13f93e73ac6e570660eebdcbfd113d01515fbac389b4caa40ffc34f +size 1155 diff --git a/data/2025/2503_09xxx/2503.09501/images/3fd02be075315fbd643707dc3d3d5497904efd1829a267bf356bdc98e9fbe27f.jpg b/data/2025/2503_09xxx/2503.09501/images/3fd02be075315fbd643707dc3d3d5497904efd1829a267bf356bdc98e9fbe27f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7c2d9f151785f751cb9115ae6e91b04cf378f76a --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/3fd02be075315fbd643707dc3d3d5497904efd1829a267bf356bdc98e9fbe27f.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:977a57d178c63f1fb32b37770898e61520597f7628e50dc64778859fa97d66d1 +size 1305 diff --git a/data/2025/2503_09xxx/2503.09501/images/4c2a5c3a0dfb0eb00cd8928e39973a9df874ba2cf7234966644df2fb2cd4cdb1.jpg b/data/2025/2503_09xxx/2503.09501/images/4c2a5c3a0dfb0eb00cd8928e39973a9df874ba2cf7234966644df2fb2cd4cdb1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3c4c2db5f7f8586035d16238a373c5357bf76b08 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/4c2a5c3a0dfb0eb00cd8928e39973a9df874ba2cf7234966644df2fb2cd4cdb1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c26c3dd85bbe3a182d5ccc39e868c46758253432411476fcda27cdfc7c616e9 +size 1160 diff --git a/data/2025/2503_09xxx/2503.09501/images/4d01cf958b1cef0ac5f73fe67da58dfb9f7784a3720c1d5bf7c308c749b09a32.jpg b/data/2025/2503_09xxx/2503.09501/images/4d01cf958b1cef0ac5f73fe67da58dfb9f7784a3720c1d5bf7c308c749b09a32.jpg new file mode 100644 index 0000000000000000000000000000000000000000..33656cc84ecd98696fa9c93288d158f324ef961e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/4d01cf958b1cef0ac5f73fe67da58dfb9f7784a3720c1d5bf7c308c749b09a32.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32b86ae2dfef79fb5c9f4916dd2dfdabd6ba9e434f732e5587c21e1bb1ef52f2 +size 2211 diff --git a/data/2025/2503_09xxx/2503.09501/images/4e685cd1fc6e7e386b1f144221571ae899897bfc8d69063160055ba364dfd977.jpg b/data/2025/2503_09xxx/2503.09501/images/4e685cd1fc6e7e386b1f144221571ae899897bfc8d69063160055ba364dfd977.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0a6afc30607ff5865cda9706e6d3452ce209008d --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/4e685cd1fc6e7e386b1f144221571ae899897bfc8d69063160055ba364dfd977.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4efe7e5ec03e73b4295e32f8017f621b76d5b4861ee06f84fee76cf430023351 +size 1158 diff --git 
a/data/2025/2503_09xxx/2503.09501/images/50ccdd806d3c2fbb81d2905bb78d2552273c33edff77da4aad98cefd9ca79a0c.jpg b/data/2025/2503_09xxx/2503.09501/images/50ccdd806d3c2fbb81d2905bb78d2552273c33edff77da4aad98cefd9ca79a0c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fadb172a704576925020aa5fd834bde0e617afbf --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/50ccdd806d3c2fbb81d2905bb78d2552273c33edff77da4aad98cefd9ca79a0c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74b1737ee00fe643c5b0a61026b17c6a64b799ea6bcc326dc4019d18ad8bdc0d +size 1093 diff --git a/data/2025/2503_09xxx/2503.09501/images/5117513c75d33b0d0f28a22b36d95dc4e9851d988c82b7161379bad840154e56.jpg b/data/2025/2503_09xxx/2503.09501/images/5117513c75d33b0d0f28a22b36d95dc4e9851d988c82b7161379bad840154e56.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3c4331e741d0cf1fc3f098332eef37ae8f20cc42 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/5117513c75d33b0d0f28a22b36d95dc4e9851d988c82b7161379bad840154e56.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b67d3fab2649a4084b41b887e29f04ab7bc05f132a6659507e5b62eab04669a +size 6406 diff --git a/data/2025/2503_09xxx/2503.09501/images/528333c7c549e5b59e02481e7570b1ab4ebdcf2fe735070193c07c2a4fc150ac.jpg b/data/2025/2503_09xxx/2503.09501/images/528333c7c549e5b59e02481e7570b1ab4ebdcf2fe735070193c07c2a4fc150ac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94929a7d207385cfe15b3c0a8fd33bffe759c053 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/528333c7c549e5b59e02481e7570b1ab4ebdcf2fe735070193c07c2a4fc150ac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe131a20739b80aa616f3400b943d7025d93e8a1fb5ef6967c097a72559cda3b +size 1221 diff --git a/data/2025/2503_09xxx/2503.09501/images/569e05528f28d9bea18f232e9a85e68383a72aa9deef4c12e625df47568f6c0d.jpg 
b/data/2025/2503_09xxx/2503.09501/images/569e05528f28d9bea18f232e9a85e68383a72aa9deef4c12e625df47568f6c0d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4bfa1c039848136dc9ee864ef72b0d9ad9419e67 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/569e05528f28d9bea18f232e9a85e68383a72aa9deef4c12e625df47568f6c0d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:639f32f9965002907c638e847ce4ac825a90842b37deb0101b27ca1a30833056 +size 4189 diff --git a/data/2025/2503_09xxx/2503.09501/images/622da8514c8776b09adf1944ffa4c54f42ccb3c0dc87f16462c8dd56fc63f652.jpg b/data/2025/2503_09xxx/2503.09501/images/622da8514c8776b09adf1944ffa4c54f42ccb3c0dc87f16462c8dd56fc63f652.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ab70d8d7dbc5ca3560337ead7d885b9b3163c460 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/622da8514c8776b09adf1944ffa4c54f42ccb3c0dc87f16462c8dd56fc63f652.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f9abbd788c13d9295ca638e1df7bc9825222153d4ce8b22dc90185fda2675c5 +size 1979 diff --git a/data/2025/2503_09xxx/2503.09501/images/6525765c83f841fa74203734033cb3929d8d0bde8888d550d515f348b36a5f29.jpg b/data/2025/2503_09xxx/2503.09501/images/6525765c83f841fa74203734033cb3929d8d0bde8888d550d515f348b36a5f29.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b09dc2e47d5d99816263212630c001b881886fb --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/6525765c83f841fa74203734033cb3929d8d0bde8888d550d515f348b36a5f29.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7797e1c6c780ffa08220c3f44c5b6152b07e4f4b2c184c58daa6837c8c4c34a4 +size 1149 diff --git a/data/2025/2503_09xxx/2503.09501/images/6aad5c8b5675d5fc1e3eb597e21d99148f5c18d7820239246afb5d3c07134617.jpg b/data/2025/2503_09xxx/2503.09501/images/6aad5c8b5675d5fc1e3eb597e21d99148f5c18d7820239246afb5d3c07134617.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..682bfb2db197c97f5df1337ab2df6ce9685795bd --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/6aad5c8b5675d5fc1e3eb597e21d99148f5c18d7820239246afb5d3c07134617.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a3d49fce0df37d0e3f4dbdbbca54459d44bb141b8e1a48eb6583d2bd7718752 +size 8105 diff --git a/data/2025/2503_09xxx/2503.09501/images/6c039a69d6e8cd9522c4b7613deff397a371db9e812a8d0ec40925ac042bba22.jpg b/data/2025/2503_09xxx/2503.09501/images/6c039a69d6e8cd9522c4b7613deff397a371db9e812a8d0ec40925ac042bba22.jpg new file mode 100644 index 0000000000000000000000000000000000000000..faae379204ed12e15c6d28f02f2fb6a833a3cca1 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/6c039a69d6e8cd9522c4b7613deff397a371db9e812a8d0ec40925ac042bba22.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9408947bed7ca95295d94aadec2e04dda0ce82e0c12ddb14fa7d242ae6302481 +size 1486 diff --git a/data/2025/2503_09xxx/2503.09501/images/6cce98a8906c58ccf3aa6cedf13fe8485572fcdb57dd27e9b6cd696eb48335fd.jpg b/data/2025/2503_09xxx/2503.09501/images/6cce98a8906c58ccf3aa6cedf13fe8485572fcdb57dd27e9b6cd696eb48335fd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9653249f42f6ef8c42cc1fd7427b983fccf2acdc --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/6cce98a8906c58ccf3aa6cedf13fe8485572fcdb57dd27e9b6cd696eb48335fd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f67a320f24c683983d2ed7c18b38f232f5efbe72d5e52702a1a59ba50fd391c0 +size 802 diff --git a/data/2025/2503_09xxx/2503.09501/images/710b61fd23f78b9a946f7e235ed8fddd79aad1dd33ac36e38c69d6c2a28224e2.jpg b/data/2025/2503_09xxx/2503.09501/images/710b61fd23f78b9a946f7e235ed8fddd79aad1dd33ac36e38c69d6c2a28224e2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..77190e5f08617adc8a8d8a07c49e8a22cc1de208 --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09501/images/710b61fd23f78b9a946f7e235ed8fddd79aad1dd33ac36e38c69d6c2a28224e2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b80c060cfc517003fe9cbee6f54d83572dec7c850a703b026b01b9990f848f2 +size 4011 diff --git a/data/2025/2503_09xxx/2503.09501/images/71aca539c79024c7c54c8b500119e08584e1cf23534f6bae0662ad5dc7fbc4aa.jpg b/data/2025/2503_09xxx/2503.09501/images/71aca539c79024c7c54c8b500119e08584e1cf23534f6bae0662ad5dc7fbc4aa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c2a649949056b55215273a77958bd72fdd98f95 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/71aca539c79024c7c54c8b500119e08584e1cf23534f6bae0662ad5dc7fbc4aa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd579a303a5807d6cd2b89dce92a58ae5440778147f720ffb3208e1e12ce17e1 +size 68453 diff --git a/data/2025/2503_09xxx/2503.09501/images/7241552645a9f715abbfd5856f36fe3e9bf0185fcaa8355ef1915299ed2a840d.jpg b/data/2025/2503_09xxx/2503.09501/images/7241552645a9f715abbfd5856f36fe3e9bf0185fcaa8355ef1915299ed2a840d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9ab822b5d8845bd841cf53db5532ac7b4266c5a1 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/7241552645a9f715abbfd5856f36fe3e9bf0185fcaa8355ef1915299ed2a840d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4e7c158f9cced835443fc943208bfb5658e21a898f97a6c3541a2e0d2ec5ca4 +size 1952 diff --git a/data/2025/2503_09xxx/2503.09501/images/75d16e4f13aaf643e0a515b5d8370809cade1a3c547cd2b914de320bdf6d4aa3.jpg b/data/2025/2503_09xxx/2503.09501/images/75d16e4f13aaf643e0a515b5d8370809cade1a3c547cd2b914de320bdf6d4aa3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0cd6fe44094f3702c0b93ffc08e582bb613d1f9 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/75d16e4f13aaf643e0a515b5d8370809cade1a3c547cd2b914de320bdf6d4aa3.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:dbb37d336bc2f5138e7a046aa01be1c1deeb0fcc4ec8dd21125d8ccc06546da3 +size 4584 diff --git a/data/2025/2503_09xxx/2503.09501/images/7837d615d6c9a9750b72afac02ea2aa4e31641292385be12d2392f3f35fddae4.jpg b/data/2025/2503_09xxx/2503.09501/images/7837d615d6c9a9750b72afac02ea2aa4e31641292385be12d2392f3f35fddae4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bed81ec090bd3b6fcaf52fbf215d23eab40dc3d6 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/7837d615d6c9a9750b72afac02ea2aa4e31641292385be12d2392f3f35fddae4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53502a8ad6d6375dcb3aeabc40f12e9a5b4734e2ff0e582543f33db3ffabd5e2 +size 3945 diff --git a/data/2025/2503_09xxx/2503.09501/images/7be77f52a452cae631a3999a5c1a37f14186bd400761aa6b2200170fc2818d62.jpg b/data/2025/2503_09xxx/2503.09501/images/7be77f52a452cae631a3999a5c1a37f14186bd400761aa6b2200170fc2818d62.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ed6a1a818dfe98a7aabf9dc62b20abedba34d46 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/7be77f52a452cae631a3999a5c1a37f14186bd400761aa6b2200170fc2818d62.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ffb8eacd3fefbdb43942abb506590c246d82e2ede3c87c76f67de609dc75a78 +size 28060 diff --git a/data/2025/2503_09xxx/2503.09501/images/7cbee802c4e75cd55acfba9c2483cb9c2aae890a4cb8099c03a05a2668bd8ebf.jpg b/data/2025/2503_09xxx/2503.09501/images/7cbee802c4e75cd55acfba9c2483cb9c2aae890a4cb8099c03a05a2668bd8ebf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87b80e5e2e511b3370a5d2cd7c31012f232a91b4 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/7cbee802c4e75cd55acfba9c2483cb9c2aae890a4cb8099c03a05a2668bd8ebf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e84e442a114fc69453d473202a371c8ba6183e359f84831b4682f8c710b4367c +size 2094 diff --git 
a/data/2025/2503_09xxx/2503.09501/images/7cef31214b1e22f8feceb774c7c2fda0d8822a64668c93c676a445aa329e03c2.jpg b/data/2025/2503_09xxx/2503.09501/images/7cef31214b1e22f8feceb774c7c2fda0d8822a64668c93c676a445aa329e03c2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b90e49de066b3ca385c06bd524022b76fab95ead --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/7cef31214b1e22f8feceb774c7c2fda0d8822a64668c93c676a445aa329e03c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34ca11df2fbaeb5756c90b3ba0c2d17886fca1c4cce37bbc1f685c4ed2f10bee +size 20942 diff --git a/data/2025/2503_09xxx/2503.09501/images/7f3231499e748935cfc4e1cfa6d56049cf9d1cab539e1879a81522875ddbfa66.jpg b/data/2025/2503_09xxx/2503.09501/images/7f3231499e748935cfc4e1cfa6d56049cf9d1cab539e1879a81522875ddbfa66.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba7a6c3aceb579c4cd9c39a594eacbf18ecc5773 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/7f3231499e748935cfc4e1cfa6d56049cf9d1cab539e1879a81522875ddbfa66.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3bbd665059f454feb29a2af678acd396d920bc2a9264c40683d69813f527b4a +size 32388 diff --git a/data/2025/2503_09xxx/2503.09501/images/8067de4a34648160295745db52239f10161f855ef4f748f78627686ff344515e.jpg b/data/2025/2503_09xxx/2503.09501/images/8067de4a34648160295745db52239f10161f855ef4f748f78627686ff344515e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bfd07e9dc64eadcfe1c0be50a585829c3212d954 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/8067de4a34648160295745db52239f10161f855ef4f748f78627686ff344515e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59f767da9d929f9d76939aea279fc1380eda058998177da73911b326a1e4100f +size 24120 diff --git a/data/2025/2503_09xxx/2503.09501/images/80fb1f97ec83f17fbd8b560be77fe8de42b9125cf2446d0cdbeb37b3b7666a68.jpg 
b/data/2025/2503_09xxx/2503.09501/images/80fb1f97ec83f17fbd8b560be77fe8de42b9125cf2446d0cdbeb37b3b7666a68.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb4de79ff05151733cd0bed0538efdfc8c88fbba --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/80fb1f97ec83f17fbd8b560be77fe8de42b9125cf2446d0cdbeb37b3b7666a68.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5604733c5c05f1146bbd23e69952b1d897a75fceb69943f44e1248793e2a84b9 +size 1177 diff --git a/data/2025/2503_09xxx/2503.09501/images/8113cac550a4f08accd48762ff47f3d3be5dc7819212ee3344ef23f9e79fceb8.jpg b/data/2025/2503_09xxx/2503.09501/images/8113cac550a4f08accd48762ff47f3d3be5dc7819212ee3344ef23f9e79fceb8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..da84afcdd7d069a948ed934570736a91cf346399 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/8113cac550a4f08accd48762ff47f3d3be5dc7819212ee3344ef23f9e79fceb8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d723bbbb29cb1224e43ea524ed4d4248b3176780225146454dcb3db4466e1ec0 +size 7573 diff --git a/data/2025/2503_09xxx/2503.09501/images/84b1ab344c27fac96d5118e071408306d75ee69bcc10b08024cab4b8d264d754.jpg b/data/2025/2503_09xxx/2503.09501/images/84b1ab344c27fac96d5118e071408306d75ee69bcc10b08024cab4b8d264d754.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6afac0848f65aa4e54d87239ffb18c9846af2e55 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/84b1ab344c27fac96d5118e071408306d75ee69bcc10b08024cab4b8d264d754.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5884ee2ca2cc59e63f4100bf87fa43b05edb434cd747e59a61102d854ebc0a1b +size 8406 diff --git a/data/2025/2503_09xxx/2503.09501/images/8748dabc4481672874c3f2795f0d6264a961d9e141b9cdee0eb288d7db2dc87c.jpg b/data/2025/2503_09xxx/2503.09501/images/8748dabc4481672874c3f2795f0d6264a961d9e141b9cdee0eb288d7db2dc87c.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..d2e89342ad499530b156a2c831b3e01dbb2ddb0e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/8748dabc4481672874c3f2795f0d6264a961d9e141b9cdee0eb288d7db2dc87c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c892d60d36e24f4de34ad1fdb30b3b365edbdcb83e51163ae8d59fae88fecdaa +size 4195 diff --git a/data/2025/2503_09xxx/2503.09501/images/913cf3facfbd77150667cd4b4404381ce5bcddd9c76f1a6d393a0c01bfae6aa8.jpg b/data/2025/2503_09xxx/2503.09501/images/913cf3facfbd77150667cd4b4404381ce5bcddd9c76f1a6d393a0c01bfae6aa8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0bc6b6ba3d749d8d171ec4ed00435e6f9f04cfc --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/913cf3facfbd77150667cd4b4404381ce5bcddd9c76f1a6d393a0c01bfae6aa8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:635f0332e92b1b4db6d138e1c4e2f3e184f27212bd5e38bb51cc16875fd30bad +size 5576 diff --git a/data/2025/2503_09xxx/2503.09501/images/91f9b8b3200cd1fa5635daeedf1c6ecf50a917b18a2af5197eda5580d3dc874a.jpg b/data/2025/2503_09xxx/2503.09501/images/91f9b8b3200cd1fa5635daeedf1c6ecf50a917b18a2af5197eda5580d3dc874a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94dcc1ea2617c30dc35e0e911b3ba20c0395e380 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/91f9b8b3200cd1fa5635daeedf1c6ecf50a917b18a2af5197eda5580d3dc874a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e961783ab16d5524167e582bda212bddff227e6cb6bd563680fc51f162165eb1 +size 4454 diff --git a/data/2025/2503_09xxx/2503.09501/images/938abda3f283b1d544ed098377c88573b548599d2d006a47e793f5c697c16537.jpg b/data/2025/2503_09xxx/2503.09501/images/938abda3f283b1d544ed098377c88573b548599d2d006a47e793f5c697c16537.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b0a4a42a2205fb4abe2833a3e3e60cae799d0458 --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09501/images/938abda3f283b1d544ed098377c88573b548599d2d006a47e793f5c697c16537.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91dc255ec20702f6c28dfaf9e5f73dbcdba570ca6801ece2d4401d0ac552b4c9 +size 1102 diff --git a/data/2025/2503_09xxx/2503.09501/images/949357e9e329a27c903f62439c403346a563277fe176cc0143fd18f127825c52.jpg b/data/2025/2503_09xxx/2503.09501/images/949357e9e329a27c903f62439c403346a563277fe176cc0143fd18f127825c52.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68209eb663e2423967b5460f456d35b9cfac4ceb --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/949357e9e329a27c903f62439c403346a563277fe176cc0143fd18f127825c52.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fce1999021061bfcccb320834c8ea6008a8e580114835d774fdc30914b97cb23 +size 7899 diff --git a/data/2025/2503_09xxx/2503.09501/images/96e699afab51d562b257dd125436f136294ebd481918e9f0ef1d39366685d3c4.jpg b/data/2025/2503_09xxx/2503.09501/images/96e699afab51d562b257dd125436f136294ebd481918e9f0ef1d39366685d3c4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bfa3216e3090e6f8cfc4bd3f44f67e70c40c9d48 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/96e699afab51d562b257dd125436f136294ebd481918e9f0ef1d39366685d3c4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db1dd84a76a352c53654d56173681d64ddd8b616d750f72d5a037a11fe88e3f3 +size 3675 diff --git a/data/2025/2503_09xxx/2503.09501/images/9b53fdc6f47f6ed75eda062dd07c79e825f26afc0ac0d0fb1f7fa2587d332132.jpg b/data/2025/2503_09xxx/2503.09501/images/9b53fdc6f47f6ed75eda062dd07c79e825f26afc0ac0d0fb1f7fa2587d332132.jpg new file mode 100644 index 0000000000000000000000000000000000000000..39c1e8fb1bf805f7e0d1465e47edb44c7e4b3e99 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/9b53fdc6f47f6ed75eda062dd07c79e825f26afc0ac0d0fb1f7fa2587d332132.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:5f8f5f92f3b1cad83120008162d8f2e7097fff716679348ed67fd59dbdd2fcb7 +size 2024 diff --git a/data/2025/2503_09xxx/2503.09501/images/9b990e4ba70996ec4e3add4f2058c1bf66065120ac3089530de9e367f7c5b882.jpg b/data/2025/2503_09xxx/2503.09501/images/9b990e4ba70996ec4e3add4f2058c1bf66065120ac3089530de9e367f7c5b882.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2419ceb528b9deb1132be1e9e748ad009c2d77c8 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/9b990e4ba70996ec4e3add4f2058c1bf66065120ac3089530de9e367f7c5b882.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e8eec8e98910dc8d4a42651ef70782973e8a7f251b98bb941098d6ed69940b0 +size 8747 diff --git a/data/2025/2503_09xxx/2503.09501/images/9e34be703eb23abfa3d50c401d5176ae1c7172f8c5c2449d99876c5e8025bbf6.jpg b/data/2025/2503_09xxx/2503.09501/images/9e34be703eb23abfa3d50c401d5176ae1c7172f8c5c2449d99876c5e8025bbf6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..362b41405f1d6530f7c7853ce5f51e20b4421943 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/9e34be703eb23abfa3d50c401d5176ae1c7172f8c5c2449d99876c5e8025bbf6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f17d931d7d81e06763fb91a505220488361bbf193cfb19ace4a9aa23818a8f8d +size 6930 diff --git a/data/2025/2503_09xxx/2503.09501/images/9f10fb34be2f1c3eccf6de5c2a2b698ca17ec6df1df591b2e8bee7ea6c659fd4.jpg b/data/2025/2503_09xxx/2503.09501/images/9f10fb34be2f1c3eccf6de5c2a2b698ca17ec6df1df591b2e8bee7ea6c659fd4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..38f211d5b0b954d77516189e01bbe3c80d7ac7f1 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/9f10fb34be2f1c3eccf6de5c2a2b698ca17ec6df1df591b2e8bee7ea6c659fd4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66a0a9271b59fe9da21ea476e7bb96aa2940b1926a2db085e769347ace4cb833 +size 1096 diff --git 
a/data/2025/2503_09xxx/2503.09501/images/a5f4f993c4054904cf79c154022c7bf8db9d847e25e2e01b15d142fb8abc2254.jpg b/data/2025/2503_09xxx/2503.09501/images/a5f4f993c4054904cf79c154022c7bf8db9d847e25e2e01b15d142fb8abc2254.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e6a1db2c784ea3d4ff6bc3e7a764c6a8e307489 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/a5f4f993c4054904cf79c154022c7bf8db9d847e25e2e01b15d142fb8abc2254.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b656b65334fac271d05c8a96583c33b51db40b3734bddbf7e6549829ca29fcf8 +size 6071 diff --git a/data/2025/2503_09xxx/2503.09501/images/a63ca3479e328fb21c46992199228dda29325fd84852c51c35f55e058a3aa453.jpg b/data/2025/2503_09xxx/2503.09501/images/a63ca3479e328fb21c46992199228dda29325fd84852c51c35f55e058a3aa453.jpg new file mode 100644 index 0000000000000000000000000000000000000000..14ddf41090e9f020cfa29638b7eb1db739472d3a --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/a63ca3479e328fb21c46992199228dda29325fd84852c51c35f55e058a3aa453.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:252e8c48fe0b8af0505b486dcfc95bb7d5c34132c5713bea013c5a28a88e1748 +size 3704 diff --git a/data/2025/2503_09xxx/2503.09501/images/a645dd71a3993817a902a8e8a4efdcf103b1d6ffd7933658ca0a24594ac96eed.jpg b/data/2025/2503_09xxx/2503.09501/images/a645dd71a3993817a902a8e8a4efdcf103b1d6ffd7933658ca0a24594ac96eed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6975624c542d2f0ec982eeb8ca22cf13f16df0fd --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/a645dd71a3993817a902a8e8a4efdcf103b1d6ffd7933658ca0a24594ac96eed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:790f5e6816ed02d1d9b68f62f5b56eeedae1770c5dc302e2415dbd46ec7cb0fe +size 3121 diff --git a/data/2025/2503_09xxx/2503.09501/images/a64ee91f6fec3fd7b197bb471e73dc04d0d0d12e6b2af7b48fca4a932ea681cb.jpg 
b/data/2025/2503_09xxx/2503.09501/images/a64ee91f6fec3fd7b197bb471e73dc04d0d0d12e6b2af7b48fca4a932ea681cb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..efb6ccdb099c9806d2b70399870c04ab49c247c9 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/a64ee91f6fec3fd7b197bb471e73dc04d0d0d12e6b2af7b48fca4a932ea681cb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2363b70e08c3527292074d767cd357c3b02ab553c68fd74882ecb77c31d97c6c +size 1265 diff --git a/data/2025/2503_09xxx/2503.09501/images/a86aaf6aa38251d41b64cce9905df386381958a7ede62e6faa107cd8fdf8b032.jpg b/data/2025/2503_09xxx/2503.09501/images/a86aaf6aa38251d41b64cce9905df386381958a7ede62e6faa107cd8fdf8b032.jpg new file mode 100644 index 0000000000000000000000000000000000000000..35c2b02d6a62a1e784147dc8f2a702016765f473 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/a86aaf6aa38251d41b64cce9905df386381958a7ede62e6faa107cd8fdf8b032.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:691d704a58a346d2ab216222b445619b559a6c2512fe332c0fb9dd74b08ec598 +size 10589 diff --git a/data/2025/2503_09xxx/2503.09501/images/ab388587275a1e57a8348ee40749023ad5049d876dde96e5d113fa279263566f.jpg b/data/2025/2503_09xxx/2503.09501/images/ab388587275a1e57a8348ee40749023ad5049d876dde96e5d113fa279263566f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fa8c97a67156b929e7de283c0e7a56a6ac92f3da --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/ab388587275a1e57a8348ee40749023ad5049d876dde96e5d113fa279263566f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f567d5c124b3b795862b531e2f3a22020575c6beee74188b41f92f6bcab7a8b +size 2202 diff --git a/data/2025/2503_09xxx/2503.09501/images/abdc48d2d97eab951968fc5353ec9380321ab89f488c157876a9f268dee813a0.jpg b/data/2025/2503_09xxx/2503.09501/images/abdc48d2d97eab951968fc5353ec9380321ab89f488c157876a9f268dee813a0.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..037969295201598565e0b5765ecf75948c9cfadc --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/abdc48d2d97eab951968fc5353ec9380321ab89f488c157876a9f268dee813a0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04019904d2bf27fb5cf6b28fe81e88408cbabb956530f6385e83ab82698d9070 +size 5515 diff --git a/data/2025/2503_09xxx/2503.09501/images/b1511f7d2538127a540c6110fe627d96c51df91db3b849ba95d19fedfe10fad6.jpg b/data/2025/2503_09xxx/2503.09501/images/b1511f7d2538127a540c6110fe627d96c51df91db3b849ba95d19fedfe10fad6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb586e07b9684171d20ea3943fede1b802ec9e3b --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/b1511f7d2538127a540c6110fe627d96c51df91db3b849ba95d19fedfe10fad6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d377600c8449b2378b952853e55b4f7918fd3a46ca81fbaa80bb60b0fe1faa48 +size 5590 diff --git a/data/2025/2503_09xxx/2503.09501/images/b18d5455edfb4a76628089dd0bafcca0f64a4b3c8b3c93ede341d38248ca05a0.jpg b/data/2025/2503_09xxx/2503.09501/images/b18d5455edfb4a76628089dd0bafcca0f64a4b3c8b3c93ede341d38248ca05a0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1320d6c34b8b0bfb4e5a27bd613a35c197d80e39 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/b18d5455edfb4a76628089dd0bafcca0f64a4b3c8b3c93ede341d38248ca05a0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:decb65150d6bbb105912def4f3f7acba13fbb2046be96dc767aeb43147b6871d +size 6322 diff --git a/data/2025/2503_09xxx/2503.09501/images/b6835da4e0c1c0b9108bd67a52f6f1b3fa00f0b7df3bbbd73a4d0bfb0e85389d.jpg b/data/2025/2503_09xxx/2503.09501/images/b6835da4e0c1c0b9108bd67a52f6f1b3fa00f0b7df3bbbd73a4d0bfb0e85389d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ad5f9877d146b5f35f456d63b85d4a2886ee0fb --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09501/images/b6835da4e0c1c0b9108bd67a52f6f1b3fa00f0b7df3bbbd73a4d0bfb0e85389d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2758c45d90dd1e9266309c7fb92736b6a33c28cec8ffa59a434a2451a031aac8 +size 1923 diff --git a/data/2025/2503_09xxx/2503.09501/images/ba93ff6ca809911552f39438514acafb5a346d56b0ab4f63663e3f4afc26565a.jpg b/data/2025/2503_09xxx/2503.09501/images/ba93ff6ca809911552f39438514acafb5a346d56b0ab4f63663e3f4afc26565a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1fe56371e487369ed65b3fc9e00a5aa986653bf9 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/ba93ff6ca809911552f39438514acafb5a346d56b0ab4f63663e3f4afc26565a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d48c07314b407f03d34c30a3f47dd3bcf67223b873a3b632fca720c897d1e51b +size 955 diff --git a/data/2025/2503_09xxx/2503.09501/images/bb5a0da380958a3094a2173fe72a961587f3d1da6dbe550666ba873b3b27827c.jpg b/data/2025/2503_09xxx/2503.09501/images/bb5a0da380958a3094a2173fe72a961587f3d1da6dbe550666ba873b3b27827c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e36b0d7a19f77d8857c34da29f3ac78566f8a78 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/bb5a0da380958a3094a2173fe72a961587f3d1da6dbe550666ba873b3b27827c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df82b531ba44bf76f265e373076a02db68980c00429dace4973df98e1f5d31b1 +size 12348 diff --git a/data/2025/2503_09xxx/2503.09501/images/bb67bfe8bbe98dfdf746dc7ea8f055c99defa53d3bfef0dab1ba7eb652abfa26.jpg b/data/2025/2503_09xxx/2503.09501/images/bb67bfe8bbe98dfdf746dc7ea8f055c99defa53d3bfef0dab1ba7eb652abfa26.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7083b6c724f5c766497d0e56a5f1349e34c7e14c --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/bb67bfe8bbe98dfdf746dc7ea8f055c99defa53d3bfef0dab1ba7eb652abfa26.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:c70f9c07417617a801bba140b72a8addcc9ed7b09e844e41a16a0761791a4579 +size 8946 diff --git a/data/2025/2503_09xxx/2503.09501/images/c1b6f6aaf161ccb2d707eb51858e4b49250221c289f22f7d7fd725b003912a00.jpg b/data/2025/2503_09xxx/2503.09501/images/c1b6f6aaf161ccb2d707eb51858e4b49250221c289f22f7d7fd725b003912a00.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cb82f3a29a0e94207bbd6e46fccbe4b8ebc4584d --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/c1b6f6aaf161ccb2d707eb51858e4b49250221c289f22f7d7fd725b003912a00.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1daaf715c3ebbd11211ddc3a1317debdcc7d1b8496e3b0aea9ddaf8c0dfbdaa6 +size 32868 diff --git a/data/2025/2503_09xxx/2503.09501/images/c2c45f6b75a3ecd7f886ca7735670f089d1dde49eae53b3ba52d5a768506d3ab.jpg b/data/2025/2503_09xxx/2503.09501/images/c2c45f6b75a3ecd7f886ca7735670f089d1dde49eae53b3ba52d5a768506d3ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..071ec69a0ededa7f34cd3db5a6c5b6db72c27af8 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/c2c45f6b75a3ecd7f886ca7735670f089d1dde49eae53b3ba52d5a768506d3ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58551289d1462ac9059faccdc2832e87361080b64f7dfbcea2b2772916522a08 +size 4425 diff --git a/data/2025/2503_09xxx/2503.09501/images/c48a4482b269d65f66e76eb545aed761bdd1e37cb7f681b85c17f366bd01e913.jpg b/data/2025/2503_09xxx/2503.09501/images/c48a4482b269d65f66e76eb545aed761bdd1e37cb7f681b85c17f366bd01e913.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f6d71503970a8983787d337993f63485bb72ed2c --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/c48a4482b269d65f66e76eb545aed761bdd1e37cb7f681b85c17f366bd01e913.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3177a6ed3f8c2a4f3bfc7f1f44c191e4711bb8555b394b48dc3a4617aa8caa2 +size 3236 diff --git 
a/data/2025/2503_09xxx/2503.09501/images/c6e921c75e514affe23ba47738a4334e7ae564f4ed6caa716d917d2741943324.jpg b/data/2025/2503_09xxx/2503.09501/images/c6e921c75e514affe23ba47738a4334e7ae564f4ed6caa716d917d2741943324.jpg new file mode 100644 index 0000000000000000000000000000000000000000..697057caa6a39cfecf9443452e1fd5ed2e37dd06 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/c6e921c75e514affe23ba47738a4334e7ae564f4ed6caa716d917d2741943324.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c093e26627863f5f9ce8a6c8800cd404a933d6f61b7bc7d15cc8f76225fbb5f0 +size 1955 diff --git a/data/2025/2503_09xxx/2503.09501/images/c7f26b9a321108578bb007fb3a44a3b27e7e213fbbdc726184ec19e80c57653f.jpg b/data/2025/2503_09xxx/2503.09501/images/c7f26b9a321108578bb007fb3a44a3b27e7e213fbbdc726184ec19e80c57653f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f5f77399564870e27b28212a5c2e5a7920a762c9 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/c7f26b9a321108578bb007fb3a44a3b27e7e213fbbdc726184ec19e80c57653f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f18305002ef57067c6e03d834c536a53427f7511e040d771f36732dab4882baa +size 5778 diff --git a/data/2025/2503_09xxx/2503.09501/images/ca970fefe3377e52dda01105b9874b3532e444010e03bc6c2807c37e7459b450.jpg b/data/2025/2503_09xxx/2503.09501/images/ca970fefe3377e52dda01105b9874b3532e444010e03bc6c2807c37e7459b450.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3c251ca0906a235735bf57fcfa2b214931b256e5 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/ca970fefe3377e52dda01105b9874b3532e444010e03bc6c2807c37e7459b450.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d51887f57707623bd16df10b666fca5585ee403d79da0fc76006e5a73cfe375d +size 1852 diff --git a/data/2025/2503_09xxx/2503.09501/images/d0e8f5c83c09eb3f74d88a58572429c88fe7ef9810aa4a364a80d0c3f482a10a.jpg 
b/data/2025/2503_09xxx/2503.09501/images/d0e8f5c83c09eb3f74d88a58572429c88fe7ef9810aa4a364a80d0c3f482a10a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..98119df6c256cbcbf6737f4c631cf541b1346aa0 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/d0e8f5c83c09eb3f74d88a58572429c88fe7ef9810aa4a364a80d0c3f482a10a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a56dcdf5d9c23d3088c2d69d000854f87f0342e277558195f40801445a822f90 +size 1695 diff --git a/data/2025/2503_09xxx/2503.09501/images/d1ac43fc68ceb3c6df31a21c8f515a2c7a6eab60ec12eb55a3eb012f71308f76.jpg b/data/2025/2503_09xxx/2503.09501/images/d1ac43fc68ceb3c6df31a21c8f515a2c7a6eab60ec12eb55a3eb012f71308f76.jpg new file mode 100644 index 0000000000000000000000000000000000000000..855794b127a047c91b12a1a74de1705f042d6c27 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/d1ac43fc68ceb3c6df31a21c8f515a2c7a6eab60ec12eb55a3eb012f71308f76.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b6606f6989fbb7d57305400a4999e762b7f388b0566126cb0dbe21a46ba3c65 +size 1437 diff --git a/data/2025/2503_09xxx/2503.09501/images/d51f2e94513b319fbfd3670c878bfba4fad1b14bbbb2d664cf5d96071695f0f5.jpg b/data/2025/2503_09xxx/2503.09501/images/d51f2e94513b319fbfd3670c878bfba4fad1b14bbbb2d664cf5d96071695f0f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ab801c8dbc75b8c3a19c1a2ccbdbddf3d49b0c3 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/d51f2e94513b319fbfd3670c878bfba4fad1b14bbbb2d664cf5d96071695f0f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef889e38ff1984f052deece5e7af002953aba23aa3b17d305d1d0e98ed799b0c +size 4222 diff --git a/data/2025/2503_09xxx/2503.09501/images/ee55be31f7c6103fd057348679e639180187cf989e289196bbf5b13e1e17c731.jpg b/data/2025/2503_09xxx/2503.09501/images/ee55be31f7c6103fd057348679e639180187cf989e289196bbf5b13e1e17c731.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..2563f220f9f3438a01e2d7990e03abb61153a549 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/ee55be31f7c6103fd057348679e639180187cf989e289196bbf5b13e1e17c731.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19a194ea291f8994764007dcdb1345f914ec744f0eb383b0908ffa42cba5d62a +size 16338 diff --git a/data/2025/2503_09xxx/2503.09501/images/eff8fc9f0b90286a3dd92044dfef48dcd65fbd0038ac94780d15266e585245c2.jpg b/data/2025/2503_09xxx/2503.09501/images/eff8fc9f0b90286a3dd92044dfef48dcd65fbd0038ac94780d15266e585245c2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..214fb8ce43e6d89fc0adcf866460c1b96cc59ac6 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/eff8fc9f0b90286a3dd92044dfef48dcd65fbd0038ac94780d15266e585245c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98dc103a4a5808e9c177da937b416c690a5e63ddb067309fdaa5150f8011e8ea +size 1681 diff --git a/data/2025/2503_09xxx/2503.09501/images/f246502d1a54bf77abbf1b84a3d339ae985d445d9515b42b122262754119fa92.jpg b/data/2025/2503_09xxx/2503.09501/images/f246502d1a54bf77abbf1b84a3d339ae985d445d9515b42b122262754119fa92.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0ff9e8121da1cabd011370a5db30d3af994b3f8 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/f246502d1a54bf77abbf1b84a3d339ae985d445d9515b42b122262754119fa92.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87c99c32db7b5328f7ba529b4b0ff766dc45ac88de5d67035d51fc9ed8b0ea18 +size 19268 diff --git a/data/2025/2503_09xxx/2503.09501/images/f5bf38bd49abe8bfdb677b4b0f81847c247b0abe866c361f4b13aeab02a794fc.jpg b/data/2025/2503_09xxx/2503.09501/images/f5bf38bd49abe8bfdb677b4b0f81847c247b0abe866c361f4b13aeab02a794fc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2bd42e58e4c13173637757b7105517b999398ccc --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09501/images/f5bf38bd49abe8bfdb677b4b0f81847c247b0abe866c361f4b13aeab02a794fc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:337ac36f3f614a074a7f2c79957868b9acf9716da6966accae8fc99066cd147a +size 1092 diff --git a/data/2025/2503_09xxx/2503.09501/images/f891e947f79753d2849f10bbf5382550512e2e05f5f7ecbdcdde601d0bbb9a59.jpg b/data/2025/2503_09xxx/2503.09501/images/f891e947f79753d2849f10bbf5382550512e2e05f5f7ecbdcdde601d0bbb9a59.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ad149a05f2a39018a9b3f0949d82981d84cfe36e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/f891e947f79753d2849f10bbf5382550512e2e05f5f7ecbdcdde601d0bbb9a59.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c07d7de3cb791a29af5acf310a7bb44878c2b3a7b4f9bb5b30274fe062271585 +size 7843 diff --git a/data/2025/2503_09xxx/2503.09501/images/fd5b40d644792636991a20db79d8c4202f072f2e023bbd6106c186898b906286.jpg b/data/2025/2503_09xxx/2503.09501/images/fd5b40d644792636991a20db79d8c4202f072f2e023bbd6106c186898b906286.jpg new file mode 100644 index 0000000000000000000000000000000000000000..65032801eca47639890d33c7e4522ca9ceb08d4c --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/fd5b40d644792636991a20db79d8c4202f072f2e023bbd6106c186898b906286.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b3c474edb134c8e7df15c8715eb3abb74d80aaf17a824a7bb7b97a4405722bb +size 96245 diff --git a/data/2025/2503_09xxx/2503.09501/images/fdfa8f8cf4f66137f42c7aba7c2e2ef31d21edf331150f3efd0c1f3325151c96.jpg b/data/2025/2503_09xxx/2503.09501/images/fdfa8f8cf4f66137f42c7aba7c2e2ef31d21edf331150f3efd0c1f3325151c96.jpg new file mode 100644 index 0000000000000000000000000000000000000000..925b051afea8dc35bb367ec1416d0ce22a0a0fdc --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/images/fdfa8f8cf4f66137f42c7aba7c2e2ef31d21edf331150f3efd0c1f3325151c96.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:bbafaeeab9b1a1aa2c3adabb31c07075fec2ad188597aa7955771d481143b07d +size 17918 diff --git a/data/2025/2503_09xxx/2503.09501/layout.json b/data/2025/2503_09xxx/2503.09501/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ab31a71d12105c82f97d6ef3a8719fa3c74ff350 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09501/layout.json @@ -0,0 +1,32066 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 131, + 97, + 479, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 97, + 479, + 137 + ], + "spans": [ + { + "bbox": [ + 131, + 97, + 479, + 137 + ], + "type": "text", + "content": "ReMA: Learning to Meta-think for LLMs with Multi-agent Reinforcement Learning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "spans": [ + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "text", + "content": "Ziyu Wan" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "inline_equation", + "content": "^{1,2*}" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "text", + "content": ", Yunxiang Li" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "inline_equation", + "content": "^{3*}" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "text", + "content": ", Xiaoyu Wen" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "text", + "content": ", Yan Song" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "text", + "content": ", Hanjing Wang" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 123, + 
177, + 487, + 203 + ], + "type": "text", + "content": ", Linyi Yang" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "text", + "content": ", Mark Schmidt" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "text", + "content": ", Jun Wang" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "text", + "content": ", Weinan Zhang" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "text", + "content": ", Shuyue Hu" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "inline_equation", + "content": "^{2\\ddagger}" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "text", + "content": ", Ying Wen" + }, + { + "bbox": [ + 123, + 177, + 487, + 203 + ], + "type": "inline_equation", + "content": "^{1\\ddagger}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 215, + 213, + 394, + 260 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 241, + 213, + 371, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 213, + 371, + 225 + ], + "spans": [ + { + "bbox": [ + 241, + 213, + 371, + 225 + ], + "type": "text", + "content": "1 Shanghai Jiao Tong University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 215, + 224, + 394, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 215, + 224, + 394, + 236 + ], + "spans": [ + { + "bbox": [ + 215, + 224, + 394, + 236 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 215, + 224, + 394, + 236 + ], + "type": "text", + "content": " Shanghai Artificial 
Intelligence Laboratory" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 239, + 236, + 371, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 236, + 371, + 247 + ], + "spans": [ + { + "bbox": [ + 239, + 236, + 371, + 247 + ], + "type": "text", + "content": "3 University of British Columbia" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 246, + 248, + 363, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 248, + 363, + 260 + ], + "spans": [ + { + "bbox": [ + 246, + 248, + 363, + 260 + ], + "type": "text", + "content": "4 University College London" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 281, + 288, + 329, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 288, + 329, + 300 + ], + "spans": [ + { + "bbox": [ + 281, + 288, + 329, + 300 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 312, + 469, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 312, + 469, + 542 + ], + "spans": [ + { + "bbox": [ + 140, + 312, + 469, + 542 + ], + "type": "text", + "content": "Recent research on Reasoning of Large Language Models (LLMs) has sought to further enhance their performance by integrating meta-thinking—enabling models to monitor, evaluate, and control their reasoning processes for more adaptive and effective problem-solving. However, current single-agent work lacks a specialized design for acquiring meta-thinking, resulting in low efficacy. To address this challenge, we introduce Reinforced Meta-thinking Agents (ReMA), a novel framework that leverages Multi-Agent Reinforcement Learning (MARL) to elicit meta-thinking behaviors, encouraging LLMs to think about thinking. 
ReMA decouples the reasoning process into two hierarchical agents: a high-level meta-thinking agent responsible for generating strategic oversight and plans, and a low-level reasoning agent for detailed executions. Through iterative reinforcement learning with aligned objectives, these agents explore and learn collaboration, leading to improved generalization and robustness. Empirical results from single-turn experiments demonstrate that ReMA outperforms single-agent RL baselines on complex reasoning tasks, including competitive-level mathematical benchmarks and LLM-as-a-Judge benchmarks. Additionally, we further extend ReMA to multi-turn interaction settings, leveraging turn-level ratio and parameter sharing to improve efficiency. Comprehensive ablation studies further illustrate the evolving dynamics of each distinct agent, providing valuable insights into how the meta-thinking reasoning process enhances the reasoning capabilities of LLMs. Our code can be found in https://github.com/ziyuwan/ReMA-public" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 562, + 192, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 562, + 192, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 562, + 192, + 574 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 586, + 506, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 506, + 675 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 506, + 675 + ], + "type": "text", + "content": "Large language models (LLMs) have demonstrated remarkable capabilities in knowledge understanding and complex reasoning tasks [Chowdhery et al., 2023, Achiam et al., 2023, Anil et al., 2023, Dubey et al., 2024]. The paradigm in developing LLM-based reasoning models is shifting from scaling training-time computation towards scaling test-time computation [Snell et al., 2024]. 
Recent advancements, such as OpenAI-o1 [OpenAI, 2024], Deepseek R1 [DeepSeek-AI et al., 2025], and Gemini 2.0 Flash Thinking [DeepMind, 2025], have demonstrated that allowing LLMs to think before generating answers can significantly enhance performance and lead to the emergence of human-like reasoning patterns. These patterns like \"Wait, hold on.\" or \"Let's break this down.\"" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2503.09501v3 [cs.AI] 27 May 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 681, + 193, + 692 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 681, + 193, + 692 + ], + "spans": [ + { + "bbox": [ + 116, + 681, + 193, + 692 + ], + "type": "text", + "content": "*Equal contribution." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 118, + 692, + 392, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 692, + 392, + 703 + ], + "spans": [ + { + "bbox": [ + 118, + 692, + 392, + 703 + ], + "type": "inline_equation", + "content": "^{\\dagger}" + }, + { + "bbox": [ + 118, + 692, + 392, + 703 + ], + "type": "text", + "content": "Work done during internship at Shanghai Artificial Intelligence Laboratory" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 118, + 703, + 205, + 714 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 703, + 205, + 714 + ], + "spans": [ + { + "bbox": [ + 118, + 703, + 205, + 714 + ], + "type": "text", + "content": "Corresponding Author" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 731, + 192, + 742 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 731, + 192, + 742 + ], + "spans": [ + { + "bbox": [ + 105, + 731, + 192, + 742 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 187, + 74, + 423, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 74, + 423, + 84 + ], + "spans": [ + { + "bbox": [ + 187, + 74, + 423, + 84 + ], + "type": "text", + "content": "Question: " + }, + { + "bbox": [ + 187, + 74, + 423, + 84 + ], + "type": "inline_equation", + "content": "T = 9.5" + }, + { + "bbox": [ + 187, + 74, + 423, + 84 + ], + "type": "text", + "content": ". 
If " + }, + { + "bbox": [ + 187, + 74, + 423, + 84 + ], + "type": "inline_equation", + "content": "\\log_2 x^T - \\log_4 x = \\log_8 x^k" + }, + { + "bbox": [ + 187, + 74, + 423, + 84 + ], + "type": "text", + "content": " is an identity for all " + }, + { + "bbox": [ + 187, + 74, + 423, + 84 + ], + "type": "inline_equation", + "content": "x > 0" + }, + { + "bbox": [ + 187, + 74, + 423, + 84 + ], + "type": "text", + "content": ", compute the value of " + }, + { + "bbox": [ + 187, + 74, + 423, + 84 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 187, + 74, + 423, + 84 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 109, + 86, + 504, + 235 + ], + "blocks": [ + { + "bbox": [ + 109, + 86, + 504, + 235 + ], + "lines": [ + { + "bbox": [ + 109, + 86, + 504, + 235 + ], + "spans": [ + { + "bbox": [ + 109, + 86, + 504, + 235 + ], + "type": "image", + "image_path": "fd5b40d644792636991a20db79d8c4202f072f2e023bbd6106c186898b906286.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 243, + 504, + 288 + ], + "lines": [ + { + "bbox": [ + 104, + 243, + 504, + 288 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 504, + 288 + ], + "type": "text", + "content": "Figure 1: Left: A construction-based method that fine-tunes LLMs using rejection sampling, searching among combinations of pre-defined templates. Middle: R1-like method learns to mix meta-thinking and detailed reasoning steps during training. Right: Our method ReMA separates the meta-thinking and reasoning steps in a multi-agent system and updated by reinforcement learning." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 301, + 504, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 301, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 301, + 504, + 335 + ], + "type": "text", + "content": "indicate that LLMs can develop a form of meta-thinking abilities that can generalize well to out-of-distribution (OOD) tasks [Xiang et al., 2025]. Meta-thinking, also known as metacognitive skills [Flavell, 1979], is an ability traditionally considered uniquely human [Didolkar et al., 2024]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 340, + 506, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 506, + 515 + ], + "type": "text", + "content": "To cultivate meta-thinking patterns from LLMs themselves, recent construction-based supervised approaches leverage supervised finetuning on structured reasoning trajectories. Specifically, these methods sampling reasoning trajectories from predefined meta-thinking templates and then use supervised finetuning (SFT) or direct preference optimization (DPO) [Rafailov et al., 2023] to teach LLMs imitate these patterns [Qi et al., 2024, Yue et al., Xi et al., 2024, Yang et al., 2025, Muenighoff et al., 2025, Ye et al., 2025c]. However, such methods lack sufficient flexibility for LLMs to explore suitable meta-thinking patterns. Thus, they often fail to generalize to out-of-distribution (OOD) problems, leading to unstable performance on unseen data [Kirk et al., Chu et al., 2025]. Besides construction-based methods, R1-like single-agent reinforcement learning (SARL) has also been adopted for meta-thinking in reasoning [DeepSeek-AI et al., 2025, Xie et al., 2025]. 
However, these SARL attempts typically rely on strong foundational models for easier exploration or extensive task-specific fine-tuning for stable training [Xu et al., 2025, Gandhi et al., 2025]. Furthermore, SARL needs to learn meta-thinking and reasoning within a single forward pass, seeking to capture complex reasoning structures purely in an autoregressive manner [Xie et al., 2025]. This can potentially lead to issues such as inefficient exploration as well as reduced readability and early convergence to local optima [DeepSeek-AI et al., 2025, Xiang et al., 2025]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 520, + 506, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 520, + 506, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 506, + 640 + ], + "type": "text", + "content": "To address these limitations, we introduce Reinforced Meta-thinking Agents (ReMA), a novel framework that leverages multi-agent reinforcement learning (MARL) to encourage LLMs to think about thinking. Our approach employs a multi-agent system (MAS) composed of a high-level meta-thinking agent, responsible for strategic oversight and instruction generation, and a low-level reasoning agent tasked with detailed executing processes based on provided guidance. We compare the inference process among the construction-based method, R1-like method, and ReMA in Fig. 1. Since MAS distributes the exploration space of SARL into multiple agents, it enables each agent to explore more structurally and efficiently during training. Then we apply reinforcement learning on each agent with aligned reward functions. In this way, ReMA effectively balances the trade-off between generalization capability and exploration efficiency. As a result, they can learn to play the best of their role (either to meta-think or to follow instructions), at the present of the other agent." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 645, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 504, + 723 + ], + "type": "text", + "content": "To our knowledge, we are the first to formally define and optimize a multi-agent meta-thinking reasoning process (MAMRP) through multi-agent reinforcement learning. Our extensive experiments span both math reasoning and LLM-as-a-Judge tasks, where ReMA consistently achieves the highest average performance across three backbone pretrained models. We further extend ReMA to multi-turn interaction settings on math reasoning tasks, implementing turn-level ratio to optimize trajectory returns and stabilize training. Through comprehensive ablation studies, we illustrate the evolving dynamics between agents, revealing unexpected interaction patterns such as role reversals" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": "under different reward settings. These findings provide valuable insights into how meta-thinking processes enhance the reasoning capabilities of LLMs." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 111, + 196, + 125 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 196, + 125 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 196, + 125 + ], + "type": "text", + "content": "2 Preliminaries" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 137, + 504, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 137, + 504, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 137, + 504, + 160 + ], + "type": "text", + "content": "In this section, we outline the formulation of the vanilla reasoning process (Sec. 2.1) and the representative training methods (Sec. 2.2) along with the notation used throughout the paper." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 173, + 272, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 173, + 272, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 173, + 272, + 186 + ], + "type": "text", + "content": "2.1 Vanilla Reasoning Process (VRP)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 194, + 504, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 194, + 504, + 229 + ], + "spans": [ + { + "bbox": [ + 104, + 194, + 504, + 229 + ], + "type": "text", + "content": "The probability of generating a response " + }, + { + "bbox": [ + 104, + 194, + 504, + 229 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 104, + 194, + 504, + 229 + ], + "type": "text", + "content": " equals the product of its stepwise probabilities. 
Given a model " + }, + { + "bbox": [ + 104, + 194, + 504, + 229 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 194, + 504, + 229 + ], + "type": "text", + "content": " and a prompt " + }, + { + "bbox": [ + 104, + 194, + 504, + 229 + ], + "type": "inline_equation", + "content": "\\mathbf{x} = (x_1, \\ldots, x_N)" + }, + { + "bbox": [ + 104, + 194, + 504, + 229 + ], + "type": "text", + "content": ", the vanilla reasoning process (VRP) autoregressively produces a response " + }, + { + "bbox": [ + 104, + 194, + 504, + 229 + ], + "type": "inline_equation", + "content": "\\mathbf{y} = (y_1, \\ldots, y_L)" + }, + { + "bbox": [ + 104, + 194, + 504, + 229 + ], + "type": "text", + "content": " with" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 161, + 235, + 447, + 269 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 161, + 235, + 447, + 269 + ], + "spans": [ + { + "bbox": [ + 161, + 235, + 447, + 269 + ], + "type": "interline_equation", + "content": "\\pi_ {\\theta} (\\mathbf {y} | \\mathbf {x}) = \\prod_ {l = 1} ^ {L} \\pi_ {\\theta} (y _ {l} | x _ {1}, x _ {2}, \\dots x _ {N}, y _ {1}, \\dots , y _ {l - 1}) = \\prod_ {l = 1} ^ {L} \\pi_ {\\theta} (\\mathbf {y} _ {l} | \\mathbf {x}, \\mathbf {y} _ {< l})", + "image_path": "bb67bfe8bbe98dfdf746dc7ea8f055c99defa53d3bfef0dab1ba7eb652abfa26.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 274, + 504, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 504, + 299 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 504, + 299 + ], + "type": "text", + "content": "The response usually contains intermediate reasoning steps before arriving at the final answer, this process is also known as chain-of-thought (CoT) [Wei et al., 2022], which can be represented as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 258, + 305, + 504, + 322 + ], + "type": "interline_equation", + "angle": 0, + "lines": 
[ + { + "bbox": [ + 258, + 305, + 504, + 322 + ], + "spans": [ + { + "bbox": [ + 258, + 305, + 504, + 322 + ], + "type": "interline_equation", + "content": "\\mathbf {x} \\xrightarrow {\\text {r e a s o n i n g s t e p s}} \\mathbf {y} \\sim \\mathbf {a}, \\tag {1}", + "image_path": "a63ca3479e328fb21c46992199228dda29325fd84852c51c35f55e058a3aa453.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 327, + 395, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 327, + 395, + 340 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 395, + 340 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 327, + 395, + 340 + ], + "type": "inline_equation", + "content": "\\mathbf{a}" + }, + { + "bbox": [ + 105, + 327, + 395, + 340 + ], + "type": "text", + "content": " is the extracted final answer, which is included in the answer " + }, + { + "bbox": [ + 105, + 327, + 395, + 340 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 105, + 327, + 395, + 340 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 353, + 313, + 366 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 353, + 313, + 366 + ], + "spans": [ + { + "bbox": [ + 105, + 353, + 313, + 366 + ], + "type": "text", + "content": "2.2 Training VRP via Reinforcement Learning" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 373, + 504, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 373, + 504, + 397 + ], + "spans": [ + { + "bbox": [ + 104, + 373, + 504, + 397 + ], + "type": "text", + "content": "RL frames VRP decoding process as a deterministic, token-level Markov Decision process (MDP) [Wang et al., 2024a]. 
Its objective is" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 228, + 403, + 380, + 418 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 403, + 380, + 418 + ], + "spans": [ + { + "bbox": [ + 228, + 403, + 380, + 418 + ], + "type": "interline_equation", + "content": "\\mathcal {J} (\\theta) = \\mathbb {E} _ {(\\mathbf {x}, \\mathbf {y} ^ {*}) \\sim \\mathcal {D}, \\mathbf {y} \\sim \\pi_ {\\theta}} \\left[ R (\\mathbf {y}, \\mathbf {y} ^ {*}) \\right].", + "image_path": "91f9b8b3200cd1fa5635daeedf1c6ecf50a917b18a2af5197eda5580d3dc874a.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "spans": [ + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "inline_equation", + "content": "R(\\cdot, \\cdot)" + }, + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "text", + "content": " represents a reward function comparing generated answer " + }, + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "text", + "content": " with the golden answer " + }, + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "inline_equation", + "content": "\\mathbf{y}^*" + }, + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "text", + "content": " for any question " + }, + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "text", + "content": " sampled from dataset " + }, + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 104, + 423, + 504, + 447 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "text", + "content": "To compute the gradient " + }, + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "inline_equation", + "content": "\\nabla_{\\theta}\\mathcal{J}(\\theta)" + }, + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "text", + "content": ", computationally efficient algorithms GRPO [Shao et al., 2024] and REINFORCE++ [Hu, 2025] are widely adopted. Take GRPO as an example, given a question-answer pair " + }, + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "inline_equation", + "content": "\\mathbf{x},\\mathbf{y}^*" + }, + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "text", + "content": " and a group of " + }, + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "text", + "content": " generated responses " + }, + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_i" + }, + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "text", + "content": ", denote " + }, + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_{i,j}" + }, + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "text", + "content": " as the " + }, + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "text", + "content": "-th token of the " + }, + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 449, + 504, + 492 + ], + "type": "text", + "content": "-th response, it optimizes the following token-level objective:" + } + ] + } + 
], + "index": 13 + }, + { + "bbox": [ + 116, + 497, + 504, + 545 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 497, + 504, + 545 + ], + "spans": [ + { + "bbox": [ + 116, + 497, + 504, + 545 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {J} (\\boldsymbol {\\theta}) = \\mathbb {E} _ {(\\mathbf {x}, \\mathbf {y} ^ {*}) \\sim \\mathcal {D}, \\{\\mathbf {y} _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\boldsymbol {\\theta} _ {\\mathrm {o l d}}} (\\cdot | \\mathbf {x})} \\\\ \\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| \\mathbf {y} _ {i} |} \\sum_ {j = 1} ^ {| \\mathbf {y} _ {i} |} \\left(\\min \\left(r _ {i, j} (\\theta) \\hat {A} _ {i, j}, \\operatorname {c l i p} \\left(r _ {i, j} (\\theta), 1 - \\epsilon , 1 + \\epsilon\\right) \\hat {A} _ {i, j}\\right) - \\beta D _ {\\mathrm {K L}} \\left(\\pi_ {\\theta} \\| \\pi_ {\\text {r e f}}\\right)\\right) \\right], \\tag {2} \\\\ \\end{array}", + "image_path": "ee55be31f7c6103fd057348679e639180187cf989e289196bbf5b13e1e17c731.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 552, + 475, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 552, + 475, + 567 + ], + "spans": [ + { + "bbox": [ + 107, + 552, + 475, + 567 + ], + "type": "text", + "content": "where the token-level ratio " + }, + { + "bbox": [ + 107, + 552, + 475, + 567 + ], + "type": "inline_equation", + "content": "r_{i,j}(\\theta)" + }, + { + "bbox": [ + 107, + 552, + 475, + 567 + ], + "type": "text", + "content": " and the group-normalized advantage " + }, + { + "bbox": [ + 107, + 552, + 475, + 567 + ], + "type": "inline_equation", + "content": "\\hat{A}_{i,j}" + }, + { + "bbox": [ + 107, + 552, + 475, + 567 + ], + "type": "text", + "content": " are defined as:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 175, + 572, + 434, + 602 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 572, + 434, 
+ 602 + ], + "spans": [ + { + "bbox": [ + 175, + 572, + 434, + 602 + ], + "type": "interline_equation", + "content": "r _ {i, j} (\\theta) = \\frac {\\pi_ {\\theta} \\left(\\mathbf {y} _ {i , j} \\mid \\mathbf {x} , \\mathbf {y} _ {i , < j}\\right)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} \\left(\\mathbf {y} _ {i , j} \\mid \\mathbf {x} , \\mathbf {y} _ {i , < j}\\right)}, \\hat {A} _ {i, j} = \\frac {R _ {i} - \\operatorname {m e a n} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}{\\operatorname {s t d} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}.", + "image_path": "1d56081a4c756eb34fc910f55c821d9ab5537407188b813453634d13845d2f6a.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 613, + 504, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 613, + 504, + 648 + ], + "spans": [ + { + "bbox": [ + 104, + 613, + 504, + 648 + ], + "type": "text", + "content": "However, RL on base LLMs that haven't been well-aligned may suffer from issues like poor readability and language mixing, preventing researchers from verifying, understanding, and further developing their LLMs. And huge searching space makes efficient learning of meta-thinking daunting." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 663, + 167, + 676 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 663, + 167, + 676 + ], + "spans": [ + { + "bbox": [ + 105, + 663, + 167, + 676 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 689, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 505, + 723 + ], + "type": "text", + "content": "In this section, we present Reinforced Meta-thinking Agents (ReMA), a RL method integrating meta-thinking into the reasoning process of LLM under multi-agent settings (Sec. 
3.1), then describe the learning process enabled by MARL of single- and multi-turn LLM setting (Secs. 3.2.1 and 3.2.2)." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 367, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 367, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 367, + 84 + ], + "type": "text", + "content": "3.1 Deploying Meta-Thinking Reasoning Process for LLMs" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 92, + 504, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 92, + 504, + 159 + ], + "spans": [ + { + "bbox": [ + 104, + 92, + 504, + 159 + ], + "type": "text", + "content": "Beyond VRP (Sec. 2.1), recent studies [Muennighoff et al., 2025, Ye et al., 2025c] have shown that integrating meta-thinking behaviors in reasoning process can largely improve the accuracy of the final answers. By integrating Meta-thinking, ReMA decomposes problem solving into two sequential phases: a meta-thinking phase that plans, monitors, or revises strategy, followed by a reasoning phase that produces the detailed solution. We analyse Meta-thinking Reasoning Process along two orthogonal axes—single- vs. multi-agent and single- vs. multi-turn." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 163, + 504, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 163, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 104, + 163, + 504, + 186 + ], + "type": "text", + "content": "In a single-agent setting, such a process calls LLM once and generates meta-thinking and the following reasoning autoregressively. We formulate the meta-thinking reasoning process (MRP) below:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 242, + 192, + 504, + 205 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 192, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 242, + 192, + 504, + 205 + ], + "type": "interline_equation", + "content": "\\mathbf {y} \\sim \\pi_ {\\theta} (\\mathbf {y} \\mid \\mathbf {x}, \\mathbf {m}) \\cdot \\pi_ {\\theta} (\\mathbf {m} \\mid \\mathbf {x}), \\tag {3}", + "image_path": "75d16e4f13aaf643e0a515b5d8370809cade1a3c547cd2b914de320bdf6d4aa3.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 209, + 504, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 504, + 230 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 504, + 230 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 209, + 504, + 230 + ], + "type": "inline_equation", + "content": "\\mathbf{m}" + }, + { + "bbox": [ + 104, + 209, + 504, + 230 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 209, + 504, + 230 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 104, + 209, + 504, + 230 + ], + "type": "text", + "content": " are the output of meta-thinking and reasoning respectively. 
We present the procedure as shown below:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 230, + 231, + 504, + 246 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 231, + 504, + 246 + ], + "spans": [ + { + "bbox": [ + 230, + 231, + 504, + 246 + ], + "type": "interline_equation", + "content": "\\mathbf {x} \\xrightarrow {\\text {m e t a - t h i n k i n g}} \\mathbf {m} \\xrightarrow {\\text {r e a s o n i n g s t e p s}} \\mathbf {y} \\sim \\mathbf {a}. \\tag {4}", + "image_path": "2a12f2772fbb774a2c76affff4a8b8ba81eda3fe2d809a080d5e4968463bed50.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 248, + 504, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 248, + 504, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 248, + 504, + 315 + ], + "type": "text", + "content": "Exploring MRP reasoning through a single-agent approach is often inefficient, as it requires the language model to simultaneously master both meta-thinking and detailed problem-solving within one call. Prior research has demonstrated that activating different model capabilities through specialized agents significantly improves MRP exploration efficiency. To leverage this insight, we decouple meta-thinking and reasoning into two separate LLM agents: a high-level agent dedicated to generating meta-thinking, and a low-level agent focused on executing reasoning steps." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 319, + 504, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 319, + 504, + 363 + ], + "spans": [ + { + "bbox": [ + 104, + 319, + 504, + 363 + ], + "type": "text", + "content": "During a conversation, the high-level and low-level agents (i.e., " + }, + { + "bbox": [ + 104, + 319, + 504, + 363 + ], + "type": "inline_equation", + "content": "\\pi_h" + }, + { + "bbox": [ + 104, + 319, + 504, + 363 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 319, + 504, + 363 + ], + "type": "inline_equation", + "content": "\\pi_l" + }, + { + "bbox": [ + 104, + 319, + 504, + 363 + ], + "type": "text", + "content": ") act in an interleaving manner. The high-level agent generates and summarizes meta-thoughts from the prompt and interaction history, while the low-level agent executes detailed problem-solving under those instructions. We formulate the multi-agent meta-thinking reasoning process (MAMRP) as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 245, + 369, + 504, + 381 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 245, + 369, + 504, + 381 + ], + "spans": [ + { + "bbox": [ + 245, + 369, + 504, + 381 + ], + "type": "interline_equation", + "content": "\\mathbf {y} \\sim \\pi_ {l} (\\mathbf {y} \\mid \\mathbf {x}, \\mathbf {m}) \\pi_ {h} (\\mathbf {m} \\mid \\mathbf {x}). 
\\tag {5}", + "image_path": "569e05528f28d9bea18f232e9a85e68383a72aa9deef4c12e625df47568f6c0d.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 386, + 504, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 386, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 104, + 386, + 504, + 430 + ], + "type": "text", + "content": "While the single-turn MAMRP offers a straightforward approach, it lacks the ability to perform immediate and fine-grained cognitive switching during the reasoning process, which limits its effectiveness on complex, long-horizon planning tasks. Therefore, we extend Eq. (5) and formulate the multi-turn MAMRP as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 187, + 435, + 504, + 467 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 435, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 187, + 435, + 504, + 467 + ], + "type": "interline_equation", + "content": "\\mathbf {y} _ {T} \\sim \\prod_ {t = 1} ^ {T} \\pi_ {l} \\left(\\mathbf {y} _ {t} \\mid \\mathbf {x}, \\{\\mathbf {m}, \\mathbf {y} \\} _ {< t}, \\mathbf {m} _ {t}\\right) \\pi_ {h} \\left(\\mathbf {m} _ {t} \\mid \\mathbf {x}, \\{\\mathbf {m}, \\mathbf {y} \\} _ {< t}\\right) \\tag {6}", + "image_path": "84b1ab344c27fac96d5118e071408306d75ee69bcc10b08024cab4b8d264d754.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 472, + 459, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 472, + 459, + 484 + ], + "spans": [ + { + "bbox": [ + 105, + 472, + 459, + 484 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 472, + 459, + 484 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 105, + 472, + 459, + 484 + ], + "type": "text", + "content": " is the number of turns. 
Similarly, we present the process with a directed graph:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 491, + 504, + 512 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 491, + 504, + 512 + ], + "spans": [ + { + "bbox": [ + 115, + 491, + 504, + 512 + ], + "type": "interline_equation", + "content": "\\mathbf {x} \\xrightarrow [ \\pi_ {h} ]{\\text {m e t a - t h i n k i n g}} \\mathbf {m} _ {1} \\xrightarrow [ \\pi_ {l} ]{\\text {r e a s o n i n g}} \\mathbf {y} _ {1} \\xrightarrow [ \\pi_ {h} ]{\\text {m e t a - t h i n k i n g}} \\mathbf {m} _ {2} \\xrightarrow [ \\pi_ {l} ]{\\text {r e a s o n i n g}} \\mathbf {y} _ {2} \\xrightarrow [ \\pi_ {h} ]{\\text {m e t a - t h i n k i n g}} \\dots \\xrightarrow [ \\pi_ {l} ]{\\text {r e a s o n i n g}} \\mathbf {y} _ {T} \\sim \\mathbf {a}. \\tag {7}", + "image_path": "bb5a0da380958a3094a2173fe72a961587f3d1da6dbe550666ba873b3b27827c.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 517, + 504, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 504, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 504, + 539 + ], + "type": "text", + "content": "As a complex reasoning system, MAMRP provides various optimization opportunities in scaling inference-time computation. We leave further discussion of these aspects in Appendix C.1." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 552, + 330, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 552, + 330, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 552, + 330, + 563 + ], + "type": "text", + "content": "3.2 Training MAMRP: A Multi-Agent RL Method" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 571, + 504, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 571, + 504, + 605 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 504, + 605 + ], + "type": "text", + "content": "Multi-agent RL, unlike single-agent RL in a deterministic MDP, must contend with stochastic, nonstationary dynamics and rewards, making optimization more challenging. We start by considering an easier case, the optimization of single-turn MAMRP." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 616, + 279, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 279, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 279, + 628 + ], + "type": "text", + "content": "3.2.1 Optimizing Single-turn MAMRP" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 635, + 504, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 635, + 504, + 669 + ], + "spans": [ + { + "bbox": [ + 104, + 635, + 504, + 669 + ], + "type": "text", + "content": "To train the system from Sec. 3.1, we embed it as a Markov Game between the two agents. Suppose the two LLM agents are parameterized by " + }, + { + "bbox": [ + 104, + 635, + 504, + 669 + ], + "type": "inline_equation", + "content": "\\theta_h" + }, + { + "bbox": [ + 104, + 635, + 504, + 669 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 635, + 504, + 669 + ], + "type": "inline_equation", + "content": "\\theta_l" + }, + { + "bbox": [ + 104, + 635, + 504, + 669 + ], + "type": "text", + "content": ", respectively. 
Define a joint hierarchical policy over sequential decisions " + }, + { + "bbox": [ + 104, + 635, + 504, + 669 + ], + "type": "inline_equation", + "content": "\\mathbf{m}" + }, + { + "bbox": [ + 104, + 635, + 504, + 669 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 635, + 504, + 669 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 104, + 635, + 504, + 669 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 201, + 674, + 504, + 688 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 674, + 504, + 688 + ], + "spans": [ + { + "bbox": [ + 201, + 674, + 504, + 688 + ], + "type": "interline_equation", + "content": "\\mathbf {y} \\sim \\pi_ {\\left(\\theta_ {h}, \\theta_ {l}\\right)} (\\mathbf {y} \\mid \\mathbf {x}) := \\pi_ {\\theta_ {l}} (\\mathbf {y} \\mid \\mathbf {x}, \\mathbf {m}) \\cdot \\pi_ {\\theta_ {h}} (\\mathbf {m} \\mid \\mathbf {x}), \\tag {8}", + "image_path": "5117513c75d33b0d0f28a22b36d95dc4e9851d988c82b7161379bad840154e56.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 693, + 501, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 693, + 501, + 705 + ], + "spans": [ + { + "bbox": [ + 104, + 693, + 501, + 705 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 693, + 501, + 705 + ], + "type": "inline_equation", + "content": "R(\\mathbf{y}, \\mathbf{y}^*)" + }, + { + "bbox": [ + 104, + 693, + 501, + 705 + ], + "type": "text", + "content": " denote the final reward serves as the objective function " + }, + { + "bbox": [ + 104, + 693, + 501, + 705 + ], + "type": "inline_equation", + "content": "\\mathcal{J}(\\theta_h, \\theta_l)" + }, + { + "bbox": [ + 104, + 693, + 501, + 705 + ], + "type": "text", + "content": " for the joint policy:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 225, + 711, + 504, + 725 + ], + "type": 
"interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 711, + 504, + 725 + ], + "spans": [ + { + "bbox": [ + 225, + 711, + 504, + 725 + ], + "type": "interline_equation", + "content": "\\mathcal {J} \\left(\\theta_ {h}, \\theta_ {l}\\right) = \\mathbb {E} _ {\\mathbf {x}, \\mathbf {y} ^ {*}} \\mathbb {E} _ {\\mathbf {y} \\sim \\pi \\left(\\theta_ {h}, \\theta_ {l}\\right)} R (\\mathbf {y}, \\mathbf {y} ^ {*}). \\tag {9}", + "image_path": "05dc13f8590fd0e48920b97ac09cc7f97085627fc9eb5b18c41f8b40a2814e97.jpg" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 81, + 211, + 213 + ], + "blocks": [ + { + "bbox": [ + 118, + 70, + 201, + 79 + ], + "lines": [ + { + "bbox": [ + 118, + 70, + 201, + 79 + ], + "spans": [ + { + "bbox": [ + 118, + 70, + 201, + 79 + ], + "type": "text", + "content": "RL for VRP & MRP" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 108, + 81, + 211, + 213 + ], + "lines": [ + { + "bbox": [ + 108, + 81, + 211, + 213 + ], + "spans": [ + { + "bbox": [ + 108, + 81, + 211, + 213 + ], + "type": "image", + "image_path": "f246502d1a54bf77abbf1b84a3d339ae985d445d9515b42b122262754119fa92.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 211, + 82, + 369, + 213 + ], + "blocks": [ + { + "bbox": [ + 223, + 70, + 355, + 81 + ], + "lines": [ + { + "bbox": [ + 223, + 70, + 355, + 81 + ], + "spans": [ + { + "bbox": [ + 223, + 70, + 355, + 81 + ], + "type": "text", + "content": "ReMA with Separate 
Parameters" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 211, + 82, + 369, + 213 + ], + "lines": [ + { + "bbox": [ + 211, + 82, + 369, + 213 + ], + "spans": [ + { + "bbox": [ + 211, + 82, + 369, + 213 + ], + "type": "image", + "image_path": "7f3231499e748935cfc4e1cfa6d56049cf9d1cab539e1879a81522875ddbfa66.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 369, + 82, + 503, + 213 + ], + "blocks": [ + { + "bbox": [ + 372, + 70, + 498, + 80 + ], + "lines": [ + { + "bbox": [ + 372, + 70, + 498, + 80 + ], + "spans": [ + { + "bbox": [ + 372, + 70, + 498, + 80 + ], + "type": "text", + "content": "ReMA with Shared Parameters" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 369, + 82, + 503, + 213 + ], + "lines": [ + { + "bbox": [ + 369, + 82, + 503, + 213 + ], + "spans": [ + { + "bbox": [ + 369, + 82, + 503, + 213 + ], + "type": "image", + "image_path": "c1b6f6aaf161ccb2d707eb51858e4b49250221c289f22f7d7fd725b003912a00.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 220, + 504, + 299 + ], + "lines": [ + { + "bbox": [ + 104, + 220, + 504, + 299 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 504, + 299 + ], + "type": "text", + "content": "Figure 2: Comparison of training pipelines. Left: RL training of VRP and MRP, where a single LM agent is updated either with mixed (VRP) or explicit (MRP) meta-thinking. Middle: ReMA with separate parameters for the high-level (meta-thinking) and low-level (reasoning) agents; training alternates between freezing one agent and updating the other. Right: ReMA with shared parameters and multi-turn interactions: both agents share the same parameters and are distinguished by their system prompts. 
Training employs a turn-level ratio for stable multi-turn reinforcement learning and efficient updates, ensuring each turn's contribution is controlled to prevent instability." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 312, + 504, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 312, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 504, + 335 + ], + "type": "text", + "content": "During optimization procedure, the high-level policy " + }, + { + "bbox": [ + 104, + 312, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta_h}" + }, + { + "bbox": [ + 104, + 312, + 504, + 335 + ], + "type": "text", + "content": " and low-level policy " + }, + { + "bbox": [ + 104, + 312, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta_l}" + }, + { + "bbox": [ + 104, + 312, + 504, + 335 + ], + "type": "text", + "content": " aim to maximize their respective rewards independently. 
The optimization goals for agents are:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 198, + 340, + 504, + 358 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 340, + 504, + 358 + ], + "spans": [ + { + "bbox": [ + 198, + 340, + 504, + 358 + ], + "type": "interline_equation", + "content": "\\theta_ {h} ^ {*} = \\arg \\max _ {\\theta_ {h}} \\mathbb {E} _ {(\\mathbf {x}, \\mathbf {y} ^ {*}) \\sim \\mathcal {D}, \\mathbf {m} \\sim \\pi_ {\\theta_ {h}}, \\mathbf {y} \\sim \\pi_ {\\theta_ {l} ^ {*}}} \\left[ R _ {h} (\\mathbf {m}, \\mathbf {y}, \\mathbf {y} ^ {*}) \\right], \\tag {10}", + "image_path": "8113cac550a4f08accd48762ff47f3d3be5dc7819212ee3344ef23f9e79fceb8.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 182, + 360, + 504, + 379 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 360, + 504, + 379 + ], + "spans": [ + { + "bbox": [ + 182, + 360, + 504, + 379 + ], + "type": "interline_equation", + "content": "\\theta_ {l} ^ {*} \\left(\\theta_ {h}\\right) = \\arg \\max _ {\\theta_ {l}} \\mathbb {E} _ {\\left(\\mathbf {x}, \\mathbf {y} ^ {*}\\right) \\sim \\mathcal {D}, \\mathbf {m} \\sim \\pi_ {\\theta_ {h}}, \\mathbf {y} \\sim \\pi_ {\\theta_ {l}}} \\left[ R _ {l} \\left(\\mathbf {m}, \\mathbf {y}, \\mathbf {y} ^ {*}\\right) \\right], \\tag {11}", + "image_path": "f891e947f79753d2849f10bbf5382550512e2e05f5f7ecbdcdde601d0bbb9a59.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "type": "inline_equation", + "content": "R_{h}" + }, + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "type": 
"inline_equation", + "content": "R_{l}" + }, + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "type": "text", + "content": " are policies' individual reward functions, including " + }, + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 385, + 504, + 431 + ], + "type": "text", + "content": " and regularization according to tasks and models, e.g., different format rewards (refer to Appendix C.2 for details). The detailed algorithm is in the Algorithm 1. We illustrate the MAMRP inference procedure and the proposed training method in Fig. 2. We also provide an analysis of different loss functions in Appendix C.5." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 443, + 285, + 455 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 443, + 285, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 443, + 285, + 455 + ], + "type": "text", + "content": "3.2.2 Scaling up to Multi-turn MAMRP" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 462, + 504, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 462, + 504, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 462, + 504, + 485 + ], + "type": "text", + "content": "To scale up to multi-turn MAMRP, we can still adopt the iterative training strategy in Sec. 3.2.1. However, we make some changes to improve the efficiency of rollout and training." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 490, + 504, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 504, + 556 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 504, + 556 + ], + "type": "text", + "content": "First, we implement a parameter-sharing strategy where both high-level and low-level agents utilize identical model weights " + }, + { + "bbox": [ + 104, + 490, + 504, + 556 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 490, + 504, + 556 + ], + "type": "text", + "content": ", distinguished only by role-specific system prompts " + }, + { + "bbox": [ + 104, + 490, + 504, + 556 + ], + "type": "inline_equation", + "content": "S_{h}" + }, + { + "bbox": [ + 104, + 490, + 504, + 556 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 490, + 504, + 556 + ], + "type": "inline_equation", + "content": "S_{l}" + }, + { + "bbox": [ + 104, + 490, + 504, + 556 + ], + "type": "text", + "content": ". Formally, we define " + }, + { + "bbox": [ + 104, + 490, + 504, + 556 + ], + "type": "inline_equation", + "content": "\\pi_h = \\pi_\\theta (\\cdot |S_h,\\cdot)" + }, + { + "bbox": [ + 104, + 490, + 504, + 556 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 490, + 504, + 556 + ], + "type": "inline_equation", + "content": "\\pi_l = \\pi_\\theta (\\cdot |S_l,\\cdot)" + }, + { + "bbox": [ + 104, + 490, + 504, + 556 + ], + "type": "text", + "content": ", sharing the same underlying parameters rather than maintaining separate model instances. This approach eliminates the need for frequent model swapping on GPU during rollout, avoiding inefficient wait times, while enabling larger batch sizes during training to simultaneously optimize policies for both meta-thinking and reasoning roles." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 559, + 504, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 504, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 504, + 592 + ], + "type": "text", + "content": "Second, we propose a multi-turn GRPO with turn-level ratio to address the challenges of multi-turn MAMRP. The trajectory-level averaged objective with turn-level ratio of " + }, + { + "bbox": [ + 104, + 559, + 504, + 592 + ], + "type": "inline_equation", + "content": "\\pi_{l}" + }, + { + "bbox": [ + 104, + 559, + 504, + 592 + ], + "type": "text", + "content": " is defined as (The objective of " + }, + { + "bbox": [ + 104, + 559, + 504, + 592 + ], + "type": "inline_equation", + "content": "\\pi_h" + }, + { + "bbox": [ + 104, + 559, + 504, + 592 + ], + "type": "text", + "content": " is the similar but with different system prompt):" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 597, + 266, + 612 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 266, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 266, + 612 + ], + "type": "interline_equation", + "content": "\\mathcal {J} (\\boldsymbol {\\theta}) = \\mathbb {E} _ {(\\mathbf {x}, \\mathbf {y} ^ {*}) \\sim \\mathcal {D}, \\{(\\mathbf {m} _ {i}, \\mathbf {y} _ {i}) \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {\\mathrm {o l d}}} (\\cdot | \\mathbf {x})", + "image_path": "c2c45f6b75a3ecd7f886ca7735670f089d1dde49eae53b3ba52d5a768506d3ab.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 126, + 614, + 504, + 654 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 614, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 126, + 614, + 504, + 654 + ], + "type": "interline_equation", + "content": "\\left. 
\\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{T _ {i}} \\sum_ {t = 1} ^ {T _ {i}} \\frac {1}{| \\mathbf {y} _ {i , t} |} \\sum_ {j = 1} ^ {| \\mathbf {y} _ {i, t} |} \\left(\\min \\left(r _ {i, t} (\\theta) \\hat {A} _ {i, t, j}, \\operatorname {c l i p} \\left(r _ {i, t} (\\theta), 1 - \\epsilon , 1 + \\epsilon\\right) \\hat {A} _ {i, t, j}\\right) - \\beta D _ {\\mathrm {K L}} \\left(\\pi_ {\\theta} \\| \\pi_ {\\text {r e f}}\\right)\\right) \\right] \\right. \\tag {12}", + "image_path": "1bc2c9d3ff1fdd6b7268585f52555fec4647ff246738072f074f516e6a129d24.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 654, + 504, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 654, + 504, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 504, + 677 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 105, + 654, + 504, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{y}_{i,t,j}" + }, + { + "bbox": [ + 105, + 654, + 504, + 677 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 105, + 654, + 504, + 677 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 105, + 654, + 504, + 677 + ], + "type": "text", + "content": "-th token at turn " + }, + { + "bbox": [ + 105, + 654, + 504, + 677 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 105, + 654, + 504, + 677 + ], + "type": "text", + "content": " of the reasoning agent of the " + }, + { + "bbox": [ + 105, + 654, + 504, + 677 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 105, + 654, + 504, + 677 + ], + "type": "text", + "content": "-th trajectory. 
And the turn-level ratio for clipping is defined as:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 116, + 685, + 504, + 719 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 685, + 504, + 719 + ], + "spans": [ + { + "bbox": [ + 116, + 685, + 504, + 719 + ], + "type": "interline_equation", + "content": "r _ {i, t} (\\theta) = \\frac {1}{| \\mathbf {y} _ {i , t} |} \\sum_ {j = 1} ^ {| \\mathbf {y} _ {i, t} |} r _ {i, t, j} (\\theta) = \\frac {1}{| \\mathbf {y} _ {i , t} |} \\sum_ {j = 1} ^ {| \\mathbf {y} _ {i, t} |} \\frac {\\pi_ {\\theta} \\left(\\mathbf {y} _ {i , t , j} \\mid \\mathbf {x} , \\left\\{\\mathbf {m} _ {i , ,} , \\mathbf {y} _ {i ,} \\right\\} _ {< t} , \\mathbf {m} _ {i , t} , \\mathbf {y} _ {i , t , < j}\\right)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} \\left(\\mathbf {y} _ {i , t , j} \\mid \\mathbf {x} , \\left\\{\\mathbf {m} _ {i , ,}, \\mathbf {y} _ {i ,} \\right\\} _ {< t} , \\mathbf {m} _ {i , t} , \\mathbf {y} _ {i , t , < j}\\right)}. \\tag {13}", + "image_path": "fdfa8f8cf4f66137f42c7aba7c2e2ef31d21edf331150f3efd0c1f3325151c96.jpg" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "content": "The introduction of the turn-level ratio serves two key purposes. First, using a token-level ratio (Eq. (2)) in the objective introduces bias for multi-turn training, as it averages over all tokens in a trajectory. 
This means that tokens within longer turns (those containing more tokens) can disproportionately influence the overall loss, and averaging at the token level may encourage excessively long single-turn responses. Second, clipping each token independently risks instability during training." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 133, + 506, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 133, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 506, + 189 + ], + "type": "text", + "content": "In contrast, the turn-level ratio aligns more closely with the underlying MDP formulation by treating all tokens within a turn as a single action and applying clipping at the turn level. Intuitively, this approach stabilizes training by preventing the LLM from making unstable updates that could result in extreme outputs, such as overly long repetitions or incoherent text. We conduct experimental verification in subsequent empirical results (Sec. 4.3)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 205, + 192, + 218 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 205, + 192, + 218 + ], + "spans": [ + { + "bbox": [ + 105, + 205, + 192, + 218 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 231, + 504, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 231, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 104, + 231, + 504, + 275 + ], + "type": "text", + "content": "To evaluate the effectiveness and efficiency of ReMA, we conduct experiments on challenging benchmarks for two types of tasks: mathematical reasoning and LLM-as-a-Judge with three different LLMs. Then, we investigate the models' performance in both single- & multi-turn settings. Finally, we provide ablation studies and qualitative analyses of our method." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 289, + 218, + 301 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 289, + 218, + 301 + ], + "spans": [ + { + "bbox": [ + 105, + 289, + 218, + 301 + ], + "type": "text", + "content": "4.1 Experiment Settings" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 310, + 504, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 310, + 504, + 355 + ], + "spans": [ + { + "bbox": [ + 104, + 310, + 504, + 355 + ], + "type": "text", + "content": "We first analyze the single-turn case of ReMA, i.e., " + }, + { + "bbox": [ + 104, + 310, + 504, + 355 + ], + "type": "inline_equation", + "content": "T = 1" + }, + { + "bbox": [ + 104, + 310, + 504, + 355 + ], + "type": "text", + "content": ". The high-level agent generates a complete meta-thinking trace in one shot, and the low-level agent follows the instructions and outputs the final results. Single-turn ReMA reduces stochasticity and training cost while our experiments show that it still provides meaningful performance gains." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 368, + 504, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 368, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 368, + 504, + 434 + ], + "type": "text", + "content": "Benchmarks We conduct experiments on two types of tasks: mathematical reasoning and LLM-as-a-Judge. For mathematical reasoning experiments, we train models on 7.5k training samples in MATH [Hendrycks et al., 2021] and use MATH500 [Lightman et al., 2023] as the in-distribution test dataset. 
Additionally, we test the optimized models on out-of-distribution datasets: GSM8K [Cobbe et al., 2021], AIME24" + }, + { + "bbox": [ + 104, + 368, + 504, + 434 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 104, + 368, + 504, + 434 + ], + "type": "text", + "content": ", AMC23" + }, + { + "bbox": [ + 104, + 368, + 504, + 434 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 104, + 368, + 504, + 434 + ], + "type": "text", + "content": ", GaoKao2023En [Zhang et al., 2023], Minerva Math [Lewkowycz et al., 2022], and Olympiad Bench [He et al., 2024]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 438, + 504, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 438, + 504, + 494 + ], + "spans": [ + { + "bbox": [ + 104, + 438, + 504, + 494 + ], + "type": "text", + "content": "For LLM-as-a-Judge benchmarks, we train models on RewardBench [Lambert et al., 2024]. Specifically, we convert the original data into a pair-ranking format and split it into a training set of 5k items and a test set of 970 items, denoted as RewardBench970. The models are also tested on JudgeBench [Tan et al., 2024] to assess out-of-distribution performance. We refer to Appendix D.1.2 for detailed comparisons and results." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 507, + 504, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 507, + 504, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 507, + 504, + 552 + ], + "type": "text", + "content": "Baselines, Models, Training Settings We compare pass@1 performance across the following methods: (1) VRP (CoT, step-by-step prompting, Sec. 
3.1); (2) " + }, + { + "bbox": [ + 104, + 507, + 504, + 552 + ], + "type": "inline_equation", + "content": "\\mathbf{VRP}_{\\mathbf{RL}}" + }, + { + "bbox": [ + 104, + 507, + 504, + 552 + ], + "type": "text", + "content": " (RL under VRP); (3) " + }, + { + "bbox": [ + 104, + 507, + 504, + 552 + ], + "type": "inline_equation", + "content": "\\mathbf{MRP}_{\\mathbf{RL}}" + }, + { + "bbox": [ + 104, + 507, + 504, + 552 + ], + "type": "text", + "content": " (RL under MRP with high-level task analysis, Eq. (4)), and (4) ReMA (ours, RL under MAMRP, Eq. (7))." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 555, + 504, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 504, + 634 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 504, + 634 + ], + "type": "text", + "content": "We train and test Llama-3-8B-Instruct, Llama-3.1-8B-Instruct [Dubey et al., 2024], and Qwen2.5-7B-Instruct [Team, 2024] on mathematical reasoning benchmarks. For LLM-as-a-judge benchmarks, we train and test Llama-3.1-8B-Instruct and Qwen2.5-7B-Instruct. We use instruct-tuned LLMs to prompt them to perform VRP, MRP, and MAMRP directly during training. Unless specified, we use two separate copies of the same model for high- and low-level agents in ReMA. We use the base reward setting in Appendix C.2 by default. And for the underlying RL algorithm, we use REINFORCE++ [Hu, 2025]. We refer to Appendix D for detailed training settings." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 647, + 254, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 647, + 254, + 659 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 254, + 659 + ], + "type": "text", + "content": "4.2 Results of Single-turn ReMA" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 669, + 504, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 669, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 669, + 504, + 691 + ], + "type": "text", + "content": "Question 1. Does single-turn ReMA outperforms baselines on both in-distribution and out-of-distribution test sets?" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 116, + 700, + 436, + 711 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 700, + 436, + 711 + ], + "spans": [ + { + "bbox": [ + 116, + 700, + 436, + 711 + ], + "type": "text", + "content": "4https://huggingface.co/datasets/AI-MO/aimo-validation-aime" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 118, + 712, + 431, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 712, + 431, + 721 + ], + "spans": [ + { + "bbox": [ + 118, + 712, + 431, + 721 + ], + "type": "text", + "content": "5https://huggingface.co/datasets/AI-MO/aimo-validation-amc" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 79, + 504, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 79, + 504, + 122 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 504, + 122 + ], + 
"type": "text", + "content": "Table 1: Performance on in-distribution test sets and out-of-distribution test sets. We also report the improvement/degradation w.r.t. basic CoT performance(VRP). On average, ReMA outperforms all baselines. Particularly on out-of-distribution datasets, ReMA achieves the highest performance on most of the benchmarks." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 126, + 137, + 485, + 427 + ], + "blocks": [ + { + "bbox": [ + 236, + 125, + 373, + 134 + ], + "lines": [ + { + "bbox": [ + 236, + 125, + 373, + 134 + ], + "spans": [ + { + "bbox": [ + 236, + 125, + 373, + 134 + ], + "type": "text", + "content": "(a) Performance on math benchmarks" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 126, + 137, + 485, + 427 + ], + "lines": [ + { + "bbox": [ + 126, + 137, + 485, + 427 + ], + "spans": [ + { + "bbox": [ + 126, + 137, + 485, + 427 + ], + "type": "table", + "html": "
ModelBenchmarkVRP(CoT)\\( \\mathbf{V R P_{R L}} \\)\\( \\mathbf{M R P_{R L}} \\)ReMA(Ours)
Llama3-8B-InstructMATH50030.8033.40 (+2.60)32.80 (+2.00)33.80 (+3.00)
GSM8K67.4881.80 (+14.32)79.68 (+12.20)79.38 (+11.90)
AIME240.000.00 (+0.00)3.33 (+3.33)0.00 (+0.00)
AMC232.5010.00 (+7.50)12.50 (+10.00)22.50 (+20.00)
Gaokao2023en22.3427.53 (+5.19)23.38 (+1.04)28.57 (+6.23)
Minerva Math8.8216.54 (+7.72)18.01 (+9.19)13.97 (+5.15)
Olympiad Bench8.448.89 (+0.45)9.33 (+0.89)8.89 (+0.45)
Average20.0525.45 (+5.40)25.58 (+5.53)26.73 (+6.68)
Llama3.1-8B-InstructMATH50050.8050.20 (-0.60)48.60 (-2.20)53.20 (+2.40)
GSM8K86.0584.53 (-1.52)85.37 (-0.68)87.26 (+1.21)
AIME2410.003.33 (-6.67)6.67 (-3.33)13.33 (+3.33)
AMC2327.5012.50 (-15.00)30.00 (+2.50)20.00 (-7.50)
Gaokao2023en38.9636.10 (-2.86)37.14 (-1.82)37.14 (-1.82)
Minerva Math22.7926.84 (+4.05)25.37 (+2.58)28.31 (+5.52)
Olympiad Bench15.1119.70 (+4.59)15.70 (+0.59)19.56 (+4.45)
Average35.8933.32 (-2.57)35.55 (-0.34)36.97 (+1.08)
Qwen2.5-7B-InstructMATH50075.0077.20 (+2.20)76.40 (+1.40)74.40 (-0.60)
GSM8K92.0491.36 (-0.68)91.81 (-0.23)90.60 (-1.44)
AIME246.676.67 (+0.00)10.00 (+3.33)20.00 (+13.33)
AMC2347.5050.00 (+2.50)52.50 (+5.00)57.50 (+10.00)
Gaokao2023en56.6254.81 (-1.81)55.06 (-1.56)57.92 (+1.30)
Minerva Math35.6634.93 (-0.73)32.35 (-3.31)34.93 (-0.73)
Olympiad Bench38.2238.37 (+0.15)37.78 (-0.44)36.30 (-1.92)
Average50.2450.48 (+0.24)50.84 (+0.60)53.09 (+2.85)
", + "image_path": "032ea026c484b25fb0540e17ead1ad673917359410de718a424734cd30d558a2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 124, + 445, + 486, + 553 + ], + "blocks": [ + { + "bbox": [ + 215, + 434, + 395, + 444 + ], + "lines": [ + { + "bbox": [ + 215, + 434, + 395, + 444 + ], + "spans": [ + { + "bbox": [ + 215, + 434, + 395, + 444 + ], + "type": "text", + "content": "(b) Performance on LLM-as-a-Judge benchmarks" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 124, + 445, + 486, + 553 + ], + "lines": [ + { + "bbox": [ + 124, + 445, + 486, + 553 + ], + "spans": [ + { + "bbox": [ + 124, + 445, + 486, + 553 + ], + "type": "table", + "html": "
ModelBenchmarkVRP(CoT)\\( \\mathbf{V R P_{R L}} \\)\\( \\mathbf{M R P_{R L}} \\)ReMA(Ours)
Llama3.1-8B-InstructRewardBench97069.4882.89 (+13.41)81.13 (+11.65)83.71 (+14.23)
JudgeBench51.2951.94 (+0.65)52.90 (+1.61)52.90 (+1.61)
Average60.3967.41 (+7.02)67.02 (+6.63)68.31 (+7.92)
Qwen2.5-7B-InstructRewardBench97078.5685.36 (+6.80)86.49 (+7.93)83.51 (+4.95)
JudgeBench58.3956.94 (-1.45)58.39 (+0.00)56.94 (-1.45)
Average68.4771.15 (+2.68)72.44 (+3.97)70.22 (+1.75)
", + "image_path": "71aca539c79024c7c54c8b500119e08584e1cf23534f6bae0662ad5dc7fbc4aa.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 585, + 504, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 585, + 504, + 629 + ], + "spans": [ + { + "bbox": [ + 104, + 585, + 504, + 629 + ], + "type": "text", + "content": "Table 1 compares the greedy decoding performance of ReMA against various RL baselines across mathematical benchmarks (Table 1a) and LLM-as-a-Judge benchmarks (Table 1b). Results across different LLMs indicate that, on average, ReMA outperforms all baselines, achieving a maximum improvement of " + }, + { + "bbox": [ + 104, + 585, + 504, + 629 + ], + "type": "inline_equation", + "content": "6.68\\%" + }, + { + "bbox": [ + 104, + 585, + 504, + 629 + ], + "type": "text", + "content": " on mathematical benchmarks and " + }, + { + "bbox": [ + 104, + 585, + 504, + 629 + ], + "type": "inline_equation", + "content": "8.49\\%" + }, + { + "bbox": [ + 104, + 585, + 504, + 629 + ], + "type": "text", + "content": " on LLM-as-a-Judge benchmarks." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 634, + 504, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 504, + 722 + ], + "type": "text", + "content": "Notably, ReMA achieves the highest performance on most benchmarks, particularly on out-of-distribution datasets, with a maximum improvement of " + }, + { + "bbox": [ + 104, + 634, + 504, + 722 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 104, + 634, + 504, + 722 + ], + "type": "text", + "content": " on AMC23 for Llama3-8B-Instruct, " + }, + { + "bbox": [ + 104, + 634, + 504, + 722 + ], + "type": "inline_equation", + "content": "13.33\\%" + }, + { + "bbox": [ + 104, + 634, + 504, + 722 + ], + "type": "text", + "content": " on AIME24 for Qwen2.5-7B-Instruct, " + }, + { + "bbox": [ + 104, + 634, + 504, + 722 + ], + "type": "inline_equation", + "content": "14.23\\%" + }, + { + "bbox": [ + 104, + 634, + 504, + 722 + ], + "type": "text", + "content": " on RewardBench970 for Llama3.1-8B-Instruct. These results demonstrate the superior out-of-distribution generalization ability conferred by the meta-thinking mechanism in ReMA. However, we observe that the accuracy gains from RL training on instruction-tuned LMs are smaller than from base models (Sec. 4.2.1). This may be due to the higher initial performance and the relatively fixed output distribution of instruction-tuned models, which limits the improvement and peak performance in RL." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 114, + 65, + 495, + 205 + ], + "blocks": [ + { + "bbox": [ + 114, + 65, + 495, + 205 + ], + "lines": [ + { + "bbox": [ + 114, + 65, + 495, + 205 + ], + "spans": [ + { + "bbox": [ + 114, + 65, + 495, + 205 + ], + "type": "image", + "image_path": "37ed86a4d76064b8e7dc589771be62484945d0a0fbaf36c2bdeedac73830355d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 211, + 504, + 234 + ], + "lines": [ + { + "bbox": [ + 104, + 211, + 504, + 234 + ], + "spans": [ + { + "bbox": [ + 104, + 211, + 504, + 234 + ], + "type": "text", + "content": "Figure 3: An RL experiment with 3 training schemes. While RL from SFT excels on easier problems, RL under Meta-thinking shows superior generalization to harder problems like AIME24." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 106, + 239, + 297, + 331 + ], + "blocks": [ + { + "bbox": [ + 106, + 239, + 297, + 331 + ], + "lines": [ + { + "bbox": [ + 106, + 239, + 297, + 331 + ], + "spans": [ + { + "bbox": [ + 106, + 239, + 297, + 331 + ], + "type": "image", + "image_path": "7cef31214b1e22f8feceb774c7c2fda0d8822a64668c93c676a445aa329e03c2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 335, + 504, + 359 + ], + "lines": [ + { + "bbox": [ + 104, + 335, + 504, + 359 + ], + "spans": [ + { + "bbox": [ + 104, + 335, + 504, + 359 + ], + "type": "text", + "content": "Figure 4: Average problem difficulty by action type during training. Left: 1B LM collapses to the EMPTY action. Right: 8B LM adapts to a more complex meta-thinking strategy for harder problems." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 309, + 239, + 501, + 332 + ], + "blocks": [ + { + "bbox": [ + 309, + 239, + 501, + 332 + ], + "lines": [ + { + "bbox": [ + 309, + 239, + 501, + 332 + ], + "spans": [ + { + "bbox": [ + 309, + 239, + 501, + 332 + ], + "type": "image", + "image_path": "8067de4a34648160295745db52239f10161f855ef4f748f78627686ff344515e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 371, + 329, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 371, + 329, + 384 + ], + "spans": [ + { + "bbox": [ + 105, + 371, + 329, + 384 + ], + "type": "text", + "content": "4.2.1 Meta-thoughts boost low-level generalization" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 391, + 334, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 391, + 334, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 391, + 334, + 403 + ], 
+ "type": "text", + "content": "Question 2. Can Reasoning benefit from Meta-thinking?" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 407, + 504, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 407, + 504, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 407, + 504, + 485 + ], + "type": "text", + "content": "Here we provide a tiny but motivating example of how ReMA gives better learning dynamics. We use Qwen2.5-Math-7B [Yang et al., 2024] as the starting base model, MATH (level 3-5, about " + }, + { + "bbox": [ + 104, + 407, + 504, + 485 + ], + "type": "inline_equation", + "content": "5.5\\mathrm{K}" + }, + { + "bbox": [ + 104, + 407, + 504, + 485 + ], + "type": "text", + "content": " number of instances) as the training dataset, and we compare three reinforcement learning training schemes, in particular: (1) RL from Base: train the base model directly on MATH with binary outcome reward; (2) RL from SFT: SFT the base model with GPT-4o's CoT answers; then RL on train dataset with binary outcome reward; (3) RL under Meta-thinking: SFT the base model with GPT-4o's meta-thinking plans; then RL on train dataset with binary outcome reward." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 490, + 504, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 504, + 534 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 504, + 534 + ], + "type": "text", + "content": "The models are evaluated on 3 benchmarks (Fig. 3). SFT brings the best initial accuracy on in-distribution and easier sets, but fails to improve on harder ones. RL from Base yields limited gains. In contrast, RL under Meta-thinking achieves the best learning dynamics and generalizes better to challenging problems (AIME24). See Appendix F.1 for case studies." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 548, + 337, + 561 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 548, + 337, + 561 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 337, + 561 + ], + "type": "text", + "content": "4.2.2 Diverse meta-thinking characteristics of LLMs" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 568, + 324, + 580 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 568, + 324, + 580 + ], + "spans": [ + { + "bbox": [ + 105, + 568, + 324, + 580 + ], + "type": "text", + "content": "Question 3. How well can LLMs learn to meta-think?" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 585, + 504, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 585, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 585, + 504, + 662 + ], + "type": "text", + "content": "To further analyze meta-thinking behaviors, we train models with structured JSON-format actions inspired by Yue et al.. The meta-thinking agent generate two entries in one LM call, first selects from three actions: DECOMPOSE (breaking into subproblems), REWRITE (simplifying the problem), or EMPTY (direct solving), then generates the corresponding text. We compare Llama-3.1-8B-Instruct and Llama-3.2-1B-Instruct to study scale effects (two 1B models vs two 8B models) on meta-thinking agent's training. We use vLLM guided JSON decoding [Dong et al., 2024] for valid formatting and base reward (reasoning agent's solution accuracy with format constraints)." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 504, + 723 + ], + "type": "text", + "content": "We observe that smaller LMs produce simpler outputs, likely due to limited capacity to maintain valid JSON formatting while exploring diverse reasoning strategies. As Fig. 4 shows, smaller LMs like Llama-3.2-1B-Instruct quickly converge to the simplest EMPTY action to avoid formatting penalties, while larger LMs like Llama-3.1-8B-Instruct can adapt meta-thinking strategies based on problem difficulty. See Appendix F.3 for detailed case studies." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 62, + 286, + 167 + ], + "blocks": [ + { + "bbox": [ + 108, + 62, + 286, + 167 + ], + "lines": [ + { + "bbox": [ + 108, + 62, + 286, + 167 + ], + "spans": [ + { + "bbox": [ + 108, + 62, + 286, + 167 + ], + "type": "image", + "image_path": "7be77f52a452cae631a3999a5c1a37f14186bd400761aa6b2200170fc2818d62.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 170, + 504, + 204 + ], + "lines": [ + { + "bbox": [ + 104, + 170, + 504, + 204 + ], + "spans": [ + { + "bbox": [ + 104, + 170, + 504, + 204 + ], + "type": "text", + "content": "Figure 5: Training results of multi-turn Figure 6: Ablations of multi-turn ReMA on a tiny subReMA on MATH-Level3-5-8K under different set of MATH, we only show here the training curves of different training and rollout 
configurations." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 295, + 62, + 503, + 167 + ], + "blocks": [ + { + "bbox": [ + 295, + 62, + 503, + 167 + ], + "lines": [ + { + "bbox": [ + 295, + 62, + 503, + 167 + ], + "spans": [ + { + "bbox": [ + 295, + 62, + 503, + 167 + ], + "type": "image", + "image_path": "0a12a930e142fe2cdce1144a9d1bcf93a65808b7f66fd19e01bc8a6c088ffc04.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 228, + 307, + 241 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 228, + 307, + 241 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 307, + 241 + ], + "type": "text", + "content": "4.3 Extending ReMA to Multi-turn MAMRP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 255, + 504, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 255, + 504, + 278 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 504, + 278 + ], + "type": "text", + "content": "We further extend ReMA to multi-turn MAMRP settings, enabling multiple rounds of interaction between the meta-thinking agent and the reasoning agent as defined in Eq. (7)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 282, + 504, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 282, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 104, + 282, + 504, + 327 + ], + "type": "text", + "content": "Unlike the inherent VRP capabilities of most LLMs, multi-turn ReMA requires initial bootstrapping. Thus, we constructed a supervised fine-tuning dataset (about 0.8K samples) from LIMO [Ye et al., 2025c] using GPT-4o to establish the starting point for multi-turn interaction capabilities. Then we finetune Qwen2.5-7B before RL training." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 331, + 504, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 331, + 504, + 376 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 504, + 376 + ], + "type": "text", + "content": "As described in Sec.3.2.2, we deploy the proposed GRPO with turn-level ratio clipping and trajectory-level averaging loss during training. And we remove the KL-divergence term to allow more flexible exploration. By default, the agents share the same parameters and are simultaneously updated using their trajectories. We refer to details in Appendix D.2." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 402, + 231, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 402, + 231, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 231, + 414 + ], + "type": "text", + "content": "4.3.1 Results and Ablations" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 426, + 334, + 439 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 334, + 439 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 334, + 439 + ], + "type": "text", + "content": "Question 4. Can ReMA be scaled to multi-turn settings?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 443, + 504, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 443, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 443, + 504, + 510 + ], + "type": "text", + "content": "There are two key points revealed by our multi-turn ReMA experiments, as shown in Fig. 5. 
On one hand, the algorithm can demonstrate effective convergence on the training set, with accuracy steadily increasing from approximately " + }, + { + "bbox": [ + 104, + 443, + 504, + 510 + ], + "type": "inline_equation", + "content": "55\\%" + }, + { + "bbox": [ + 104, + 443, + 504, + 510 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 443, + 504, + 510 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 104, + 443, + 504, + 510 + ], + "type": "text", + "content": " during training. It also achieves an average performance gain of about " + }, + { + "bbox": [ + 104, + 443, + 504, + 510 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 104, + 443, + 504, + 510 + ], + "type": "text", + "content": " across all seven test benchmarks, indicating stable improvements on out-of-distribution data. (Experiment with the rollout config of turn30_token512, see Appendix D.2.2 and Fig. 8 for more details.)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 514, + 505, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 505, + 613 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 505, + 613 + ], + "type": "text", + "content": "On the other hand, we observe that the performance of multi-turn ReMA is highly sensitive to hyperparameters such as the maximum response length per turn and the maximum number of turns. For certain configurations, the model either collapses into producing massive repetitions within a single turn or generates empty responses after only a few turns. Similar phenomena have been reported in concurrent works such as RAGEN [Wang et al., 2025], where these issues are attributed to the lack of fine-grained, reasoning-aware guidance. As a result, multi-turn RL becomes susceptible to long-horizon credit assignment challenges and state drift, often leading to reduced exploration diversity—a phenomenon referred to as the \"Echo Trap\". 
To address this challenge, it is essential to comprehensively explore the training recipe w.r.t. model, data, and algorithm." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 617, + 457, + 630 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 617, + 457, + 630 + ], + "spans": [ + { + "bbox": [ + 104, + 617, + 457, + 630 + ], + "type": "text", + "content": "Question 5. How does parameter sharing and turn-level ratio affect multi-turn ReMA?" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 634, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 505, + 723 + ], + "type": "text", + "content": "As shown in Fig. 6, we compare different configurations on a smaller dataset consisting of 133 samples—19 from each of the 7 MATH problem types—to evaluate sample efficiency and convergence speed. First, all configurations eventually achieve nearly " + }, + { + "bbox": [ + 104, + 634, + 505, + 723 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 634, + 505, + 723 + ], + "type": "text", + "content": " accuracy on the training dataset. Notably, the trajectory-level loss with turn-level ratio (Turn-Ratio, Eq. (13)) demonstrates substantially better sample efficiency than its token-level variants (Eq. (2)), reaching higher training rewards with fewer steps. We also present the training curve of separate weight setting, the empirical results show that shared parameters with simultaneous updates converge noticeably faster." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 72, + 183, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 72, + 183, + 83 + ], + "spans": [ + { + "bbox": [ + 107, + 72, + 183, + 83 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 95, + 504, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 95, + 504, + 216 + ], + "spans": [ + { + "bbox": [ + 107, + 95, + 504, + 216 + ], + "type": "text", + "content": "In this paper, we introduced ReMA, a novel framework that leverages multi-agent reinforcement learning to elicit meta-thinking in large language models. By explicitly separating meta-thinking and reasoning processes into distinct agents, our approach enhances both exploration during training and the interpretability of model outputs. We tailored RL algorithms and reward functions to ensure reliable performance. Through comprehensive experiments on mathematical reasoning and LLM-as-a-Judge benchmarks, ReMA consistently achieved superior results, particularly on out-of-distribution datasets. We further extend ReMA to multi-turn settings, enabling the framework to handle more complex reasoning scenarios that require more communication between agents. Our ablations demonstrate how effective coordination between agents evolves, highlighting the promise of reinforcement learning and structured agents' collaboration for advancing the capabilities of language models in complex reasoning tasks." 
+ } + ] + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 89, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 105, + 89, + 505, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 89, + 505, + 124 + ], + "spans": [ + { + "bbox": [ + 105, + 89, + 505, + 124 + ], + "type": "text", + "content": "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 129, + 504, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 129, + 504, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 129, + 504, + 153 + ], + "type": "text", + "content": "Elif Akata, Lion Schulz, Julian Coda-Forno, Seong Joon Oh, Matthias Bethge, and Eric Schulz. Playing repeated games with large language models. arXiv preprint arXiv:2305.16867, 2023." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 159, + 504, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 159, + 504, + 193 + ], + "spans": [ + { + "bbox": [ + 105, + 159, + 504, + 193 + ], + "type": "text", + "content": "Cem Anil, Guodong Zhang, Yuhuai Wu, and Roger B. Grosse. Learning to give checkable answers with prover-verifier games. CoRR, abs/2108.12099, 2021. URL https://arxiv.org/abs/2108.12099." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 199, + 504, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 199, + 504, + 235 + ], + "spans": [ + { + "bbox": [ + 105, + 199, + 504, + 235 + ], + "type": "text", + "content": "Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: A family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 1, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 239, + 504, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 239, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 105, + 239, + 504, + 275 + ], + "type": "text", + "content": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 281, + 504, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 281, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 105, + 281, + 504, + 304 + ], + "type": "text", + "content": "Jiaqi Chen, Yuxian Jiang, Jiachen Lu, and Li Zhang. S-agents: Self-organizing agents in open-ended environments. arXiv preprint arXiv:2402.04578, 2024a." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 309, + 504, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 309, + 504, + 355 + ], + "spans": [ + { + "bbox": [ + 105, + 309, + 504, + 355 + ], + "type": "text", + "content": "Qiguang Chen, Libo Qin, Jiaqi WANG, Jingxuan Zhou, and Wanxiang Che. Unlocking the capabilities of thought: A reasoning boundary framework to quantify and optimize chain-of-thought. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024b. URL https://openreview.net/forum?id=pC44UMwy2v." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 361, + 504, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 361, + 504, + 396 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 504, + 396 + ], + "type": "text", + "content": "Shuhao Chen, Weisen Jiang, Baijiong Lin, James T Kwok, and Yu Zhang. Routersc: Query-based router by dual contrastive learning for assembling large language models. arXiv preprint arXiv:2409.19886, 2024c." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 402, + 504, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 402, + 504, + 446 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 504, + 446 + ], + "type": "text", + "content": "Weize Chen, Yusheng Su, Jingwei Zuo, Cheng Yang, Chenfei Yuan, Chi-Min Chan, Heyang Yu, Yaxi Lu, Yi-Hsin Hung, Chen Qian, et al. Agentverse: Facilitating multi-agent collaboration and exploring emergent behaviors. In The Twelfth International Conference on Learning Representations, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 453, + 504, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 453, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 504, + 498 + ], + "type": "text", + "content": "Yongchao Chen, Jacob Arkin, Charles Dawson, Yang Zhang, Nicholas Roy, and Chuchu Fan. 
Autotamp: Autoregressive task and motion planning with llms as translators and checkers. In 2024 IEEE International conference on robotics and automation (ICRA), pages 6695-6702. IEEE, 2024d." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 505, + 504, + 549 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 505, + 504, + 549 + ], + "spans": [ + { + "bbox": [ + 105, + 505, + 504, + 549 + ], + "type": "text", + "content": "Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. Palm: Scaling language modeling with pathways. Journal of Machine Learning Research, 24(240): 1-113, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 555, + 504, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 504, + 590 + ], + "type": "text", + "content": "Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training. arXiv preprint arXiv:2501.17161, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 596, + 504, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 596, + 504, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 504, + 631 + ], + "type": "text", + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 636, + 504, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 636, + 504, + 660 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 504, + 660 + ], + "type": "text", + "content": "Google DeepMind. Gemini flash thinking, 2025. URL https://deepmind.google/technologies/gemini/flash-thinking/. Accessed: 2025-01-29." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 666, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 666, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 504, + 723 + ], + "type": "text", + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao," + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 504, + 723 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 115, + 72, + 504, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 72, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 115, + 72, + 504, + 335 + ], + "type": "text", + "content": "Guanting Chen, Guowei Li, H. 
Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun, T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W. L. Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. X. Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Yishi Piao, Yisong Wang, Yixuan Tan, Yiyang Ma, Yiyuan Liu, Yongqiang Guo, Yuan Ou, Yuduan Wang, Yue Gong, Yuheng Zou, Yujia He, Yunfan Xiong, Yuxiang Luo, Yuxiang You, Yuxuan Liu, Yuyang Zhou, Y. X. Zhu, Yanhong Xu, Yanping Huang, Yaohui Li, Yi Zheng, Yuchen Zhu, Yunxian Ma, Ying Tang, Yukun Zha, Yuting Yan, Z. Z. Ren, Zehui Ren, Zhangli Sha, Zhe Fu, Zhean Xu, Zhenda Xie, Zhengyan Zhang, Zhewen Hao, Zhicheng Ma, Zhigang Yan, Zhiyu Wu, Zihui Gu, Zijia Zhu, Zijun Liu, ZiLin Li, Ziwei Xie, Ziyang Song, Zizheng Pan, Zhen Huang, Zhipeng Xu, Zhongyu Zhang and Zhen Zhang. 
Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. 2025. URL https://arxiv.org/abs/2501.12948." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 343, + 504, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 343, + 504, + 387 + ], + "spans": [ + { + "bbox": [ + 107, + 343, + 504, + 387 + ], + "type": "text", + "content": "Aniket Didolkar, Anirudh Goyal, Nan Rosemary Ke, Siyuan Guo, Michal Valko, Timothy Lillicrap, Danilo Rezende, Yoshua Bengio, Michael Mozer, and Sanjeev Arora. Metacognitive capabilities of llms: An exploration in mathematical problem solving. arXiv preprint arXiv:2405.12205, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 396, + 504, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 396, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 504, + 430 + ], + "type": "text", + "content": "Dujian Ding, Ankur Mallick, Chi Wang, Robert Sim, Subhabrata Mukherjee, Victor Ruhle, Laks VS Lakshmanan, and Ahmed Hassan Awadallah. Hybrid llm: Cost-efficient and quality-aware query routing. arXiv preprint arXiv:2404.14618, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 438, + 504, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 438, + 504, + 461 + ], + "spans": [ + { + "bbox": [ + 107, + 438, + 504, + 461 + ], + "type": "text", + "content": "Kefan Dong and Tengyu Ma. Stp: Self-play llm theorem provers with iterative conjecturing and proving, 2025. URL https://arxiv.org/abs/2502.00212." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 469, + 504, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 469, + 504, + 503 + ], + "spans": [ + { + "bbox": [ + 107, + 469, + 504, + 503 + ], + "type": "text", + "content": "Yixin Dong, Charlie F Ruan, Yaxing Cai, Ruihang Lai, Ziyi Xu, Yilong Zhao, and Tianqi Chen. 
Xgrammar: Flexible and efficient structured generation engine for large language models. arXiv preprint arXiv:2411.15100, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 511, + 504, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 511, + 504, + 545 + ], + "spans": [ + { + "bbox": [ + 107, + 511, + 504, + 545 + ], + "type": "text", + "content": "Yilun Du, Shuang Li, Antonio Torralba, Joshua B Tenenbaum, and Igor Mordatch. Improving factuality and reasoning in language models through multiagent debate. In *Forty-first International Conference on Machine Learning*, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 553, + 504, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 553, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 107, + 553, + 504, + 586 + ], + "type": "text", + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 594, + 504, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 594, + 504, + 618 + ], + "spans": [ + { + "bbox": [ + 107, + 594, + 504, + 618 + ], + "type": "text", + "content": "Andrew Estornell, Jean-Francois Ton, Yuanshun Yao, and Yang Liu. Acc-debate: An actor-critic approach to multi-agent debate, 2024. URL https://arxiv.org/abs/2411.00053." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 626, + 504, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 626, + 504, + 649 + ], + "spans": [ + { + "bbox": [ + 107, + 626, + 504, + 649 + ], + "type": "text", + "content": "John H Flavell. Metacognition and cognitive monitoring: A new area of cognitive-developmental inquiry. American psychologist, 34(10):906, 1979." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 657, + 504, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 657, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 107, + 657, + 504, + 691 + ], + "type": "text", + "content": "Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D. Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars. 2025. URL https://arxiv.org/abs/2503.01307." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 699, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 699, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 699, + 504, + 723 + ], + "type": "text", + "content": "Peizhong Gao, Ao Xie, Shaoguang Mao, Wenshan Wu, Yan Xia, Haipeng Mi, and Furu Wei. Meta reasoning for large language models. arXiv preprint arXiv:2406.11698, 2024." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "Alex Graves. Sequence transduction with recurrent neural networks. arXiv preprint arXiv:1211.3711, 2012." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 102, + 504, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 102, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 106, + 102, + 504, + 137 + ], + "type": "text", + "content": "Fatemeh Haji, Mazal Bethany, Maryam Tabar, Jason Chiang, Anthony Rios, and Peyman Najafirad. Improving llm reasoning with multi-agent tree-of-thought validator agent, 2024. URL https://arxiv.org/abs/2409.11527." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 144, + 504, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 144, + 504, + 169 + ], + "spans": [ + { + "bbox": [ + 107, + 144, + 504, + 169 + ], + "type": "text", + "content": "Rui Hao, Linmei Hu, Weijian Qi, Qingliu Wu, Yirui Zhang, and Liqiang Nie. Chatlm network: More brains, more intelligence. AI Open, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 176, + 504, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 176, + 504, + 220 + ], + "spans": [ + { + "bbox": [ + 106, + 176, + 504, + 220 + ], + "type": "text", + "content": "Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, et al. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 228, + 504, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 228, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 106, + 228, + 504, + 262 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874, 2021." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 271, + 504, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 271, + 504, + 305 + ], + "spans": [ + { + "bbox": [ + 106, + 271, + 504, + 305 + ], + "type": "text", + "content": "Sirui Hong, Xiawu Zheng, Jonathan Chen, Yuheng Cheng, Jinlin Wang, Ceyao Zhang, Zili Wang, Steven Ka Shing Yau, Zijuan Lin, Liyang Zhou, et al. Metagpt: Meta programming for multiagent collaborative framework. arXiv preprint arXiv:2308.00352, 3(4):6, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 312, + 504, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 312, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 504, + 335 + ], + "type": "text", + "content": "Jian Hu. Reinforce++: A simple and efficient approach for aligning large language models. arXiv preprint arXiv:2501.03262, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 343, + 504, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 343, + 504, + 376 + ], + "spans": [ + { + "bbox": [ + 106, + 343, + 504, + 376 + ], + "type": "text", + "content": "Jian Hu, Xibin Wu, Zilin Zhu, Xianyu, Weixun Wang, Dehao Zhang, and Yu Cao. Openrlhf: An easy-to-use, scalable and high-performance rlhf framework. arXiv preprint arXiv:2405.11143, 2024a." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 385, + 504, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 385, + 504, + 419 + ], + "spans": [ + { + "bbox": [ + 106, + 385, + 504, + 419 + ], + "type": "text", + "content": "Qitian Jason Hu, Jacob Bieker, Xiuyu Li, Nan Jiang, Benjamin Keigwin, Gaurav Ranganath, Kurt Keutzer, and Shriyash Kaustubh Upadhyay. Routerbench: A benchmark for multi-llm routing system. arXiv preprint arXiv:2403.12031, 2024b." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 427, + 504, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 427, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 106, + 427, + 504, + 460 + ], + "type": "text", + "content": "Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 469, + 504, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 469, + 504, + 493 + ], + "spans": [ + { + "bbox": [ + 107, + 469, + 504, + 493 + ], + "type": "text", + "content": "Fangkai Jiao, Geyang Guo, Xingxing Zhang, Nancy F Chen, Shafiq Joty, and Furu Wei. Preference optimization for reasoning with pseudo feedback. arXiv preprint arXiv:2411.16345, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 500, + 504, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 500, + 504, + 534 + ], + "spans": [ + { + "bbox": [ + 106, + 500, + 504, + 534 + ], + "type": "text", + "content": "Bowen Jin, Hansi Zeng, Zhenrui Yue, Jinsung Yoon, Sercan Arik, Dong Wang, Hamed Zamani, and Jiawei Han. Search-r1: Training llms to reason and leverage search engines with reinforcement learning, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 542, + 504, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 542, + 504, + 566 + ], + "spans": [ + { + "bbox": [ + 106, + 542, + 504, + 566 + ], + "type": "text", + "content": "Jan Hendrik Kirchner, Yining Chen, Harri Edwards, Jan Leike, Nat McAleese, and Yuri Burda. Prover-verifier games improve legibility of llm outputs. arXiv preprint arXiv:2407.13692, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 573, + 504, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 573, + 504, + 608 + ], + "spans": [ + { + "bbox": [ + 106, + 573, + 504, + 608 + ], + "type": "text", + "content": "Robert Kirk, Ishita Mediratta, Christoforos Nalmpantis, Jelena Luketina, Eric Hambro, Edward Grefenstette, and Roberta Raileanu. Understanding the effects of rlhf on llm generalisation and diversity. In The Twelfth International Conference on Learning Representations." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 615, + 504, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 615, + 504, + 649 + ], + "spans": [ + { + "bbox": [ + 106, + 615, + 504, + 649 + ], + "type": "text", + "content": "Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, et al. Training language models to self-correct via reinforcement learning. arXiv preprint arXiv:2409.12917, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 657, + 504, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 657, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 106, + 657, + 504, + 692 + ], + "type": "text", + "content": "Nathan Lambert, Valentina Pyatkin, Jacob Morrison, LJ Miranda, Bill Yuchen Lin, Khyathi Chandu, Nouha Dziri, Sachin Kumar, Tom Zick, Yejin Choi, et al. Rewardbench: Evaluating reward models for language modeling. arXiv preprint arXiv:2403.13787, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 699, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 699, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 504, + 723 + ], + "type": "text", + "content": "Pat Langley, Kirstin Cummings, and Daniel Shapiro. Hierarchical skills and cognitive architectures. 
In Proceedings of the annual meeting of the cognitive science society, volume 26, 2004." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 117 + ], + "type": "text", + "content": "Aitor Lewkowycz, Anders Andreassen, David Dohan, Ethan Dyer, Henryk Michalewski, Vinay Ramasesh, Ambrose Slone, Cem Anil, Imanol Schlag, Theo Gutman-Solo, et al. Solving quantitative reasoning problems with language models. Advances in Neural Information Processing Systems, 35:3843-3857, 2022." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 124, + 505, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 124, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 106, + 124, + 505, + 158 + ], + "type": "text", + "content": "Ming Li, Jiuhai Chen, Lichang Chen, and Tianyi Zhou. Can llms speak for diverse people? tuning llms via debate to generate controllable controversial statements. arXiv preprint arXiv:2402.10614, 2024." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 166, + 505, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 166, + 505, + 201 + ], + "spans": [ + { + "bbox": [ + 106, + 166, + 505, + 201 + ], + "type": "text", + "content": "Tian Liang, Zhiwei He, Wenxiang Jiao, Xing Wang, Yan Wang, Rui Wang, Yujiu Yang, Shuming Shi, and Zhaopeng Tu. Encouraging divergent thinking in large language models through multiagent debate. arXiv preprint arXiv:2305.19118, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 208, + 505, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 208, + 505, + 241 + ], + "spans": [ + { + "bbox": [ + 106, + 208, + 505, + 241 + ], + "type": "text", + "content": "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. arXiv preprint arXiv:2305.20050, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 249, + 505, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 249, + 505, + 283 + ], + "spans": [ + { + "bbox": [ + 106, + 249, + 505, + 283 + ], + "type": "text", + "content": "Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 291, + 505, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 291, + 505, + 335 + ], + "spans": [ + { + "bbox": [ + 106, + 291, + 505, + 335 + ], + "type": "text", + "content": "Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective. CoRR, abs/2503.20783, 2025. doi: 10.48550/ARXIV.2503.20783. 
URL https://doi.org/10.48550/arXiv.2503.20783." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 343, + 505, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 343, + 505, + 378 + ], + "spans": [ + { + "bbox": [ + 106, + 343, + 505, + 378 + ], + "type": "text", + "content": "Chengdong Ma, Ziran Yang, Minquan Gao, Hai Ci, Jun Gao, Xuehai Pan, and Yaodong Yang. Red teaming game: A game-theoretic framework for red teaming language models. arXiv preprint arXiv:2310.00322, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 385, + 505, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 385, + 505, + 430 + ], + "spans": [ + { + "bbox": [ + 106, + 385, + 505, + 430 + ], + "type": "text", + "content": "Hao Ma, Tianyi Hu, Zhiqiang Pu, Boyin Liu, Xiaolin Ai, Yanyan Liang, and Min Chen. Coevolving with the other you: Fine-tuning LLM with sequential cooperative multi-agent reinforcement learning. CoRR, abs/2410.06101, 2024. doi: 10.48550/ARXIV.2410.06101. URL https://doi.org/10.48550/arXiv.2410.06101." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 437, + 505, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 437, + 505, + 472 + ], + "spans": [ + { + "bbox": [ + 106, + 437, + 505, + 472 + ], + "type": "text", + "content": "Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegrefe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, et al. Self-refine: Iterative refinement with self-feedback. Advances in Neural Information Processing Systems, 36:46534-46594, 2023." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 479, + 505, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 479, + 505, + 514 + ], + "spans": [ + { + "bbox": [ + 106, + 479, + 505, + 514 + ], + "type": "text", + "content": "Dakota Mahan, Duy Van Phung, Rafael Rafailov, Chase Blagden, Nathan Lile, Louis Castricato, Jan-Philipp Franken, Chelsea Finn, and Alon Albalak. Generative reward models. arXiv preprint arXiv:2410.12832, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 521, + 505, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 521, + 505, + 565 + ], + "spans": [ + { + "bbox": [ + 106, + 521, + 505, + 565 + ], + "type": "text", + "content": "Sumeet Ramesh Motwani, Chandler Smith, Rocktim Jyoti Das, Markian Rybchuk, Philip H. S. Torr, Ivan Laptev, Fabio Pizzati, Ronald Clark, and Christian Schroeder de Witt. Malt: Improving reasoning with multi-agent llm training, 2024. URL https://arxiv.org/abs/2412.01928." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 573, + 505, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 573, + 505, + 608 + ], + "spans": [ + { + "bbox": [ + 106, + 573, + 505, + 608 + ], + "type": "text", + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 615, + 425, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 615, + 425, + 628 + ], + "spans": [ + { + "bbox": [ + 106, + 615, + 425, + 628 + ], + "type": "text", + "content": "OpenAI. Openai o1 system card, 2024. URL https://openai.com/ol/." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 635, + 505, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 635, + 505, + 679 + ], + "spans": [ + { + "bbox": [ + 106, + 635, + 505, + 679 + ], + "type": "text", + "content": "Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in neural information processing systems, 35: 27730-27744, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 688, + 505, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 688, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 688, + 505, + 723 + ], + "type": "text", + "content": "Chanwoo Park, Seungju Han, Xingzhi Guo, Asuman Ozdaglar, Kaiqing Zhang, and Joo-Kyung Kim. Maporl: Multi-agent post-co-training for collaborative large language models with reinforcement learning. 2025. URL https://arxiv.org/abs/2502.18439." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 505, + 721 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "Ethan Perez, Saffron Huang, Francis Song, Trevor Cai, Roman Ring, John Aslanides, Amelia Glaese, Nat McAleese, and Geoffrey Irving. Red teaming language models with language models. arXiv preprint arXiv:2202.03286, 2022." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 111, + 505, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 111, + 505, + 145 + ], + "spans": [ + { + "bbox": [ + 106, + 111, + 505, + 145 + ], + "type": "text", + "content": "Israel Puerta-Merino, Carlos Núñez-Molina, Pablo Mesejo, and Juan Fernández-Olivares. A roadmap to guide the integration of llms in hierarchical planning. arXiv preprint arXiv:2501.08068, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 150, + 504, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 150, + 504, + 176 + ], + "spans": [ + { + "bbox": [ + 107, + 150, + 504, + 176 + ], + "type": "text", + "content": "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual reasoning makes smaller llms stronger problem-solvers. arXiv preprint arXiv:2408.06195, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 179, + 504, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 179, + 504, + 215 + ], + "spans": [ + { + "bbox": [ + 107, + 179, + 504, + 215 + ], + "type": "text", + "content": "Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, and Pengfei Liu. O1 replication journey: A strategic progress report - part 1, 2024. URL https://arxiv.org/abs/2410.18982." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 219, + 504, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 219, + 504, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 219, + 504, + 253 + ], + "type": "text", + "content": "Lv Qingsong, Yangning Li, Zihua Lan, Zishan Xu, Jiwei Tang, Yinghui Li, Wenhao Jiang, Hai-Tao Zheng, and Philip S. Yu. Raise: Reinforenced adaptive instruction selection for large language models, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 258, + 504, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 258, + 504, + 293 + ], + "spans": [ + { + "bbox": [ + 107, + 258, + 504, + 293 + ], + "type": "text", + "content": "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 298, + 504, + 333 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 298, + 504, + 333 + ], + "spans": [ + { + "bbox": [ + 107, + 298, + 504, + 333 + ], + "type": "text", + "content": "Krishan Rana, Jesse Haviland, Sourav Garg, Jad Abou-Chakra, Ian Reid, and Niko Suenderhauf. Sayplan: Grounding large language models using 3d scene graphs for scalable robot task planning. 
arXiv preprint arXiv:2307.06135, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 338, + 504, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 338, + 504, + 372 + ], + "spans": [ + { + "bbox": [ + 107, + 338, + 504, + 372 + ], + "type": "text", + "content": "Swarnadeep Saha, Xian Li, Marjan Ghazvininejad, Jason Weston, and Tianlu Wang. Learning to plan & reason for evaluation with thinking-llm-as-a-judge. arXiv preprint arXiv:2501.18099, 2025a." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 377, + 504, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 377, + 504, + 412 + ], + "spans": [ + { + "bbox": [ + 107, + 377, + 504, + 412 + ], + "type": "text", + "content": "Swarnadeep Saha, Xian Li, Marjan Ghazvininejad, Jason Weston, and Tianlu Wang. Learning to plan & reason for evaluation with thinking-llm-as-a-judge, 2025b. URL https://arxiv.org/abs/2501.18099." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 417, + 505, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 417, + 505, + 474 + ], + "spans": [ + { + "bbox": [ + 107, + 417, + 505, + 474 + ], + "type": "text", + "content": "John Schulman, Sergey Levine, Pieter Abbeel, Michael I. Jordan, and Philipp Moritz. Trust region policy optimization. In Francis R. Bach and David M. Blei, editors, Proceedings of the 32nd International Conference on Machine Learning, ICML 2015, Lille, France, 6-11 July 2015, volume 37 of JMLR Workshop and Conference Proceedings, pages 1889-1897. JMLR.org, 2015. URL http://proceedings.mlr.press/v37/schulman15.html." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 479, + 504, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 479, + 504, + 513 + ], + "spans": [ + { + "bbox": [ + 107, + 479, + 504, + 513 + ], + "type": "text", + "content": "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. CoRR, abs/1707.06347, 2017. URL http://arxiv.org/abs/1707.06347." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 518, + 504, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 518, + 504, + 553 + ], + "spans": [ + { + "bbox": [ + 107, + 518, + 504, + 553 + ], + "type": "text", + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 558, + 504, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 558, + 504, + 604 + ], + "spans": [ + { + "bbox": [ + 107, + 558, + 504, + 604 + ], + "type": "text", + "content": "Maohao Shen, Guangtao Zeng, Zhenting Qi, Zhang-Wei Hong, Zhenfang Chen, Wei Lu, Gregory Wornell, Subhro Das, David Cox, and Chuang Gan. Satori: Reinforcement learning with chain-of-action-thought enhances llm reasoning via autoregressive search, 2025. URL https:// arxiv.org/abs/2502.02508." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 609, + 504, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 609, + 504, + 643 + ], + "spans": [ + { + "bbox": [ + 107, + 609, + 504, + 643 + ], + "type": "text", + "content": "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. 
arXiv preprint arXiv: 2409.19256, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 647, + 504, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 647, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 107, + 647, + 504, + 672 + ], + "type": "text", + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 677, + 504, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 677, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 107, + 677, + 504, + 721 + ], + "type": "text", + "content": "Chan Hee Song, Jiaman Wu, Clayton Washington, Brian M Sadler, Wei-Lun Chao, and Yu Su. Llm-planner: Few-shot grounded planning for embodied agents with large language models. In Proceedings of the IEEE/CVF international conference on computer vision, pages 2998-3009, 2023." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "Dimitris Stripelis, Zijian Hu, Jipeng Zhang, Zhaozhuo Xu, Alay Dilipbhai Shah, Han Jin, Yuhang Yao, Salman Avestimehr, and Chaoyang He. 
Tensoropera router: A multi-model router for efficient llm inference. arXiv preprint arXiv:2408.12320, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 112, + 505, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 112, + 505, + 148 + ], + "spans": [ + { + "bbox": [ + 106, + 112, + 505, + 148 + ], + "type": "text", + "content": "Vighnesh Subramaniam, Yilun Du, Joshua B. Tenenbaum, Antonio Torralba, Shuang Li, and Igor Mordatch. Multiagent finetuning: Self improvement with diverse reasoning chains, 2025. URL https://arxiv.org/abs/2501.05707." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 154, + 505, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 154, + 505, + 189 + ], + "spans": [ + { + "bbox": [ + 106, + 154, + 505, + 189 + ], + "type": "text", + "content": "Chuanneng Sun, Songjun Huang, and Dario Pompili. Retrieval-augmented hierarchical in-context reinforcement learning and hindsight modular reflections for task planning with llms. arXiv preprint arXiv:2408.06520, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 194, + 408, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 194, + 408, + 209 + ], + "spans": [ + { + "bbox": [ + 107, + 194, + 408, + 209 + ], + "type": "text", + "content": "Richard Sutton. The bitter lesson. Incomplete Ideas (blog), 13(1):38, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 215, + 505, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 215, + 505, + 250 + ], + "spans": [ + { + "bbox": [ + 107, + 215, + 505, + 250 + ], + "type": "text", + "content": "Sijun Tan, Siyuan Zhuang, Kyle Montgomery, William Y Tang, Alejandro Cuadron, Chenguang Wang, Raluca Ada Popa, and Ion Stoica. Judgebench: A benchmark for evaluating llm-based judges. arXiv preprint arXiv:2410.12784, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 256, + 504, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 256, + 504, + 291 + ], + "spans": [ + { + "bbox": [ + 106, + 256, + 504, + 291 + ], + "type": "text", + "content": "Xiangru Tang, Anni Zou, Zhuosheng Zhang, Ziming Li, Yilun Zhao, Xingyao Zhang, Arman Cohan, and Mark Gerstein. Medagents: Large language models as collaborators for zero-shot medical reasoning. arXiv preprint arXiv:2311.10537, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 297, + 503, + 321 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 297, + 503, + 321 + ], + "spans": [ + { + "bbox": [ + 105, + 297, + 503, + 321 + ], + "type": "text", + "content": "Qwen Team. Qwen2.5: A party of foundation models, September 2024. URL https://qwenlm.github.io/blog/qwen2.5/." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 327, + 504, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 327, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 106, + 327, + 504, + 361 + ], + "type": "text", + "content": "Jun Wang, Meng Fang, Ziyu Wan, Muning Wen, Jiachen Zhu, Anjie Liu, Ziqin Gong, Yan Song, Lei Chen, Lionel M Ni, et al. Openr: An open source framework for advanced reasoning with large language models. arXiv preprint arXiv:2410.09671, 2024a." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 369, + 504, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 369, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 107, + 369, + 504, + 403 + ], + "type": "text", + "content": "Tianlu Wang, Ilia Kulikov, Olga Golovneva, Ping Yu, Weizhe Yuan, Jane Dwivedi-Yu, Richard Yuanzhe Pang, Maryam Fazel-Zarandi, Jason Weston, and Xian Li. Self-taught evaluators. arXiv preprint arXiv:2408.02666, 2024b." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 410, + 504, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 410, + 504, + 445 + ], + "spans": [ + { + "bbox": [ + 107, + 410, + 504, + 445 + ], + "type": "text", + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. arXiv preprint arXiv:2203.11171, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 451, + 504, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 451, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 107, + 451, + 504, + 475 + ], + "type": "text", + "content": "Yuqing Wang and Yun Zhao. Metacognitive prompting improves understanding in large language models. arXiv preprint arXiv:2308.05342, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 482, + 504, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 482, + 504, + 516 + ], + "spans": [ + { + "bbox": [ + 107, + 482, + 504, + 516 + ], + "type": "text", + "content": "Zhenhailong Wang, Shaoguang Mao, Wenshan Wu, Tao Ge, Furu Wei, and Heng Ji. Unleashing the emergent cognitive synergy in large language models: A task-solving agent through multi-personal self-collaboration. arXiv preprint arXiv:2307.05300, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 523, + 504, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 523, + 504, + 568 + ], + "spans": [ + { + "bbox": [ + 106, + 523, + 504, + 568 + ], + "type": "text", + "content": "Zihan Wang, Kangrui Wang, Qineng Wang, Pingyue Zhang, Linjie Li, Zhengyuan Yang, Kefan Yu, Minh Nhat Nguyen, Licheng Liu, Eli Gottlieb, Monica Lam, Yiping Lu, Kyunghyun Cho, Jiajun Wu, Li Fei-Fei, Lijuan Wang, Yejin Choi, and Manling Li. 
Ragen: Understanding self-evolution in llm agents via multi-turn reinforcement learning, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 574, + 504, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 574, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 106, + 574, + 504, + 609 + ], + "type": "text", + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 616, + 504, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 616, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 107, + 616, + 504, + 640 + ], + "type": "text", + "content": "Sean Welleck, Ximing Lu, Peter West, Faeze Brahman, Tianxiao Shen, Daniel Khashabi, and Yejin Choi. Generating sequences by learning to self-correct. arXiv preprint arXiv:2211.00053, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 647, + 504, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 647, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 106, + 647, + 504, + 681 + ], + "type": "text", + "content": "Muning Wen, Ziyu Wan, Weinan Zhang, Jun Wang, and Ying Wen. Reinforcing language agents via policy optimization with action decomposition. CoRR, abs/2405.15821, 2024. doi: 10.48550/ ARXIV.2405.15821. URL https://doi.org/10.48550/arXiv.2405.15821." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 688, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 688, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 688, + 504, + 723 + ], + "type": "text", + "content": "Zhiheng Xi, Dingwen Yang, Jixuan Huang, Jiafu Tang, Guanyu Li, Yiwen Ding, Wei He, Boyang Hong, Shihan Do, Wenyu Zhan, et al. Enhancing llm reasoning via critique models with test-time and training-time supervision. arXiv preprint arXiv:2411.16579, 2024." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 721 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "Violet Xiang, Charlie Snell, Kanishk Gandhi, Alon Albalak, Anikait Singh, Chase Blagden, Duy Phung, Rafael Rafailov, Nathan Lile, Dakota Mahan, et al. Towards system 2 reasoning in llms: Learning how to think with meta chain-of-though. arXiv preprint arXiv:2501.04682, 2025." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 112, + 505, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 112, + 505, + 148 + ], + "spans": [ + { + "bbox": [ + 106, + 112, + 505, + 148 + ], + "type": "text", + "content": "Yihang Xiao, Jinyi Liu, Yan Zheng, Xiaohan Xie, Jianye Hao, Mingzhi Li, Ruitao Wang, Fei Ni, Yuxiao Li, Jintian Luo, et al. Cellagent: An llm-driven multi-agent framework for automated single-cell data analysis. BioRxiv, pages 2024-05, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 152, + 505, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 152, + 505, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 152, + 505, + 186 + ], + "type": "text", + "content": "Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 193, + 505, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 193, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 106, + 193, + 505, + 228 + ], + "type": "text", + "content": "Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, et al. Towards large reasoning models: A survey of reinforced reasoning with large language models. arXiv preprint arXiv:2501.09686, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 233, + 505, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 233, + 505, + 267 + ], + "spans": [ + { + "bbox": [ + 106, + 233, + 505, + 267 + ], + "type": "text", + "content": "Prateek Yadav, Tu Vu, Jonathan Lai, Alexandra Chronopoulou, Manaal Faruqui, Mohit Bansal, and Tsendsuren Munkhdalai. 
What matters for model merging at scale? arXiv preprint arXiv:2410.03617, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 274, + 505, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 274, + 505, + 308 + ], + "spans": [ + { + "bbox": [ + 106, + 274, + 505, + 308 + ], + "type": "text", + "content": "Xue Yan, Yan Song, Xinyu Cui, Filippos Christianos, Haifeng Zhang, David Henry Mguni, and Jun Wang. Ask more, know better: Reinforce-learned prompt questions for decision making with large language models. arXiv preprint arXiv:2310.18127, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 314, + 505, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 314, + 505, + 359 + ], + "spans": [ + { + "bbox": [ + 106, + 314, + 505, + 359 + ], + "type": "text", + "content": "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, Keming Lu, Mingfeng Xue, Runji Lin, Tianyu Liu, Xingzhang Ren, and Zhenru Zhang. Qwen2.5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 365, + 505, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 365, + 505, + 389 + ], + "spans": [ + { + "bbox": [ + 106, + 365, + 505, + 389 + ], + "type": "text", + "content": "Ling Yang, Zhaochen Yu, Bin Cui, and Mengdi Wang. Reasonflux: Hierarchical llm reasoning via scaling thought templates. arXiv preprint arXiv:2502.06772, 2025." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 395, + 505, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 395, + 505, + 429 + ], + "spans": [ + { + "bbox": [ + 106, + 395, + 505, + 429 + ], + "type": "text", + "content": "Guanghao Ye, Khiem Duc Pham, Xinzhi Zhang, Sivakanth Gopi, Baolin Peng, Beibin Li, Janardhan Kulkarni, and Huseyin A Inan. On the emergence of thinking in llms i: Searching for the right intuition. arXiv preprint arXiv:2502.06773, 2025a." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 435, + 505, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 435, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 106, + 435, + 505, + 459 + ], + "type": "text", + "content": "Peijun Ye, Tao Wang, and Fei-Yue Wang. A survey of cognitive architectures in the past 20 years. IEEE transactions on cybernetics, 48(12):3280-3290, 2018." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 464, + 505, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 464, + 505, + 488 + ], + "spans": [ + { + "bbox": [ + 106, + 464, + 505, + 488 + ], + "type": "text", + "content": "Yaowen Ye, Cassidy Laidlaw, and Jacob Steinhardt. Iterative label refinement matters more than preference optimization under weak supervision. arXiv preprint arXiv:2501.07886, 2025b." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 494, + 505, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 494, + 505, + 517 + ], + "spans": [ + { + "bbox": [ + 106, + 494, + 505, + 517 + ], + "type": "text", + "content": "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning. arXiv preprint arXiv:2502.03387, 2025c." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 523, + 505, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 523, + 505, + 558 + ], + "spans": [ + { + "bbox": [ + 106, + 523, + 505, + 558 + ], + "type": "text", + "content": "Le Yu, Bowen Yu, Haiyang Yu, Fei Huang, and Yongbin Li. Language models are super mario: Absorbing abilities from homologous models as a free lunch. In *Forty-first International Conference on Machine Learning*, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 563, + 505, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 563, + 505, + 631 + ], + "spans": [ + { + "bbox": [ + 106, + 563, + 505, + 631 + ], + "type": "text", + "content": "Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Guangming Sheng, Yuxuan Tong, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Jinhua Zhu, Jiaze Chen, Jiangjie Chen, Chengyi Wang, Hongli Yu, Weinan Dai, Yuxuan Song, Xiangpeng Wei, Hao Zhou, Jingjing Liu, Wei-Ying Ma, Ya-Qin Zhang, Lin Yan, Mu Qiao, Yonghui Wu, and Mingxuan Wang. Dapo: An open-source llm reinforcement learning system at scale, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 636, + 505, + 671 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 636, + 505, + 671 + ], + "spans": [ + { + "bbox": [ + 106, + 636, + 505, + 671 + ], + "type": "text", + "content": "Murong Yue, Wenlin Yao, Haitao Mi, Dian Yu, Ziyu Yao, and Dong Yu. Dots: Learning to reason dynamically in llms via optimal reasoning trajectories search. In The Thirteenth International Conference on Learning Representations." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 677, + 505, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 677, + 505, + 721 + ], + "spans": [ + { + "bbox": [ + 106, + 677, + 505, + 721 + ], + "type": "text", + "content": "Murong Yue, Wenlin Yao, Haitao Mi, Dian Yu, Ziyu Yao, and Dong Yu. DOTS: learning to reason dynamically in llms via optimal reasoning trajectories search. CoRR, abs/2410.03864, 2024. doi: 10.48550/ARXIV.2410.03864. URL https://doi.org/10.48550/arXiv.2410.03864." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 505, + 563 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "type": "text", + "content": "Yanwei Yue, Guibin Zhang, Boyang Liu, Guancheng Wan, Kun Wang, Dawei Cheng, and Yiyan Qi. Masrouter: Learning to route llms for multi-agent systems. arXiv preprint arXiv:2502.11133, 2025a." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 112, + 505, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 112, + 505, + 168 + ], + "spans": [ + { + "bbox": [ + 106, + 112, + 505, + 168 + ], + "type": "text", + "content": "Yu Yue, Yufeng Yuan, Qiying Yu, Xiaochen Zuo, Ruofei Zhu, Wenyuan Xu, Jiaze Chen, Chengyi Wang, TianTian Fan, Zhengyin Du, Xiangpeng Wei, Xiangyu Yu, Gaohong Liu, Juncai Liu, Lingjun Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Ru Zhang, Xin Liu, Mingxuan Wang, Yonghui Wu, and Lin Yan. Vapo: Efficient and reliable reinforcement learning for advanced reasoning tasks, 2025b." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 175, + 504, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 175, + 504, + 199 + ], + "spans": [ + { + "bbox": [ + 107, + 175, + 504, + 199 + ], + "type": "text", + "content": "Weihao Zeng, Yuzhen Huang, Qian Liu, Wei Liu, Keqing He, Zejun Ma, and Junxian He. Simplerl-zoo: Investigating and taming zero reinforcement learning for open base models in the wild, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 205, + 504, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 205, + 504, + 250 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 504, + 250 + ], + "type": "text", + "content": "Di Zhang, Jianbo Wu, Jingdi Lei, Tong Che, Jiatong Li, Tong Xie, Xiaoshui Huang, Shufei Zhang, Marco Pavone, Yuqiang Li, Wanli Ouyang, and Dongzhan Zhou. Llama-berry: Pairwise optimization for o1-like olympiad-level mathematical reasoning, 2024a. URL https://arxiv.org/abs/2410.02884." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 256, + 504, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 256, + 504, + 290 + ], + "spans": [ + { + "bbox": [ + 106, + 256, + 504, + 290 + ], + "type": "text", + "content": "Hangfan Zhang, Zhiyao Cui, Xinrun Wang, Qiaosheng Zhang, Zhen Wang, Dinghao Wu, and Shuyue Hu. If multi-agent debate is the answer, what is the question? arXiv preprint arXiv:2502.08788, 2025a." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 297, + 504, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 297, + 504, + 331 + ], + "spans": [ + { + "bbox": [ + 105, + 297, + 504, + 331 + ], + "type": "text", + "content": "Jiayi Zhang, Jinyu Xiang, Zhaoyang Yu, Fengwei Teng, Xionghui Chen, Jiaqi Chen, Mingchen Zhuge, Xin Cheng, Sirui Hong, Jinlin Wang, et al. Aflow: Automating agentic workflow generation. arXiv preprint arXiv:2410.10762, 2024b." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 338, + 504, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 338, + 504, + 371 + ], + "spans": [ + { + "bbox": [ + 106, + 338, + 504, + 371 + ], + "type": "text", + "content": "Xiaotian Zhang, Chunyang Li, Yi Zong, Zhengyu Ying, Liang He, and Xipeng Qiu. Evaluating the performance of large language models on gaokao benchmark. arXiv preprint arXiv:2305.12474, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 378, + 504, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 378, + 504, + 412 + ], + "spans": [ + { + "bbox": [ + 106, + 378, + 504, + 412 + ], + "type": "text", + "content": "Yiqun Zhang, Peng Ye, Xiaocui Yang, Shi Feng, Shufei Zhang, Lei Bai, Wanli Ouyang, and Shuyue Hu. Nature-inspired population-based evolution of large language models. arXiv preprint arXiv:2503.01155, 2025b." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 419, + 504, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 419, + 504, + 443 + ], + "spans": [ + { + "bbox": [ + 106, + 419, + 504, + 443 + ], + "type": "text", + "content": "Rosie Zhao, Alexandru Meterez, Sham Kakade, Cengiz Pehlevan, Samy Jelassi, and Eran Malach. Echo chamber: Rl post-training amplifies behaviors learned in pretraining, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 449, + 504, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 449, + 504, + 483 + ], + "spans": [ + { + "bbox": [ + 106, + 449, + 504, + 483 + ], + "type": "text", + "content": "Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. Marco-ol: Towards open reasoning models for open-ended solutions, 2024. URL https://arxiv.org/abs/2411.14405." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 490, + 504, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 490, + 504, + 513 + ], + "spans": [ + { + "bbox": [ + 106, + 490, + 504, + 513 + ], + "type": "text", + "content": "Yifei Zhou, Andrea Zanette, Jiayi Pan, Sergey Levine, and Aviral Kumar. Archer: Training language model agents via hierarchical multi-turn rl, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 519, + 504, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 519, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 106, + 519, + 504, + 563 + ], + "type": "text", + "content": "Mingchen Zhuge, Haozhe Liu, Francesco Faccio, Dylan R Ashley, Róbert Csordás, Anand Gopalakrishnan, Abdullah Hamdi, Hasan Abed Al Kader Hammoud, Vincent Herrmann, Kazuki Irie, et al. Mindstorms in natural language-based societies of mind. arXiv preprint arXiv:2305.17066, 2023." 
+ } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 251, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 251, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 251, + 85 + ], + "type": "text", + "content": "Appendix Table of Contents" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 133, + 91, + 505, + 102 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 505, + 102 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 505, + 102 + ], + "type": "text", + "content": "- A Related work 19" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 150, + 105, + 504, + 153 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 150, + 105, + 504, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 105, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 150, + 105, + 504, + 117 + ], + "type": "text", + "content": "-A.1 Single LLM Reasoning 19" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 150, + 118, + 504, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 118, + 504, + 129 + ], + "spans": [ + { + "bbox": [ + 150, + 118, + 504, + 129 + ], + "type": "text", + "content": "-A.2MultipleLLMReasoning 20" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 150, + 130, + 504, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 130, + 504, + 141 + ], + "spans": [ + { + "bbox": [ + 150, + 130, + 504, + 141 + ], + "type": "text", + "content": "-A.3 Hierarchical 
Reasoning 20" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 150, + 142, + 504, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 142, + 504, + 153 + ], + "spans": [ + { + "bbox": [ + 150, + 142, + 504, + 153 + ], + "type": "text", + "content": "-A.4RL in LLM 21" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 156, + 504, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 156, + 504, + 167 + ], + "spans": [ + { + "bbox": [ + 132, + 156, + 504, + 167 + ], + "type": "text", + "content": "B Limitation and Future Work 21" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 171, + 504, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 171, + 504, + 182 + ], + "spans": [ + { + "bbox": [ + 132, + 171, + 504, + 182 + ], + "type": "text", + "content": "C Supplementary Materials for Method in Section 3 21" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 150, + 185, + 504, + 245 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 150, + 185, + 504, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 185, + 504, + 196 + ], + "spans": [ + { + "bbox": [ + 150, + 185, + 504, + 196 + ], + "type": "text", + "content": "- C.1 Inference-time Scaling For ReMA 21" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 150, + 198, + 504, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 198, + 504, + 208 + ], + "spans": [ + { + "bbox": [ + 150, + 198, + 504, + 208 + ], + "type": "text", + "content": "- C.2 Detailed reward design 22" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 150, + 209, + 504, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 209, + 504, + 220 + ], + "spans": [ + { + "bbox": [ + 150, + 209, + 504, + 220 + ], + "type": "text", + "content": "- C.3 Pseudocode of ReMA 23" + } + ] + } + ], + 
"index": 11 + }, + { + "bbox": [ + 150, + 221, + 504, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 221, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 150, + 221, + 504, + 232 + ], + "type": "text", + "content": "- C.4 Brief convergence analysis 23" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 150, + 234, + 504, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 234, + 504, + 245 + ], + "spans": [ + { + "bbox": [ + 150, + 234, + 504, + 245 + ], + "type": "text", + "content": "- C.5 Learning to reason from the perspective of Leader Follower Game 24" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 248, + 504, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 248, + 504, + 259 + ], + "spans": [ + { + "bbox": [ + 132, + 248, + 504, + 259 + ], + "type": "text", + "content": "D Training Details 26" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 150, + 262, + 504, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 262, + 504, + 274 + ], + "spans": [ + { + "bbox": [ + 150, + 262, + 504, + 274 + ], + "type": "text", + "content": "- D.1 Single-turn ReMA 26" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 167, + 275, + 504, + 320 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 167, + 275, + 504, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 275, + 504, + 285 + ], + "spans": [ + { + "bbox": [ + 167, + 275, + 504, + 285 + ], + "type": "text", + "content": "\\* D.1.1 Supervised fine-tuning data collection 27" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 167, + 286, + 504, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 286, + 504, + 297 + ], + "spans": [ + { + "bbox": [ + 167, + 286, + 504, + 297 + ], + "type": "text", + "content": "\\* D.1.2 Dataset Curation of RewardBench970 
27" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 167, + 298, + 504, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 298, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 167, + 298, + 504, + 308 + ], + "type": "text", + "content": "\\*D.1.3 Training on MATH 28" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 167, + 309, + 504, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 309, + 504, + 320 + ], + "spans": [ + { + "bbox": [ + 167, + 309, + 504, + 320 + ], + "type": "text", + "content": "\\* D.1.4 Training on Reward Bench 28" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 150, + 322, + 504, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 322, + 504, + 332 + ], + "spans": [ + { + "bbox": [ + 150, + 322, + 504, + 332 + ], + "type": "text", + "content": "- D.2 Multi-turn ReMA 28" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 167, + 334, + 504, + 357 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 167, + 334, + 504, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 334, + 504, + 344 + ], + "spans": [ + { + "bbox": [ + 167, + 334, + 504, + 344 + ], + "type": "text", + "content": "\\* D.2.1 SFT data collection of multi-turn MAMRP 29" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 167, + 346, + 504, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 346, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 167, + 346, + 504, + 357 + ], + "type": "text", + "content": "\\* D.2.2 Training on MATH 29" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 359, + 504, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 359, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 132, + 359, + 504, + 370 + ], + "type": "text", + "content": "E Other Experiments 29" + 
} + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 150, + 373, + 504, + 397 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 150, + 373, + 504, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 373, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 150, + 373, + 504, + 384 + ], + "type": "text", + "content": "-E.1 Reward functions shape cross-agent behaviors 29" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 150, + 386, + 504, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 386, + 504, + 397 + ], + "spans": [ + { + "bbox": [ + 150, + 386, + 504, + 397 + ], + "type": "text", + "content": "- E.2 Detailed Training Curves on Different Datasets of Multi-turn ReMA 30" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 400, + 504, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 400, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 132, + 400, + 504, + 411 + ], + "type": "text", + "content": "F Qualitative results 30" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 150, + 414, + 504, + 449 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 150, + 414, + 504, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 414, + 504, + 426 + ], + "spans": [ + { + "bbox": [ + 150, + 414, + 504, + 426 + ], + "type": "text", + "content": "- F.1 High-level policy finds better plans 30" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 150, + 426, + 504, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 426, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 150, + 426, + 504, + 437 + ], + "type": "text", + "content": "- F.2 Case study for Experiments of Different Reward Functions in Appendix E.1 .30" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 150, + 438, + 504, + 449 + ], + "type": "text", + "angle": 0, 
+ "lines": [ + { + "bbox": [ + 150, + 438, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 150, + 438, + 504, + 449 + ], + "type": "text", + "content": "- F.3 Case study for Adaptive Meta-thinking in Single-Turn ReMA in Section 4.2.2 30" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 453, + 504, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 453, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 132, + 453, + 504, + 464 + ], + "type": "text", + "content": "G Prompts 31" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 479, + 197, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 479, + 197, + 491 + ], + "spans": [ + { + "bbox": [ + 105, + 479, + 197, + 491 + ], + "type": "text", + "content": "A Related work" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 104, + 502, + 506, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 502, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 502, + 506, + 581 + ], + "type": "text", + "content": "Drawing from the bitter lesson [Sutton, 2019], two methods that appear to scale effectively are searching and learning, aligning with current trends in large language models [Xu et al., 2025]. At present, researchers are leveraging these methods to maximize the capabilities of individual transformers, while other efforts are exploring architectures that involve multiple interacting entities. In this paper, we examine this divergence within the context of LLM reasoning, a capability that allows large language models to solve problems through logical reasoning, step-by-step analysis, and inference [Wang et al., 2024a]." 
+ } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 592, + 231, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 592, + 231, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 231, + 605 + ], + "type": "text", + "content": "A.1 Single LLM Reasoning" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "content": "Main research works in reasoning involving a single LLM utilize search-based and post-training methods. The fundamental elements of searching methods are text generation and evaluation. Generation schemes include In-Context Learning [Brown et al., 2020], Beam Search [Graves, 2012], and various tree-based searching [Snell et al., 2024]; Evaluation approaches often use outcome accuracy, self-consistency [Wang et al., 2022], or process reward signal [Lightman et al., 2023] as the criteria to select high-quality responses from the generated texts. Post-training method is another research line in opposition to pre-training. Popular training pipelines often involve specific data construction followed by Supervised Fine-tuning [Qin et al., 2024, Ouyang et al., 2022, Hui et al., 2024, Liu et al., 2024], or reinforcement learning to interactively explore learning patterns [Wang et al., 2024a, Zhang et al., 2024a, DeepSeek-AI et al., 2025, Xu et al., 2025]." 
+ } + ] + } + ], + "index": 39 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 40 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 73, + 241, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 73, + 241, + 83 + ], + "spans": [ + { + "bbox": [ + 107, + 73, + 241, + 83 + ], + "type": "text", + "content": "A.2 Multiple LLM Reasoning" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 95, + 504, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 95, + 504, + 445 + ], + "spans": [ + { + "bbox": [ + 107, + 95, + 504, + 445 + ], + "type": "text", + "content": "Integrating multiple entities can potentially surpass the intelligence of the individual model [Chen et al., 2023]. With the rapid emergence of large language models showing a varying level of abilities, some studies have explored facilitating discussions among multiple off-the-shelf LLMs [Zhang et al., 2025a, Chen et al., 2024a, Wang et al., 2023, Du et al., 2023, Zhuge et al., 2023, Tang et al., 2023, Hao et al., 2025, Akata et al., 2023, Hong et al., 2023, Zhang et al., 2024b], taking the form of free discussion [Du et al., 2023, Liang et al., 2023] or structured role assignments [Hong et al., 2023, Zhang et al., 2024b]. Some have applied routing mechanisms to assign tasks to the most suitable expert models [Hu et al., 2024b, Stripelis et al., 2024, Ding et al., 2024, Yue et al., 2025a, Chen et al., 2024c] or merging mechanisms to develop more versatile models [Yadav et al., 2024, Yu et al., 2024, Zhang et al., 2025b]. 
Beyond aggregating static knowledge from multiple agents, multi-agent LLM training can also enhance reasoning capabilities. For example, multi-agent debates can generate diverse synthetic data, which can subsequently be used for supervised fine-tuning [Estornell et al., 2024, Li et al., 2024, Motwani et al., 2024, Dong and Ma, 2025, Perez et al., 2022, Ye et al., 2025a, Subramaniam et al., 2025]. Reinforcement learning (RL) methods have also been adopted to improve LLM reasoning in areas such as alignment [Perez et al., 2022, Ma et al., 2023] and legibility [Kirchner et al., 2024]. Motwani et al. [2024] utilize a three-agent system for generation and fine-tune the models using Direct Preference Optimization (DPO). Reinforcement Learning with Generative Reward Models (GenRM) [Mahan et al., 2024, Ye et al., 2025b, Jiao et al., 2024, Wang et al., 2024b] represents another common approach of multi-agent training, where the reward signal is derived from the token probabilities of another LLM, coupled with the reasoning process. While our work aligns with these efforts, it diverges by using an additional tunable LLM to provide metacognitive instructions, guiding the low-level LLM during learning, rather than relying on a static GenRM. The most closely related works to ours are MAPoRL [Park et al., 2025] and COPYR [Ma et al., 2024]. MAPoRL is a multi-agent debating framework that uses multi-agent reinforcement learning (MARL) with a learned verifier to fine-tune each LLM agent. COPYR duplicates an LLM into two agents, training them simultaneously in the roles of pioneer and observer using RL. Shen et al. [2025] trained with a novel Chain-of-Action-Thought (COAT) framework that embeds meta-action tokens for self-reflection and exploration into an autoregressive search process. 
However, unlike our approach, which explicitly separates metacognition from plan execution, these methods do not decompose the reasoning process but instead focus on improving direct chain-of-thought generation. Furthermore, our experiments are conducted on a larger scale and include more challenging problems." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 469, + 232, + 479 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 469, + 232, + 479 + ], + "spans": [ + { + "bbox": [ + 107, + 469, + 232, + 479 + ], + "type": "text", + "content": "A.3 Hierarchical Reasoning" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 491, + 504, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 491, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 107, + 491, + 504, + 721 + ], + "type": "text", + "content": "Partitioning reasoning into hierarchical processes has been explored in prior research to make biological sense [Ye et al., 2018, Langley et al., 2004]. In the context of language models, a hierarchical structure has been used to facilitate diverse reasoning patterns, including planning [Puerta-Merino et al., 2025, Sun et al., 2024, Song et al., 2023, Rana et al., 2023, Chen et al., 2024d, Yan et al., 2023, Xiao et al., 2024], validation [Haji et al., 2024, Xi et al., 2024] and self-refinement [Madaan et al., 2023, Kumar et al., 2024, Welleck et al., 2022]. For instance, EvalPlanner [Saha et al., 2025b] is a framework that conducts reasoning through plan generation and execution. DOTS [Yue et al., 2024] extends decomposition by integrating a tree-based searching method with Analysis, Solution, and Verification layers. Marco-o1 [Zhao et al., 2024] focuses on open-ended problem-solving and abstract thinking, dynamically adjusting reasoning granularity and incorporating reflection mechanisms to enhance reasoning performance. 
Beyond these approaches, metacognition [Flavell, 1979] has been identified as another critical component of reasoning, referring to the intuitive understanding of one's own cognitive and reasoning processes [Gao et al., 2024, Wang and Zhao, 2023]. Wang and Zhao [2023] proposed a metacognitive prompting strategy to improve large language model (LLM) capabilities. Didolkar et al. [2024] further developed a prompt-guided method that enables models to label math problems with the required skills and subsequently use these labels to solve new problems. Gao et al. [2024] introduce meta-reasoner which use contextual multi-arm bandit to learn a high-level \"advisor\" over low-level reasoning process. Xiang et al. [2025] provides a Meta-CoT framework to think about its own thinking. They use construction-based methods as well as reinforcement learning to develop meta-cognitive skills. Qingsong et al. [2025] introduces a RL framework for dynamic instruction selection during fine-tuning. In our work, we also value reflect-" + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 742, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 742, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 742, + 310, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": "ing on reasoning processes, and we enhance metacognitive abilities through two-agent interaction and reinforcement learning at both end." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 109, + 185, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 109, + 185, + 121 + ], + "spans": [ + { + "bbox": [ + 105, + 109, + 185, + 121 + ], + "type": "text", + "content": "A.4 RL in LLM" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 129, + 506, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 129, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 129, + 506, + 316 + ], + "type": "text", + "content": "Recent advancements in applying RL to LLMs have enhanced their reasoning and decision-making capabilities. Liu et al. [2025] examines token-level optimization biases by introducing Dr. GRPO to stabilize policy gradients. VAPO [Yue et al., 2025b] enhances PPO with value-aware perturbations and adaptive reward shaping to improve robustness in sparse-reward reasoning tasks. DAPO [Yu et al., 2025] provides a scalable, modular RL framework that integrates distributed rollout collection and dynamic replay buffers for reproducible training at scale. SimpleRL-Zoo [Zeng et al., 2025] conducts zero-shot RL experiments across open-base LLMs to uncover emergent cognitive behaviors under minimal reward signals. Echo Chamber [Zhao et al., 2025] investigates how RL fine-tuning algorithms can amplify pretrained model biases and proposes regularization to mitigate over-amplification. Wen et al. [2024] decomposes high-level language actions into token-level operations to achieve finer-grained credit assignment. Some works push RL training for single-turn to multi-turn. Search-R1 [Jin et al., 2025] trains LLMs to orchestrate multi-turn search strategies with RL-optimized decision policies to improve question-answering accuracy. ArCHer [Zhou et al., 2024] employs a hierarchical, multi-turn RL architecture with manager and worker policies to efficiently handle long-horizon dialogue tasks. 
RAGEN [Wang et al., 2025] introduces trajectory filtering and critic modules within a multi-turn RL framework to stabilize learning and reduce shallow policy behaviors." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 332, + 277, + 345 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 332, + 277, + 345 + ], + "spans": [ + { + "bbox": [ + 105, + 332, + 277, + 345 + ], + "type": "text", + "content": "B Limitation and Future Work" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 357, + 504, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 504, + 434 + ], + "type": "text", + "content": "In this work, we only test ReMA on math and LLM-as-a-Judge benchmarks. Though the results show the effectiveness of ReMA, adopting ReMA to tasks where naturally needs multi-turn interaction between several interleaved agents has great potential. Moreover, a comprehensive understanding of the learning dynamics of multi-turn RL and multi-turn MARL for LLMs is needed. Finally, there's still sufficient space to further improve the procedure of multi-turn multi-agent rollout through modern LLM speed up techniques, e.g. prefetch-decode disaggregation and asynchronous rollout." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 450, + 383, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 450, + 383, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 450, + 383, + 464 + ], + "type": "text", + "content": "C Supplementary Materials for Method in Section 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 475, + 271, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 271, + 488 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 271, + 488 + ], + "type": "text", + "content": "C.1 Inference-time Scaling of ReMA" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 496, + 504, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 496, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 496, + 504, + 540 + ], + "type": "text", + "content": "In this section, we discuss how to enhance the inference-time computation of our hierarchical system, specifically focusing on the interaction between the high-level and low-level agents. The total number of model samples required for inference is determined by the product of the sampling budget allocated to each agent." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 545, + 504, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 545, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 545, + 504, + 578 + ], + "type": "text", + "content": "For instance, in a simple single-turn setting, if the high-level agent samples " + }, + { + "bbox": [ + 104, + 545, + 504, + 578 + ], + "type": "inline_equation", + "content": "k_{1}" + }, + { + "bbox": [ + 104, + 545, + 504, + 578 + ], + "type": "text", + "content": " responses and each of these responses leads to " + }, + { + "bbox": [ + 104, + 545, + 504, + 578 + ], + "type": "inline_equation", + "content": "k_{2}" + }, + { + "bbox": [ + 104, + 545, + 504, + 578 + ], + "type": "text", + "content": " samples from the low-level agent, the total number of model calls required is:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 252, + 579, + 357, + 591 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 579, + 357, + 591 + ], + "spans": [ + { + "bbox": [ + 252, + 579, + 357, + 591 + ], + "type": "interline_equation", + "content": "\\text {T o t a l s a m p l e s} = k _ {1} \\times k _ {2}.", + "image_path": "c48a4482b269d65f66e76eb545aed761bdd1e37cb7f681b85c17f366bd01e913.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 596, + 504, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 504, + 651 + ], + "type": "text", + "content": "Given a fixed computational budget, an important question arises: how should the sampling budget be distributed between the high-level and low-level agents to maximize performance? Allocating more samples to the high-level agent may increase diversity in reasoning strategies while allocating more to the low-level agent may yield more refined solutions for a given metacognitive plan." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 656, + 504, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 656, + 504, + 678 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 504, + 678 + ], + "type": "text", + "content": "Another crucial consideration is how to perform reranking on the final outputs. Two potential strategies include:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 132, + 689, + 504, + 723 + ], + "type": "text", + "content": "- Hierarchical reranking: First, for each high-level response, rank and aggregate the low-level responses under it. Then, rank the aggregated results across different high-level responses." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 132, + 72, + 504, + 95 + ], + "type": "text", + "content": "- Flat reranking: Directly rank all sampled responses together, regardless of the hierarchy of high-level reasoning steps." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 104, + 505, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 104, + 505, + 138 + ], + "spans": [ + { + "bbox": [ + 104, + 104, + 505, + 138 + ], + "type": "text", + "content": "Balancing sampling allocation and designing an effective reranking strategy are key challenges in efficiently scaling our multi-agent reasoning system. In the next section, we explore empirical results comparing different allocation strategies and ranking methods." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 150, + 231, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 150, + 231, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 150, + 231, + 163 + ], + "type": "text", + "content": "C.2 Detailed reward design" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 170, + 504, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 170, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 170, + 504, + 194 + ], + "type": "text", + "content": "As described in Sec. 3.2, we update both high-level and low-level agents by assigning rewards based on the low-level policy output. Below, we outline several potential reward designs:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 201, + 504, + 286 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 132, + 201, + 504, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 201, + 504, + 224 + ], + "spans": [ + { + "bbox": [ + 132, + 201, + 504, + 224 + ], + "type": "text", + "content": "- Correctness reward: For tasks with explicit ground truth, we assign rewards based on the correctness of the low-level agent's output." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 226, + 504, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 226, + 504, + 249 + ], + "spans": [ + { + "bbox": [ + 132, + 226, + 504, + 249 + ], + "type": "text", + "content": "- Format reward: For tasks that require a specific output format, we enforce adherence to the prescribed structure by providing a format reward." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 251, + 504, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 251, + 504, + 286 + ], + "spans": [ + { + "bbox": [ + 132, + 251, + 504, + 286 + ], + "type": "text", + "content": "- To encourage the high-level agent to generate informative and unambiguous meta-thinking, and to stabilize the low-level outputs, we reward the high-level agent when the low-level agent produces consistent responses. Specifically, the consistency reward is defined as" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 247, + 290, + 399, + 316 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 290, + 399, + 316 + ], + "spans": [ + { + "bbox": [ + 247, + 290, + 399, + 316 + ], + "type": "interline_equation", + "content": "R _ {h} = \\frac {\\text {m a x o c c u r r e n c e o f a n a n s w e r}}{\\text {t o t a l n u m b e r o f r e s p o n s e s}}.", + "image_path": "6aad5c8b5675d5fc1e3eb597e21d99148f5c18d7820239246afb5d3c07134617.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 322, + 504, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 322, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 104, + 322, + 504, + 357 + ], + "type": "text", + "content": "To examine multi-agent metacognition-integrated reasoning with different reward designs, we experiment with different reward function designs to encourage effective collaboration and structured reasoning. 
Below, we introduce and justify several reward schemes." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 366, + 505, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 366, + 505, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 366, + 505, + 411 + ], + "type": "text", + "content": "1. Correctness and Format-Aware Reward (Base Setting) In our primary reward setting, the system's overall correctness is used as the primary reward signal, supplemented by format-based rewards for both the high-level and low-level agents. Using mathematical problem-solving as an example:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 420, + 504, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 420, + 504, + 444 + ], + "spans": [ + { + "bbox": [ + 132, + 420, + 504, + 444 + ], + "type": "text", + "content": "- Low-level agent " + }, + { + "bbox": [ + 132, + 420, + 504, + 444 + ], + "type": "inline_equation", + "content": "(\\pi_{\\theta_l})" + }, + { + "bbox": [ + 132, + 420, + 504, + 444 + ], + "type": "text", + "content": ": Receives a reward of " + }, + { + "bbox": [ + 132, + 420, + 504, + 444 + ], + "type": "inline_equation", + "content": "+1.0" + }, + { + "bbox": [ + 132, + 420, + 504, + 444 + ], + "type": "text", + "content": " for a correct answer. If the answer is incorrect, the agent is further penalized based on format compliance. 
Specifically:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 149, + 445, + 504, + 491 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 149, + 445, + 504, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 445, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 149, + 445, + 504, + 467 + ], + "type": "text", + "content": "- If the output contains the designated answer-indicating format (e.g., boxed in Latex), it receives " + }, + { + "bbox": [ + 149, + 445, + 504, + 467 + ], + "type": "inline_equation", + "content": "-0.5" + }, + { + "bbox": [ + 149, + 445, + 504, + 467 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 149, + 469, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 469, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 149, + 469, + 504, + 491 + ], + "type": "text", + "content": "- Otherwise, it receives " + }, + { + "bbox": [ + 149, + 469, + 504, + 491 + ], + "type": "inline_equation", + "content": "-1.0" + }, + { + "bbox": [ + 149, + 469, + 504, + 491 + ], + "type": "text", + "content": ", as a missing format often suggests an incomplete or unstructured response." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 495, + 505, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 495, + 505, + 540 + ], + "spans": [ + { + "bbox": [ + 132, + 495, + 505, + 540 + ], + "type": "text", + "content": "- High-level agent " + }, + { + "bbox": [ + 132, + 495, + 505, + 540 + ], + "type": "inline_equation", + "content": "(\\pi_{\\theta_h})" + }, + { + "bbox": [ + 132, + 495, + 505, + 540 + ], + "type": "text", + "content": ": Receives the average correctness of the low-level agent's sampled responses as its reward. 
Additionally, to prevent the high-level agent from directly generating explicit answers instead of guiding reasoning, a strong penalty of " + }, + { + "bbox": [ + 132, + 495, + 505, + 540 + ], + "type": "inline_equation", + "content": "-1.0" + }, + { + "bbox": [ + 132, + 495, + 505, + 540 + ], + "type": "text", + "content": " is applied if it includes an explicit answer format (e.g., boxed)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 550, + 505, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 550, + 505, + 596 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 505, + 596 + ], + "type": "text", + "content": "2. Consistency-Based Reward Instead of using correctness as the high-level reward signal, this approach rewards the high-level agent for promoting consistent responses from the low-level agent, regardless of actual correctness. The consistency reward is defined as the proportion of the most frequently occurring answer among all sampled responses:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 230, + 608, + 504, + 633 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 608, + 504, + 633 + ], + "spans": [ + { + "bbox": [ + 230, + 608, + 504, + 633 + ], + "type": "interline_equation", + "content": "R _ {h} = \\frac {\\text {m a x o c c u r r e n c e o f a n a n s w e r}}{\\text {t o t a l n u m b e r o f r e s p o n s e s}} \\tag {14}", + "image_path": "39510965f995cb6f30887a0e480073cade592bfa898215baa6d1f2f71e71f3c1.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 639, + 504, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 639, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 504, + 685 + ], + "type": "text", + "content": "If the majority of responses do not contain a definitive answer, the reward is set to zero. 
We also add the format penalty to the high-level agent if its output contains the designated answer-indicating format. This incentivizes the high-level agent to guide the low-level agent toward more stable, detailed, reproducible outputs rather than erratic reasoning paths." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "These different reward formulations allow us to investigate various dimensions of metacognitive reasoning: correctness, consistency, etc. We empirically compare their effects on learned metacognitive reasoning patterns in Sec. E.1." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 123, + 76, + 467, + 243 + ], + "blocks": [ + { + "bbox": [ + 123, + 76, + 467, + 243 + ], + "lines": [ + { + "bbox": [ + 123, + 76, + 467, + 243 + ], + "spans": [ + { + "bbox": [ + 123, + 76, + 467, + 243 + ], + "type": "image", + "image_path": "315cafbb1d2055dded17eaabf88749be03cfea772911a86fa7b194d2357e1c64.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 252, + 504, + 308 + ], + "lines": [ + { + "bbox": [ + 104, + 252, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 104, + 252, + 504, + 308 + ], + "type": "text", + "content": "Figure 7: Our method can be viewed as a combination of practical TRPO and block coordinate ascent, with the high and low-level models treated as distinct 
components within a larger neural network. Note that the figure does not represent the exact gradient back-propagation flow but rather highlights the key idea that we separate the high- and low-level models. This separation allows for the independent computation of gradients and the independent training of each model." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 335, + 224, + 345 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 224, + 345 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 224, + 345 + ], + "type": "text", + "content": "C.3 Pseudocode of ReMA" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 357, + 274, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 274, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 274, + 369 + ], + "type": "text", + "content": "The pseudocode is shown in Algorithm 1." + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 106, + 402, + 504, + 589 + ], + "blocks": [ + { + "bbox": [ + 106, + 387, + 247, + 399 + ], + "lines": [ + { + "bbox": [ + 106, + 387, + 247, + 399 + ], + "spans": [ + { + "bbox": [ + 106, + 387, + 247, + 399 + ], + "type": "text", + "content": "Algorithm 1 Single turn MAMRP" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "lines": [ + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "spans": [ + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": "Require: High-level policy " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\pi_h" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": ", Low-level policy " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\pi_l" + }, + { + "bbox": [ + 106, + 402, + 504, + 
589 + ], + "type": "text", + "content": ", Dataset " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": ", Optimizers for " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\pi_h" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\pi_l" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{min}}, \\varepsilon_{\\mathrm{max}}" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " to filter training dataset \n1: Initialize " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\pi_h" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\pi_l" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " \n2: while not converged do \n3: build training dataset " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_l" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\pi_h, \\pi_l, \\varepsilon_{\\mathrm{min}}, \\varepsilon_{\\mathrm{max}}" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " \n4: for Sample " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "(\\mathbf{x}, \\mathbf{m}, 
\\mathbf{y}^*) \\sim \\mathcal{D}_l" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " do \n5: Generate " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\mathbf{y} \\sim \\pi_l(\\mathbf{x}, \\mathbf{m})" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " \n6: Compute low-level reward " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "R_l(\\mathbf{y}, \\mathbf{y}^*)" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " \n7: Update " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\pi_l" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " using " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\nabla_{\\theta_l} \\mathbb{E}[R_l]" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " \n8: end for \n9: build training dataset " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_h" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\pi_h, \\pi_l, \\varepsilon_{\\mathrm{min}}, \\varepsilon_{\\mathrm{max}}" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " \n10: for Sample " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "(\\mathbf{x}, \\mathbf{y}^*) \\sim \\mathcal{D}_h" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " do \n11: Generate " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\mathbf{m} \\sim \\pi_h(\\mathbf{x})" + }, + { + "bbox": [ + 106, + 
402, + 504, + 589 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\mathbf{y} \\sim \\pi_l(\\mathbf{x}, \\mathbf{m})" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " \n12: Compute high-level reward " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "R_h(\\mathbf{m}, \\mathbf{y}, \\mathbf{y}^*)" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " \n13: Update " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\pi_h" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " using " + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "inline_equation", + "content": "\\nabla_{\\theta_h} \\mathbb{E}[R_h]" + }, + { + "bbox": [ + 106, + 402, + 504, + 589 + ], + "type": "text", + "content": " \n14: end for \n15: end while" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "algorithm" + }, + { + "bbox": [ + 105, + 628, + 246, + 639 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 628, + 246, + 639 + ], + "spans": [ + { + "bbox": [ + 105, + 628, + 246, + 639 + ], + "type": "text", + "content": "C.4 Brief convergence analysis" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "text", + "content": "We reuse the notations from Sec. 
3.2, where " + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "text", + "content": " is task prompt, " + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "text", + "content": " is generated answer, " + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "inline_equation", + "content": "\\mathbf{y}^*" + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "text", + "content": " is groundtruth, " + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "inline_equation", + "content": "\\mathbf{m}" + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "text", + "content": " is metacognition on task solving, " + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta_h}" + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta_l}" + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "text", + "content": " are high- and low-level agents with parameters " + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "inline_equation", + "content": "\\theta_h" + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "inline_equation", + "content": "\\theta_l" + }, + { + "bbox": [ + 104, + 651, + 504, + 696 + ], + "type": "text", + "content": ". We consider the joint hierarchical policy defined in Eq. (8) and update the objective as in Eq. (9)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "To leverage existing RL and optimization convergence analysis methods, we treat the two models as components of a larger model, as illustrated in Fig. 7. When updating one model, we treat the other" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 453, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 453, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 453, + 84 + ], + "type": "text", + "content": "model as part of a stationary environment. 
The gradients with respect to " + }, + { + "bbox": [ + 105, + 72, + 453, + 84 + ], + "type": "inline_equation", + "content": "\\theta_h" + }, + { + "bbox": [ + 105, + 72, + 453, + 84 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 105, + 72, + 453, + 84 + ], + "type": "inline_equation", + "content": "\\theta_l" + }, + { + "bbox": [ + 105, + 72, + 453, + 84 + ], + "type": "text", + "content": " are:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 138, + 89, + 470, + 116 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 89, + 470, + 116 + ], + "spans": [ + { + "bbox": [ + 138, + 89, + 470, + 116 + ], + "type": "interline_equation", + "content": "\\nabla_ {\\theta_ {h}} J (\\theta_ {h}, \\theta_ {l}) = \\mathbb {E} _ {\\mathbf {x}, \\mathbf {y} ^ {*}} \\sum_ {\\mathbf {m} \\sim \\pi_ {h} (\\mathbf {m} | \\mathbf {x}; \\theta_ {h})} \\nabla_ {\\theta_ {h}} \\pi_ {h} (\\mathbf {m} | \\mathbf {x}; \\theta_ {h}) \\left[ \\mathbb {E} _ {\\mathbf {y} \\sim \\pi_ {l} (\\mathbf {y} | \\mathbf {x}, \\mathbf {m})} R (\\mathbf {y}, \\mathbf {y} ^ {*}) \\right],", + "image_path": "a86aaf6aa38251d41b64cce9905df386381958a7ede62e6faa107cd8fdf8b032.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 140, + 118, + 411, + 145 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 118, + 411, + 145 + ], + "spans": [ + { + "bbox": [ + 140, + 118, + 411, + 145 + ], + "type": "interline_equation", + "content": "\\nabla_ {\\theta_ {l}} J (\\theta_ {h}, \\theta_ {l}) = \\mathbb {E} _ {\\mathbf {x}, \\mathbf {y} ^ {*}} \\sum_ {\\mathbf {y} \\sim \\pi (\\theta_ {h}, \\theta_ {l})} \\nabla_ {\\theta_ {l}} \\pi_ {l} (\\mathbf {y} \\mid \\mathbf {x}, \\mathbf {m}; \\theta_ {h}); \\theta_ {l}) R (\\mathbf {y}, \\mathbf {y} ^ {*}).", + "image_path": "9b990e4ba70996ec4e3add4f2058c1bf66065120ac3089530de9e367f7c5b882.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 149, + 504, + 171 
+ ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 149, + 504, + 171 + ], + "spans": [ + { + "bbox": [ + 104, + 149, + 504, + 171 + ], + "type": "text", + "content": "We can compute the gradients with log trick and estimate " + }, + { + "bbox": [ + 104, + 149, + 504, + 171 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{\\mathbf{y}\\sim \\pi_l(\\mathbf{y}|\\mathbf{x},\\mathbf{m})}R(\\mathbf{y},\\mathbf{y}^*)" + }, + { + "bbox": [ + 104, + 149, + 504, + 171 + ], + "type": "text", + "content": " with Monte Carlo method." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "spans": [ + { + "bbox": [ + 104, + 177, + 504, + 200 + ], + "type": "text", + "content": "Equipped with the objective function and gradient computation, we update the models iteratively. Without loss of generality, we analyze the case where the high-level policy is updated first:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 238, + 203, + 363, + 224 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 203, + 363, + 224 + ], + "spans": [ + { + "bbox": [ + 238, + 203, + 363, + 224 + ], + "type": "interline_equation", + "content": "\\theta_ {h} ^ {(t + 1)} = \\arg \\max _ {\\theta_ {h}} J (\\theta_ {h}, \\theta_ {l} ^ {(t)}),", + "image_path": "30a64c5f505bf97092d0e646564746319a53da606cabba7e7be05eb736d380ee.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 240, + 226, + 370, + 246 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 226, + 370, + 246 + ], + "spans": [ + { + "bbox": [ + 240, + 226, + 370, + 246 + ], + "type": "interline_equation", + "content": "\\theta_ {l} ^ {(t + 1)} = \\arg \\max _ {\\theta_ {l}} J \\left(\\theta_ {h} ^ {(t + 1)}, \\theta_ {l}\\right).", + "image_path": 
"0848d561d3813403322d9efeb80547bb46375f51bc273a205eae3fdb7377b0aa.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 251, + 504, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 251, + 504, + 295 + ], + "spans": [ + { + "bbox": [ + 104, + 251, + 504, + 295 + ], + "type": "text", + "content": "Regarding the different regularizations " + }, + { + "bbox": [ + 104, + 251, + 504, + 295 + ], + "type": "inline_equation", + "content": "R_{h}" + }, + { + "bbox": [ + 104, + 251, + 504, + 295 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 251, + 504, + 295 + ], + "type": "inline_equation", + "content": "R_{l}" + }, + { + "bbox": [ + 104, + 251, + 504, + 295 + ], + "type": "text", + "content": " in Eqs. (10) and (11) for the different policies, instead of directly integrating them into the loss function, we treat them as constraints, as done in Trust Region Policy Optimization (TRPO) [Schulman et al., 2015]. Note that when one policy is fixed, the other policy operates in a stationary decision process." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 300, + 504, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 504, + 345 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 504, + 345 + ], + "type": "text", + "content": "Based on the defined objective and update method, we apply TRPO and block coordinate ascent. First, recall that when updating a single policy, TRPO guarantees monotonic improvement by optimizing a lower bound. 
Specifically, let " + }, + { + "bbox": [ + 104, + 300, + 504, + 345 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathrm{old}}" + }, + { + "bbox": [ + 104, + 300, + 504, + 345 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 300, + 504, + 345 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 300, + 504, + 345 + ], + "type": "text", + "content": " represent the old and current policies, respectively. We define a surrogate objective as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 205, + 349, + 405, + 376 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 349, + 405, + 376 + ], + "spans": [ + { + "bbox": [ + 205, + 349, + 405, + 376 + ], + "type": "interline_equation", + "content": "L _ {\\pi_ {\\mathrm {o l d}}} (\\pi) = \\mathbb {E} _ {s \\sim \\pi_ {\\mathrm {o l d}}, a \\sim \\pi_ {\\mathrm {o l d}}} \\left[ \\frac {\\pi (a | s)}{\\pi_ {\\mathrm {o l d}} (a | s)} A ^ {\\pi_ {\\mathrm {o l d}}} (s, a) \\right],", + "image_path": "9e34be703eb23abfa3d50c401d5176ae1c7172f8c5c2449d99876c5e8025bbf6.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 378, + 433, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 378, + 433, + 390 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 433, + 390 + ], + "type": "text", + "content": "As shown by Schulman et al. 
[2015], the true objective of " + }, + { + "bbox": [ + 105, + 378, + 433, + 390 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 105, + 378, + 433, + 390 + ], + "type": "text", + "content": " is lower-bounded by:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 198, + 393, + 411, + 411 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 393, + 411, + 411 + ], + "spans": [ + { + "bbox": [ + 198, + 393, + 411, + 411 + ], + "type": "interline_equation", + "content": "J (\\pi) \\geq L _ {\\pi_ {\\mathrm {o l d}}} (\\pi) - C \\cdot \\max _ {s} \\mathrm {K L} [ \\pi_ {\\mathrm {o l d}} (\\cdot | s), \\pi (\\cdot | s) ],", + "image_path": "a5f4f993c4054904cf79c154022c7bf8db9d847e25e2e01b15d142fb8abc2254.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 415, + 504, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 504, + 448 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 504, + 448 + ], + "type": "text", + "content": "for some constant " + }, + { + "bbox": [ + 104, + 415, + 504, + 448 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 415, + 504, + 448 + ], + "type": "text", + "content": ". By optimizing the right-hand side of the above inequality, we are guaranteed to improve the performance of " + }, + { + "bbox": [ + 104, + 415, + 504, + 448 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 415, + 504, + 448 + ], + "type": "text", + "content": ". 
Therefore, for policies " + }, + { + "bbox": [ + 104, + 415, + 504, + 448 + ], + "type": "inline_equation", + "content": "\\pi^t" + }, + { + "bbox": [ + 104, + 415, + 504, + 448 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 415, + 504, + 448 + ], + "type": "inline_equation", + "content": "\\pi^{t + 1}" + }, + { + "bbox": [ + 104, + 415, + 504, + 448 + ], + "type": "text", + "content": " obtained from iterations " + }, + { + "bbox": [ + 104, + 415, + 504, + 448 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 415, + 504, + 448 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 415, + 504, + 448 + ], + "type": "inline_equation", + "content": "t + 1" + }, + { + "bbox": [ + 104, + 415, + 504, + 448 + ], + "type": "text", + "content": " using the TRPO method, we have:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 266, + 451, + 342, + 466 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 266, + 451, + 342, + 466 + ], + "spans": [ + { + "bbox": [ + 266, + 451, + 342, + 466 + ], + "type": "interline_equation", + "content": "J (\\pi^ {t + 1}) \\geq J (\\pi^ {t}).", + "image_path": "03990842fdfe887957593c9afc250ca8d106f43dd9a3ebffb41479bc04e5ab4b.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 474, + 504, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 474, + 504, + 528 + ], + "spans": [ + { + "bbox": [ + 104, + 474, + 504, + 528 + ], + "type": "text", + "content": "Now, returning to our updating method, we treat the high- and low-level policies as two blocks of a single agent. The iterative update process can thus be viewed as a cyclic block coordinate ascent, where the two policies are updated in a fixed order. 
By updating each block using the TRPO method, and improving the surrogate objective within the KL constraint, each block update does not decrease " + }, + { + "bbox": [ + 104, + 474, + 504, + 528 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 104, + 474, + 504, + 528 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 244, + 533, + 347, + 546 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 533, + 347, + 546 + ], + "spans": [ + { + "bbox": [ + 244, + 533, + 347, + 546 + ], + "type": "interline_equation", + "content": "J \\left(\\theta_ {h} ^ {t + 1}, \\theta_ {l} ^ {t}\\right) \\geq J \\left(\\theta_ {h} ^ {t}, \\theta_ {l} ^ {t}\\right),", + "image_path": "96e699afab51d562b257dd125436f136294ebd481918e9f0ef1d39366685d3c4.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 246, + 548, + 365, + 563 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 548, + 365, + 563 + ], + "spans": [ + { + "bbox": [ + 246, + 548, + 365, + 563 + ], + "type": "interline_equation", + "content": "J \\left(\\theta_ {h} ^ {t + 1}, \\theta_ {l} ^ {t + 1}\\right) \\geq J \\left(\\theta_ {h} ^ {t + 1}, \\theta_ {l} ^ {t}\\right).", + "image_path": "710b61fd23f78b9a946f7e235ed8fddd79aad1dd33ac36e38c69d6c2a28224e2.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 572, + 504, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 572, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 572, + 504, + 597 + ], + "type": "text", + "content": "Thus " + }, + { + "bbox": [ + 104, + 572, + 504, + 597 + ], + "type": "inline_equation", + "content": "J(\\theta_h^{t + 1},\\theta_l^{t + 1})\\geq J(\\theta_h^t,\\theta_l^t)" + }, + { + "bbox": [ + 104, + 572, + 504, + 597 + ], + "type": "text", + "content": ". 
This repeated coordinate maximization converges to a fixed point, where no single coordinate update can further improve " + }, + { + "bbox": [ + 104, + 572, + 504, + 597 + ], + "type": "inline_equation", + "content": "J(\\theta_h,\\theta_l)" + }, + { + "bbox": [ + 104, + 572, + 504, + 597 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 504, + 635 + ], + "type": "text", + "content": "Given the theoretical monotonic improvement with TRPO and block coordinate ascent, we adopt a practical version of TRPO in our experiments, specifically Proximal Policy Optimization (PPO) [Schulman et al., 2017] or GRPO [Shao et al., 2024]." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 647, + 415, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 647, + 415, + 658 + ], + "spans": [ + { + "bbox": [ + 104, + 647, + 415, + 658 + ], + "type": "text", + "content": "C.5 Learning to reason from the perspective of Leader Follower Game" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 667, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 505, + 723 + ], + "type": "text", + "content": "Besides the loss function in the main part, we also propose to frame the problem as a leader-follower game. By analyzing the equilibria of the leader-follower game, we demonstrate that our framework inherently identifies the optimal sub-tasks aligned with the capabilities of the low-level model. This ensures that the high-level decisions are guided by the low-level model's strengths, leading to more efficient and targeted task decomposition." 
+ } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 234, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 234, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 234, + 85 + ], + "type": "text", + "content": "C.5.1 Leader-follower game" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 91, + 504, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 91, + 504, + 169 + ], + "spans": [ + { + "bbox": [ + 104, + 91, + 504, + 169 + ], + "type": "text", + "content": "The leader-follower game, also known as the Stackelberg game, models interaction between two agents with parametrized strategies " + }, + { + "bbox": [ + 104, + 91, + 504, + 169 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta} = (\\pmb{\\theta}_1, \\pmb{\\theta}_2)" + }, + { + "bbox": [ + 104, + 91, + 504, + 169 + ], + "type": "text", + "content": " and differentiable objective functions " + }, + { + "bbox": [ + 104, + 91, + 504, + 169 + ], + "type": "inline_equation", + "content": "(\\mathcal{L}_1, \\mathcal{L}_2): \\mathbb{R}^d \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 91, + 504, + 169 + ], + "type": "text", + "content": ". In this framework, the leader announces its strategy first, and the follower observes this decision to respond optimally. This sequential structure enables the leader to anticipate the follower's reaction and adjust its strategy accordingly. A Stackelberg equilibrium occurs when neither agent can unilaterally improve its objective. 
Denoting " + }, + { + "bbox": [ + 104, + 91, + 504, + 169 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_1" + }, + { + "bbox": [ + 104, + 91, + 504, + 169 + ], + "type": "text", + "content": " as the leader's strategy and " + }, + { + "bbox": [ + 104, + 91, + 504, + 169 + ], + "type": "inline_equation", + "content": "\\pmb{\\theta}_2" + }, + { + "bbox": [ + 104, + 91, + 504, + 169 + ], + "type": "text", + "content": " as the follower's, the loss functions " + }, + { + "bbox": [ + 104, + 91, + 504, + 169 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_1" + }, + { + "bbox": [ + 104, + 91, + 504, + 169 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 91, + 504, + 169 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_2" + }, + { + "bbox": [ + 104, + 91, + 504, + 169 + ], + "type": "text", + "content": " are optimized with the following bi-level structure:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 172, + 173, + 436, + 188 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 172, + 173, + 436, + 188 + ], + "spans": [ + { + "bbox": [ + 172, + 173, + 436, + 188 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\theta} _ {1} ^ {*} = \\operatorname {a r g m i n} _ {\\boldsymbol {\\theta} _ {1}} \\mathcal {L} _ {1} (\\boldsymbol {\\theta}, \\boldsymbol {\\theta} _ {2} ^ {*} (\\boldsymbol {\\theta} _ {1})), \\quad \\boldsymbol {w} _ {2} ^ {*} (\\boldsymbol {\\theta} _ {1}) = \\operatorname {a r g m i n} _ {\\boldsymbol {\\theta} _ {2}} \\mathcal {L} _ {2} (\\boldsymbol {\\theta} _ {1}, \\boldsymbol {\\theta} _ {2}).", + "image_path": "19d913b0dca111c638beefd731775a3856eed9a49ce53f07143ec8a8447cb2c4.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 195, + 504, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 195, + 504, + 273 + ], + "spans": [ + { + "bbox": [ + 104, + 195, + 504, + 273 + ], + "type": "text", + 
"content": "Anil et al. [2021] apply the leader-follower game to ensure checkable answers in a prover-verifier game (PVG). The objective is a verifier that is both complete (accepts all correct proofs from a verifier) and sound (rejects all incorrect proofs from a verifier). They analyze different scenarios where the verifier acts as the leader, the prover as the follower, and both announce strategies simultaneously, forming a Nash equilibrium. The study concludes that in verifier-led SVG, a Stackelberg equilibrium is both necessary and sufficient for achieving a sound and complete verifier, whereas in other configurations, a Stackelberg equilibrium is not necessary or sufficient for this outcome." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 284, + 211, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 284, + 211, + 295 + ], + "spans": [ + { + "bbox": [ + 105, + 284, + 211, + 295 + ], + "type": "text", + "content": "C.5.2 Efficacy of LLM" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 302, + 504, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 302, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 302, + 504, + 411 + ], + "type": "text", + "content": "Because the high-level policy possesses strong generalization capabilities, it is impractical for it to exhaustively explore every potential sub-task for each question. Instead, it naturally focuses on tasks within a feasible range of difficulty, leveraging only a limited set of coarse planning actions. Rather than pinpointing perfectly tailored sub-tasks, the policy searches for general tasks of particular computational complexity, i.e., difficulty, that it can handle reliably. Motivated by this perspective, we incorporate the concept of a reasoning boundary for large language models (LLMs) [Chen et al., 2024b]. 
Intuitively, the reasoning boundary circumscribes the maximum difficulty of problems a model can solve at a desired accuracy level. Formally, for a model " + }, + { + "bbox": [ + 104, + 302, + 504, + 411 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 302, + 504, + 411 + ], + "type": "text", + "content": ", a task " + }, + { + "bbox": [ + 104, + 302, + 504, + 411 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 302, + 504, + 411 + ], + "type": "text", + "content": ", and a predefined threshold " + }, + { + "bbox": [ + 104, + 302, + 504, + 411 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 104, + 302, + 504, + 411 + ], + "type": "text", + "content": ", the reasoning boundary of " + }, + { + "bbox": [ + 104, + 302, + 504, + 411 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 302, + 504, + 411 + ], + "type": "text", + "content": " represents the maximum problem difficulty " + }, + { + "bbox": [ + 104, + 302, + 504, + 411 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 302, + 504, + 411 + ], + "type": "text", + "content": " that satisfies:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 220, + 415, + 389, + 434 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 415, + 389, + 434 + ], + "spans": [ + { + "bbox": [ + 220, + 415, + 389, + 434 + ], + "type": "interline_equation", + "content": "\\mathcal {B} _ {A c c = A} (t | \\theta) = \\sup _ {d} \\{d | A c c (t | d, \\theta) = A \\}.", + "image_path": "032a64c3b322e4687ee8458b7f6835cd1e8b820c05079fa11e7c0ad3ebd2c1ec.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 439, + 504, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 439, + 504, + 483 + ], + "spans": [ + { + "bbox": [ + 104, + 439, + 504, + 483 + ], + "type": "text", + "content": "where " + }, + { + "bbox": 
[ + 104, + 439, + 504, + 483 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 439, + 504, + 483 + ], + "type": "text", + "content": " denotes the problem difficulty. By quantifying the difficulty level a model can reliably handle, the reasoning boundary provides a systematic way to align the high-level policy's focus with the model's actual capabilities, gauge the efficacy of the low-level policy, and determine the optimal strategy for solving the question." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 495, + 324, + 507 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 324, + 507 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 324, + 507 + ], + "type": "text", + "content": "C.5.3 Leader-follower Game for LLM Reasoning" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 514, + 504, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 504, + 536 + ], + "type": "text", + "content": "Our goal is to find the high-level policy that searches for the sub-task sequence based on the efficacy of the low-level policy to solve the question. 
We design the loss functions as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 192, + 541, + 410, + 554 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 541, + 410, + 554 + ], + "spans": [ + { + "bbox": [ + 192, + 541, + 410, + 554 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {h} = \\mathbb {E} _ {(x, y) \\sim p _ {D}, t _ {1: K}} \\left[ - \\log \\pi_ {l} \\left(y _ {K} \\mid x, t _ {1: K}, y _ {1: K - 1}\\right) \\right],", + "image_path": "c7f26b9a321108578bb007fb3a44a3b27e7e213fbbdc726184ec19e80c57653f.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 192, + 556, + 418, + 568 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 556, + 418, + 568 + ], + "spans": [ + { + "bbox": [ + 192, + 556, + 418, + 568 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {l} = \\mathbb {E} _ {x \\sim p _ {D}, t _ {1: k} \\sim \\pi_ {h}, \\hat {y} _ {k} \\sim \\pi_ {l}} \\left[ - r \\left(y _ {k}, \\hat {y} _ {k} \\mid x, t _ {1: k}, y _ {1: k - 1}\\right) \\right],", + "image_path": "b1511f7d2538127a540c6110fe627d96c51df91db3b849ba95d19fedfe10fad6.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 572, + 504, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 572, + 504, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 572, + 504, + 628 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 572, + 504, + 628 + ], + "type": "inline_equation", + "content": "r(y_k, \\hat{y}_k \\mid x, t_{1:k}, y_{1:k-1})" + }, + { + "bbox": [ + 104, + 572, + 504, + 628 + ], + "type": "text", + "content": " represents the step reward for the correctness of " + }, + { + "bbox": [ + 104, + 572, + 504, + 628 + ], + "type": "inline_equation", + "content": "\\hat{y}_k" + }, + { + "bbox": [ + 104, + 572, + 504, + 628 + ], + "type": "text", + "content": " derived from the question " + }, + { + "bbox": [ 
+ 104, + 572, + 504, + 628 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 572, + 504, + 628 + ], + "type": "text", + "content": ", the sub-task sequence " + }, + { + "bbox": [ + 104, + 572, + 504, + 628 + ], + "type": "inline_equation", + "content": "t_{1:k}" + }, + { + "bbox": [ + 104, + 572, + 504, + 628 + ], + "type": "text", + "content": " from the high policy and prior intermediate answer " + }, + { + "bbox": [ + 104, + 572, + 504, + 628 + ], + "type": "inline_equation", + "content": "y_{1:k-1}" + }, + { + "bbox": [ + 104, + 572, + 504, + 628 + ], + "type": "text", + "content": ". The loss functions can be interpreted as follows: the high-level policy is incentivized to find subtasks that lead to the correct answer based on the capabilities of the low-level policy, while the low-level policy is incentivized to enhance its instruction-following ability." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 632, + 504, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 504, + 689 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 504, + 689 + ], + "type": "text", + "content": "How to minimize the loss functions and whether such minimization leads to the desired results remain questions. To explore this, we consider a simplified case of our method, where the high-level policy plans the complete sub-task sequence at the beginning and the low-level executes the instruction in a single interaction. 
The corresponding parameterized policies are defined as " + }, + { + "bbox": [ + 104, + 632, + 504, + 689 + ], + "type": "inline_equation", + "content": "\\pi_h((t_1,\\ldots ,t_K)\\mid x)" + }, + { + "bbox": [ + 104, + 632, + 504, + 689 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 632, + 504, + 689 + ], + "type": "inline_equation", + "content": "\\pi_l((\\hat{y}_1,\\dots ,\\hat{y}_K)\\mid x,(t_1,\\dots ,t_K))" + }, + { + "bbox": [ + 104, + 632, + 504, + 689 + ], + "type": "text", + "content": " . The corresponding loss functions are:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 192, + 693, + 503, + 704 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 693, + 503, + 704 + ], + "spans": [ + { + "bbox": [ + 192, + 693, + 503, + 704 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {h} = \\mathbb {E} _ {(x, y) \\sim p _ {D}, t _ {1: K}} \\left[ - \\log \\pi_ {l} \\left(y _ {K} \\mid x, t _ {1: K}\\right) \\right], \\tag {15}", + "image_path": "913cf3facfbd77150667cd4b4404381ce5bcddd9c76f1a6d393a0c01bfae6aa8.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 192, + 708, + 503, + 719 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 708, + 503, + 719 + ], + "spans": [ + { + "bbox": [ + 192, + 708, + 503, + 719 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {l} = \\mathbb {E} _ {x \\sim p _ {D}, t _ {1: k} \\sim \\pi_ {h}, \\hat {y} _ {k} \\sim \\pi_ {l}} \\left[ - r \\left(y _ {k}, \\hat {y} _ {k} \\mid x, t _ {1: k}, y _ {1: k - 1}\\right) \\right]. 
\\tag {16}", + "image_path": "b18d5455edfb4a76628089dd0bafcca0f64a4b3c8b3c93ede341d38248ca05a0.jpg" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "In this step, the high-level policy generates the entire sub-task sequence without relying on intermediate answers, while the low-level policy follows the sequence to produce answers for the sub-tasks. The low-level policy can still leverage prior intermediate answers to sequentially refine its responses." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 110, + 504, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 504, + 189 + ], + "type": "text", + "content": "To analyze the result agents by minimizing the loss functions, we adopt the completeness and soundness properties from the PVG framework for LLM reasoning. Specifically, if the high-level policy generates a sub-task sequence that is executable within the low-level policy's capabilities, the problem must be solved (completeness). Conversely, if the sub-task sequence is incorrect or beyond the low-level policy's capacity, the problem cannot be solved (soundness). To achieve this, we utilize the conclusion from Anil et al. 
[2021], which positions the low-level policy as the leader and the high-level policy as the follower, equilibria guarantee the complete and sound low-level policy." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 192, + 506, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 192, + 506, + 302 + ], + "spans": [ + { + "bbox": [ + 104, + 192, + 506, + 302 + ], + "type": "text", + "content": "When the high-level policy takes the lead, the low-level policy is forced to adapt to the specific strategy defined by the high-level policy, which can result in neither complete nor sound low-level policy. For example, if the high-level policy dictates that it will only generate sub-tasks involving addition and subtraction, the low-level policy is constrained to optimize only for these tasks. While they may reach an equilibrium, the low-level policy remains incomplete, and this limitation impacts both policies. In the case of the simultaneous PVG game, convergence to a Nash equilibrium is possible, but it is not sufficient for completeness and soundness. For instance, the low-level policy might disregard the high-level policy entirely (e.g., if the high-level provides incorrect instructions, but the low-level still performs correctly). This approach, however, is challenging to implement due to the significantly larger search space involved." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 307, + 504, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 307, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 307, + 504, + 373 + ], + "type": "text", + "content": "Furthermore, the loss functions we design ensure that, at a Stackelberg equilibrium, the high-level policy identifies sub-task sequences that the low-level policy can execute to solve the problem with the highest probability. With the low-level policy acting as the leader, it establishes its reasoning boundary for tasks. 
Based on the reasoning boundary, let " + }, + { + "bbox": [ + 104, + 307, + 504, + 373 + ], + "type": "inline_equation", + "content": "\\theta_h" + }, + { + "bbox": [ + 104, + 307, + 504, + 373 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 307, + 504, + 373 + ], + "type": "inline_equation", + "content": "\\theta_l" + }, + { + "bbox": [ + 104, + 307, + 504, + 373 + ], + "type": "text", + "content": " represent the policy parameters for the high-level and low-level policies, respectively. The probability that the low-level policy correctly solves the question is defined as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 225, + 377, + 384, + 410 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 377, + 384, + 410 + ], + "spans": [ + { + "bbox": [ + 225, + 377, + 384, + 410 + ], + "type": "interline_equation", + "content": "\\pi_ {l} \\left(y _ {K} \\mid x, t _ {1: K}\\right) = \\prod_ {k = 1} ^ {K} \\operatorname {A c c} \\left(t _ {k} \\mid x, \\theta_ {l}\\right),", + "image_path": "abdc48d2d97eab951968fc5353ec9380321ab89f488c157876a9f268dee813a0.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 415, + 504, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 504, + 459 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 504, + 459 + ], + "type": "text", + "content": "where we can compute the difficulty " + }, + { + "bbox": [ + 104, + 415, + 504, + 459 + ], + "type": "inline_equation", + "content": "d_{k}" + }, + { + "bbox": [ + 104, + 415, + 504, + 459 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 104, + 415, + 504, + 459 + ], + "type": "inline_equation", + "content": "t_k" + }, + { + "bbox": [ + 104, + 415, + 504, + 459 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 415, + 504, + 459 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 415, + 504, + 459 + ], + "type": 
"text", + "content": ". where the difficulty " + }, + { + "bbox": [ + 104, + 415, + 504, + 459 + ], + "type": "inline_equation", + "content": "d_{k}" + }, + { + "bbox": [ + 104, + 415, + 504, + 459 + ], + "type": "text", + "content": " can be derived from " + }, + { + "bbox": [ + 104, + 415, + 504, + 459 + ], + "type": "inline_equation", + "content": "t_k" + }, + { + "bbox": [ + 104, + 415, + 504, + 459 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 415, + 504, + 459 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 415, + 504, + 459 + ], + "type": "text", + "content": ". The loss function in Eq. (15) ensures that the selected sub-tasks are optimal for the low-level policy. Here we provide a theoretical condition under which the most efficient solution strategy can be identified, according to the efficacy of the LLM." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 464, + 504, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 504, + 530 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 504, + 530 + ], + "type": "text", + "content": "This approach can be viewed as a game between a high-level \"prover\" and a low-level \"verifier\". The verifier, representing the low-level policy, adheres the high-level policy's instructions to validate its reasoning. Unlike the classic PVG setting, where the prover has ground-truth labels, the label of our high-level policy depends on the tunable low-level policy. This distinction, where the low-level policy (leader) is inherently more complex, contrasts with traditional PVG setups and adds complexity due to the interdependence between the high- and low-level policies." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 534, + 504, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 534, + 504, + 568 + ], + "spans": [ + { + "bbox": [ + 104, + 534, + 504, + 568 + ], + "type": "text", + "content": "By framing the problem-solving process as a leader-follower game, with the low-level policy designated as the leader, we can construct a bi-level optimization problem to identify an equilibrium. Following the formulation in Sec. C.5.1, the problem is expressed as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 182, + 571, + 426, + 592 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 571, + 426, + 592 + ], + "spans": [ + { + "bbox": [ + 182, + 571, + 426, + 592 + ], + "type": "interline_equation", + "content": "\\theta_ {l} ^ {*} = \\underset {\\theta_ {l}} {\\arg \\min } \\mathcal {L} _ {l} (\\theta_ {h} ^ {*} (\\theta_ {l}), \\theta_ {l}) \\quad \\theta_ {h} ^ {*} (\\theta_ {l}) = \\underset {\\theta_ {l}} {\\arg \\min } \\mathcal {L} _ {h} (\\theta_ {h}, \\theta_ {l}).", + "image_path": "949357e9e329a27c903f62439c403346a563277fe176cc0143fd18f127825c52.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 596, + 317, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 596, + 317, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 317, + 608 + ], + "type": "text", + "content": "Then we can apply bi-level optimization techniques." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 623, + 212, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 623, + 212, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 623, + 212, + 636 + ], + "type": "text", + "content": "D Training Details" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 647, + 212, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 647, + 212, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 212, + 658 + ], + "type": "text", + "content": "D.1 Single-turn ReMA" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 666, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 504, + 723 + ], + "type": "text", + "content": "We refer to Appendix G for prompts we use during training. We implement the training pipeline with OpenRLHF [Hu et al., 2024a] which is a highly efficient codebase and is easy to scale up. We select REINFORCE++ to save resources and for efficient training. All experiments are conducted in a node of 8 NVIDIA A100 GPUs. We use bf16, Zero2, Flash-Attention and gradient checkpointing to run our experiments." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 149 + ], + "type": "text", + "content": "During rollout, we set temperature " + }, + { + "bbox": [ + 104, + 72, + 504, + 149 + ], + "type": "inline_equation", + "content": "= 1.0" + }, + { + "bbox": [ + 104, + 72, + 504, + 149 + ], + "type": "text", + "content": ", top_p " + }, + { + "bbox": [ + 104, + 72, + 504, + 149 + ], + "type": "inline_equation", + "content": "= 1.0" + }, + { + "bbox": [ + 104, + 72, + 504, + 149 + ], + "type": "text", + "content": ", top_k " + }, + { + "bbox": [ + 104, + 72, + 504, + 149 + ], + "type": "inline_equation", + "content": "= -1" + }, + { + "bbox": [ + 104, + 72, + 504, + 149 + ], + "type": "text", + "content": ", and use vLLM for inference acceleration. We set the max generation length to be 2048 and, the rollout batch size to be 1000. The number of samples per prompt is 4. During training, we use Adam Optimizer with a learning rate of 5e-7. We set the mini-batch size to be 500, and the clip ratio to be 0.2. Other hyperparameters, such as KL coefficients and the number of training episodes, were carefully tuned based on validation set performance to ensure robust and reliable results. To align with the hyperparameter in OpenRLHF, we use #Training Episode as the number of reinforcement learning epoch on the entire dataset." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "text", + "content": "In ReMA, during prompt filtering of the high-level model, the high-level agent first samples 10 candidates for each question with " + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "inline_equation", + "content": "t = 1.0" + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "text", + "content": ", and for each output the low-level agents sample 1 solution with " + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "inline_equation", + "content": "t = 0.0" + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "text", + "content": ", then we select questions of success rate between " + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "inline_equation", + "content": "[\\varepsilon_{\\mathrm{min}}, \\varepsilon_{\\mathrm{max}}]" + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "text", + "content": ". 
And for the low-level agent's prompt filtering, the high-level agent first samples 1 candidate for each question with " + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "inline_equation", + "content": "t = 0.0" + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "text", + "content": " and for each output the low-level agents sample 10 solutions with " + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "inline_equation", + "content": "t = 1.0" + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "text", + "content": ", then we select questions of success rate between " + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "inline_equation", + "content": "[\\varepsilon_{\\mathrm{min}}, \\varepsilon_{\\mathrm{max}}]" + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "text", + "content": " and use the high-level agent to sample 4 meta-thoughts with " + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "inline_equation", + "content": "t = 1.0" + }, + { + "bbox": [ + 104, + 154, + 506, + 233 + ], + "type": "text", + "content": " as the input." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 243, + 301, + 255 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 243, + 301, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 243, + 301, + 255 + ], + "type": "text", + "content": "D.1.1 Supervised fine-tuning data collection" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 261, + 506, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 261, + 506, + 382 + ], + "spans": [ + { + "bbox": [ + 104, + 261, + 506, + 382 + ], + "type": "text", + "content": "For experiments in Sec. 4.2.1, we collect expert data to enhance the reasoning pattern, i.e. 
" + }, + { + "bbox": [ + 104, + 261, + 506, + 382 + ], + "type": "inline_equation", + "content": "RL" + }, + { + "bbox": [ + 104, + 261, + 506, + 382 + ], + "type": "text", + "content": " from SFT. Specifically, we collect demonstration data from GPT-4o Mini on MATH training dataset (7.5k problems) Hendrycks et al. [2021] and use it to fine-tune the LLMs. The data generation follows these steps: First, we prompt GPT-4o Mini to produce metacognitive reasoning for high-level model training. Specifically, we use different prompts to instruct it to rewrite and decompose a given question without providing a final answer. We collect metacognitive reasoning using two predefined actions, \"rewrite\" and \"decompose\", which align with human approaches to complex problem-solving while preserving answer diversity. Next, we use the generated instructions to prompt GPT-4o Mini to follow the metacognitive steps and solve the question, obtaining SFT data for low-level policy training. Below, we present the prompts used for both high-level and low-level models. Prompts can be found in Appendix G.1.1." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 393, + 302, + 404 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 393, + 302, + 404 + ], + "spans": [ + { + "bbox": [ + 105, + 393, + 302, + 404 + ], + "type": "text", + "content": "D.1.2 Dataset Curation of RewardBench970" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 110, + 445, + 501, + 556 + ], + "blocks": [ + { + "bbox": [ + 105, + 422, + 504, + 444 + ], + "lines": [ + { + "bbox": [ + 105, + 422, + 504, + 444 + ], + "spans": [ + { + "bbox": [ + 105, + 422, + 504, + 444 + ], + "type": "text", + "content": "Table 2: Performance on LLM-as-a-Judge benchmarks, trained on dataset under the loose setting. 
The two-agent workflow in ReMA" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 110, + 445, + 501, + 556 + ], + "lines": [ + { + "bbox": [ + 110, + 445, + 501, + 556 + ], + "spans": [ + { + "bbox": [ + 110, + 445, + 501, + 556 + ], + "type": "table", + "html": "
ModelBenchmarkVRP(CoT)\\( \\mathbf{V R P_{R L}} \\)\\( \\mathbf{M R P_{R L}} \\)ReMA(Ours)
Llama3.1-8B-InstructRewardBench97071.2481.86 (+10.62)80.41 (+9.17)86.29 (+15.05)
JudgeBench51.7751.45 (-0.32)50.65 (-1.12)53.71 (+1.94)
Average61.5166.65 (+5.14)65.53 (+4.02)70.00 (+8.49)
Qwen2.5-7B-InstructRewardBench97086.4987.22 (+0.73)80.31 (-6.18)90.72 (+4.23)
JudgeBench58.3954.84 (-3.55)55.81 (-2.58)58.71 (+0.32)
Average72.4471.03 (-1.41)68.06 (-4.38)74.72 (+2.28)
", + "image_path": "13ee9b755412f0ddda27dfb3a4338a0ead1c5a78cddf2ef644b3f886d89c5815.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 568, + 504, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 568, + 504, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 568, + 504, + 592 + ], + "type": "text", + "content": "We process the original dataset in RewardBench by splitting it into a training set containing 5,000 tuples of (instruction, response A, response B) and a test set with the remaining 970 tuples." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 596, + 406, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 596, + 406, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 406, + 608 + ], + "type": "text", + "content": "To ensure a meaningful dataset split, we validate two separation strategies:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 616, + 504, + 664 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 132, + 616, + 504, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 616, + 504, + 638 + ], + "spans": [ + { + "bbox": [ + 132, + 616, + 504, + 638 + ], + "type": "text", + "content": "- Loose setting: We only ensure that there is no direct overlap of tuples between the training and test sets." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 641, + 504, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 641, + 504, + 664 + ], + "spans": [ + { + "bbox": [ + 132, + 641, + 504, + 664 + ], + "type": "text", + "content": "- Strict setting: We further enforce that no instruction appears in both the training and test sets. The results for this setting are presented in the main results (Table 1b)." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 672, + 504, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 672, + 504, + 695 + ], + "spans": [ + { + "bbox": [ + 104, + 672, + 504, + 695 + ], + "type": "text", + "content": "Additionally, since the original RewardBench data originates from different subsets, we ensure that all original subsets are evenly represented in both the training and test sets." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "Table 2 reports the learning performance of various methods under the loose dataset split setting. Compared to the results in Table 1b, ReMA significantly outperforms other RL tuning baselines" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "across all models, particularly on out-of-distribution (OOD) benchmarks. The consistent improvements on OOD datasets of these two settings suggest that ReMA enhances meta-thinking ability, resulting in better generalization across diverse task distributions." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 118, + 224, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 118, + 224, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 118, + 224, + 129 + ], + "type": "text", + "content": "D.1.3 Training on MATH" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 137, + 504, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 137, + 504, + 170 + ], + "spans": [ + { + "bbox": [ + 104, + 137, + 504, + 170 + ], + "type": "text", + "content": "VRP For Llama3-8B-Instruct, Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 12,6,6 for these 3 models respectively. For Llama3-8B-Instruct, we set the learning rate of 2e-7 for stable training." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 182, + 504, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 504, + 205 + ], + "type": "text", + "content": "MRP For Llama3-8B-Instruct, Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 10,6,6 for these 3 models respectively." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 216, + 504, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 504, + 260 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 504, + 260 + ], + "type": "text", + "content": "MAMRP We use " + }, + { + "bbox": [ + 104, + 216, + 504, + 260 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{min}} = 0.2, \\varepsilon_{\\mathrm{max}} = 0.8" + }, + { + "bbox": [ + 104, + 216, + 504, + 260 + ], + "type": "text", + "content": " for prompt filtering. 
We use the same #Training Episode=4 for all models, and for #Update Iteration, we use 3 for Llama3-8B-Instruct and Llama3.1-8B-Instruct, 10 for Qwen2.5-7B-Instruct. And we set the KL coefficient to be 1e-2 for all the 3 models." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 272, + 255, + 284 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 272, + 255, + 284 + ], + "spans": [ + { + "bbox": [ + 105, + 272, + 255, + 284 + ], + "type": "text", + "content": "D.1.4 Training on Reward Bench" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 291, + 504, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 291, + 504, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 291, + 504, + 315 + ], + "type": "text", + "content": "VRP For Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 4,6 for these 2 models respectively." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 325, + 504, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 325, + 504, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 325, + 504, + 348 + ], + "type": "text", + "content": "MRP For Llama3.1-8B-Instruct, and Qwen2.5-7B-Instruct, we all use a KL coefficient of 1e-2, and for #Training Episode, we use 4,6 for these 2 models respectively." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 360, + 504, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 360, + 504, + 416 + ], + "spans": [ + { + "bbox": [ + 104, + 360, + 504, + 416 + ], + "type": "text", + "content": "MAMRP We set #Update Iteration=1 for all models. We set the KL coefficient to be 1e-2 for Llama3.1-8B-Instruct and 1e-2 for Qwen2.5-7B-Instruct all models. 
For Llama3.1-8B-Instruct, we use " + }, + { + "bbox": [ + 104, + 360, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{min}} = 0.2" + }, + { + "bbox": [ + 104, + 360, + 504, + 416 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 360, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{max}} = 0.8" + }, + { + "bbox": [ + 104, + 360, + 504, + 416 + ], + "type": "text", + "content": " for prompt filtering and we use #Training Episode of 2 during training. For Llama3.1-8B-Instruct, we use " + }, + { + "bbox": [ + 104, + 360, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{min}} = 0.1" + }, + { + "bbox": [ + 104, + 360, + 504, + 416 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 360, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\mathrm{max}} = 0.9" + }, + { + "bbox": [ + 104, + 360, + 504, + 416 + ], + "type": "text", + "content": " for prompt filtering and we use #Training Episode of 1 during training." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 429, + 209, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 429, + 209, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 429, + 209, + 440 + ], + "type": "text", + "content": "D.2 Multi-turn ReMA" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 449, + 504, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 449, + 504, + 493 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 504, + 493 + ], + "type": "text", + "content": "We refer to Appendix G for prompts we use during training. We implement a multi-turn ReMA training pipeline with VeRL [Sheng et al., 2024] since it's easier to implement complex training pipeline with a single centralized controller. Similar to OpenRLHF, VeRL is also a highly efficient and scalable codebase for further development." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 498, + 504, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 498, + 504, + 575 + ], + "spans": [ + { + "bbox": [ + 104, + 498, + 504, + 575 + ], + "type": "text", + "content": "For the multi-turn ReMA rollout, we use parameter sharing and simultaneous update by default. In details, we maintain two message lists with the system prompt of meta-thinking agent and reasoning agent respectively. During rollout, each agent acts as 'assistant' in its own message list and the other agent acts as 'user'. We use three hyperparameters to control the rollout length: (1) 'max_num_turns': the maximum number of turns for each trajectory. (2) 'max_response_length': the maximum number of tokens for each turn's response. (3) 'max_prompt_length': the maximum number of tokens for each trajectory." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 579, + 504, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 579, + 504, + 603 + ], + "spans": [ + { + "bbox": [ + 104, + 579, + 504, + 603 + ], + "type": "text", + "content": "During training, we apply the collected message list to Qwen2.5-7B's chat template and build loss masks in order to compute the loss for all turns of one trajectory (message list)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 607, + 504, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 607, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 607, + 504, + 662 + ], + "type": "text", + "content": "Moreover, for multi-turn ReMA rollout, unlike single agent single turn rollout, we need to carefully design the termination logic. Basically, we let the meta-thinking agent automatically decide when to finish the solving procedure, we use a special tag '[FINISH]' to indicate the end of the solving procedure. 
After we detect this tag, we will terminate trajectory after the reasoning agent generates its output." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 667, + 505, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 505, + 722 + ], + "type": "text", + "content": "We also design other termination conditions to ensure the quality of the generated trajectories. If the last agent's response is too long, we will terminate the whole trajectory and setting the reward to 0. We also introduce a different version of format reward: we give a reward of 1.0 only if the reasoning agent's last turn response is correct and the meta-thinking agent's last turn response include '[FINISH]'. We use math_check as the default verifier." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 323, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 323, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 323, + 83 + ], + "type": "text", + "content": "D.2.1 SFT data collection of multi-turn MAMRP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 93, + 504, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 93, + 504, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 93, + 504, + 160 + ], + "type": "text", + "content": "We use GPT-4o to translate 817 samples in LIMO [Ye et al., 2025c] by prompting it to wrap each sentence with meta-thinking and reasoning tags. We use a temperature of 0. 
After filtering, we get 800 conversations for training. The prompt can be found in Appendix G.2.1. For supervised finetuning, we use LlamaFactory as the codebase and train the model for 3 epochs with a learning rate of 1e-5, consine learning rate scheduler, and batch size of 8. Use DeepSpeed Zero2 for distributed training." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 176, + 224, + 188 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 176, + 224, + 188 + ], + "spans": [ + { + "bbox": [ + 105, + 176, + 224, + 188 + ], + "type": "text", + "content": "D.2.2 Training on MATH" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 197, + 504, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 197, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 197, + 504, + 264 + ], + "type": "text", + "content": "For training of multi-turn ReMA on MATH, we use GRPO [Shao et al., 2024] as the default learning algorithm. We refer to Appendix G.2.2 for prompts. For experiment in Sec 4.3, we use sample 128 prompts, each with 16 trajectories. During training, we drop the KL loss term to improve the numerical stability. We use a learning rate of 1e-6, bfloat16 precision, FSDP backend for distributed training. We split the rollout data into 4 mini-batches for update. For the sake of numerical stability, we do pre-clip before computing the exponential of log-prob for a upperbound of 3.0." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 268, + 504, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 268, + 504, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 268, + 504, + 301 + ], + "type": "text", + "content": "For the main result in Fig 5, we test different rollout configurations with a max_prompt_length of 4096, training for 500 steps. We use 32 NVIDIA A800 GPUs, the longest training cost about 40 hours due to large scale validation per 10 steps." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 306, + 504, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 306, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 504, + 340 + ], + "type": "text", + "content": "For the ablation results in Fig 6, we use a tiny subset of MATH Level 3-5, training for 300 steps. Specifically, we sample 19 questions for every single type (133 instances in total). We use 8 NVIDIA A800 GPUs, the training cost about 30 hours" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 345, + 265, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 345, + 265, + 355 + ], + "spans": [ + { + "bbox": [ + 105, + 345, + 265, + 355 + ], + "type": "text", + "content": "We test different rollout configurations:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 356, + 504, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 356, + 504, + 378 + ], + "spans": [ + { + "bbox": [ + 104, + 356, + 504, + 378 + ], + "type": "text", + "content": "(1) max_num_turns=30, max_response_length=256, max_prompt_length=4096 (2) \nmax_num_turns=30, max_response_length=1024, max_prompt_length=3072" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 383, + 504, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 383, + 504, + 417 + ], + "spans": [ + { + "bbox": [ + 104, + 383, + 504, + 417 + ], + "type": "text", + "content": "And for the experiment of separate parameter in multi-turn ReMA, we iteratively train each agent with the same configuration as above, but with a switch interval of 10 steps, starting from the metathinking agent." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 437, + 228, + 450 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 437, + 228, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 228, + 450 + ], + "type": "text", + "content": "E Other Experiments" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 464, + 330, + 476 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 464, + 330, + 476 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 330, + 476 + ], + "type": "text", + "content": "E.1 Reward functions shape cross-agent behaviors" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 487, + 504, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 487, + 504, + 575 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 504, + 575 + ], + "type": "text", + "content": "We also investigate the impact of different reward function designs on ReMA's behavior. In addition to the base reward setting described in Appendix C.2, we evaluate a consistency-based reward function using Qwen2.5-7B-Instruct. This reward function is designed to encourage the high-level agent to generate more detailed guidance. Indeed, we observe that the high-level agent trained in this manner produces more detailed solution steps compared to the one trained with the basic correctness format reward. However, we also find that this approach often leads to jailbreak behavior, where the high-level agent tends to include the final answer within its output, compromising the intended hierarchical reasoning process." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 579, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 579, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 579, + 506, + 723 + ], + "type": "text", + "content": "Furthermore, we discover an interesting evolution of a pattern during training: although our experimental setup is designed for the high-level agent to provide a solution plan while the lower-level agent executes it, we find that under the consistency-based reward, the lower-level agent significantly increases its attempt of verification rather than straightforward execution. We observed a certain sentence commonly appearing in the low-level agent's responses: \"Let's go through the solution step by step to ensure clarity and correctness.\" To quantify this effect, we track the frequency of it. We analyze this pattern across all mathematical test sets, sampling eight completions per question at a temperature of 0.7. Our empirical results have identified a " + }, + { + "bbox": [ + 104, + 579, + 506, + 723 + ], + "type": "inline_equation", + "content": "30\\mathrm{x}" + }, + { + "bbox": [ + 104, + 579, + 506, + 723 + ], + "type": "text", + "content": " increase of such self-verifying patterns in the model trained with the consistency-based reward compared to the one trained with the base reward. Moreover, we also observe additional variations of this pattern, e.g. \"Let's carefully re-evaluate the problem and solution to ensure accuracy and clarity.\" These phrases indicate that the low-level agent is actively exploring to verify the detailed response provided by the high-level agent." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "content": "This suggests that (1) meta-thinking can not only emerge and be reinforced in the high-level agent but also in the low-level agent. During reinforcement learning (RL) training, the two agents develop a novel problem-solving pattern characterized by a role reversal. (2) Consistency-based rewards promote a more self-corrective approach at the lower level, potentially disrupting the intended separation of roles between planning and execution. For a detailed case study, refer to Appendix F.2." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 140, + 422, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 140, + 422, + 152 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 422, + 152 + ], + "type": "text", + "content": "E.2 Detailed Training Curves on Different Datasets of Multi-turn ReMA" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 160, + 473, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 160, + 473, + 172 + ], + "spans": [ + { + "bbox": [ + 105, + 160, + 473, + 172 + ], + "type": "text", + "content": "We show the detailed training curves of the multi-turn ReMA on different datasets in Fig. 8." 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 108, + 184, + 504, + 383 + ], + "blocks": [ + { + "bbox": [ + 108, + 184, + 504, + 383 + ], + "lines": [ + { + "bbox": [ + 108, + 184, + 504, + 383 + ], + "spans": [ + { + "bbox": [ + 108, + 184, + 504, + 383 + ], + "type": "image", + "image_path": "3a07830c095bd1b0e7d492fba662d270532fb4263b681ea523bd6daaeb0902da.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 387, + 463, + 399 + ], + "lines": [ + { + "bbox": [ + 146, + 387, + 463, + 399 + ], + "spans": [ + { + "bbox": [ + 146, + 387, + 463, + 399 + ], + "type": "text", + "content": "Figure 8: Detailed Training Curves on Different Datasets of Multi-turn ReMA" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 427, + 226, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 427, + 226, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 427, + 226, + 441 + ], + "type": "text", + "content": "F Qualitative Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 451, + 279, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 451, + 279, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 451, + 279, + 464 + ], + "type": "text", + "content": "F.1 High-level policy finds better plans" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 472, + 504, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 528 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 528 + ], + "type": "text", + "content": "Here is an example of how a high-level policy alters the solving method of an LLM, increasing the likelihood of providing correct answers. 
As we can see from the following example, without the high-level policy, the LLM counts all integer coordinates, including those on the boundary, and then subtracts the boundary coordinates. In contrast, the high-level policy identifies a better approach, directly instructing the LLM to count only the coordinates strictly inside the boundary." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 540, + 309, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 540, + 309, + 552 + ], + "spans": [ + { + "bbox": [ + 105, + 540, + 309, + 552 + ], + "type": "text", + "content": "F.2 Case study for Experiments in Section E.1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 560, + 373, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 560, + 373, + 572 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 373, + 572 + ], + "type": "text", + "content": "Fig. 10 and Fig. 11 show an case study of experiments in Sec. E.1." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 576, + 504, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 576, + 504, + 643 + ], + "spans": [ + { + "bbox": [ + 104, + 576, + 504, + 643 + ], + "type": "text", + "content": "Although both agents are prompted with the same instructions as in our main results, the consistency reward of the high-level agent significantly alters the learning dynamics. As illustrated in Fig. 10, the high-level agent generates detailed solution attempts rather than a strategic plan. Consequently, the low-level agent evolves to verify the high-level agent's solutions. This suggests that, during reinforcement learning (RL) training, the two agents develop a novel problem-solving pattern characterized by a role reversal, where one agent generates an answer while the other verifies it." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 655, + 316, + 667 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 655, + 316, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 316, + 667 + ], + "type": "text", + "content": "F.3 Case study for Experiments in Section 4.2.2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 675, + 504, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 675, + 504, + 719 + ], + "spans": [ + { + "bbox": [ + 104, + 675, + 504, + 719 + ], + "type": "text", + "content": "Fig. 12 shows the detailed outputs of the two groups of LLMs trained in JSON action. The outputs finally converge to relative easier and shorter sentences, compared to our main experiments. This may due to insufficient capacity of a small LM to produce a valid JSON-format response while simultaneously exploring diverse reasoning strategies." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 115, + 79, + 493, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 79, + 493, + 95 + ], + "spans": [ + { + "bbox": [ + 115, + 79, + 493, + 95 + ], + "type": "text", + "content": "Consider the rectangular region with the following points as vertices: " + }, + { + "bbox": [ + 115, + 79, + 493, + 95 + ], + "type": "inline_equation", + "content": "(5,4),(-5,4),(-5, - 4),(5, - 4)" + }, + { + "bbox": [ + 115, + 79, + 493, + 95 + ], + "type": "text", + "content": " . How many points with integer coordinates will be strictly in the interior of this rectangular region?" 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 133, + 100, + 285, + 115 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 100, + 285, + 115 + ], + "spans": [ + { + "bbox": [ + 133, + 100, + 285, + 115 + ], + "type": "text", + "content": "Output without high-level planning solves this question in a less optimal way, leading to a wrong answer:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 120, + 303, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 120, + 303, + 142 + ], + "spans": [ + { + "bbox": [ + 115, + 120, + 303, + 142 + ], + "type": "text", + "content": "To determine how many points with integer coordinates will be strictly in the interior of the rectangular region with vertices at " + }, + { + "bbox": [ + 115, + 120, + 303, + 142 + ], + "type": "inline_equation", + "content": "(5,4)" + }, + { + "bbox": [ + 115, + 120, + 303, + 142 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 115, + 120, + 303, + 142 + ], + "type": "inline_equation", + "content": "(-5,4)" + }, + { + "bbox": [ + 115, + 120, + 303, + 142 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 115, + 120, + 303, + 142 + ], + "type": "inline_equation", + "content": "(-5, - 4)" + }, + { + "bbox": [ + 115, + 120, + 303, + 142 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 115, + 120, + 303, + 142 + ], + "type": "inline_equation", + "content": "(5, - 4)" + }, + { + "bbox": [ + 115, + 120, + 303, + 142 + ], + "type": "text", + "content": ", we will follow these steps:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 116, + 142, + 218, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 142, + 218, + 148 + ], + "spans": [ + { + "bbox": [ + 116, + 142, + 218, + 148 + ], + "type": "text", + "content": "1. 
Identify the dimensions of the rectangle:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 149, + 301, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 149, + 301, + 163 + ], + "spans": [ + { + "bbox": [ + 115, + 149, + 301, + 163 + ], + "type": "text", + "content": "- The width of the rectangle can be calculated as the distance between the x-coordinates of the vertices. The x-coordinates are " + }, + { + "bbox": [ + 115, + 149, + 301, + 163 + ], + "type": "inline_equation", + "content": "-5" + }, + { + "bbox": [ + 115, + 149, + 301, + 163 + ], + "type": "text", + "content": " and 5." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 178, + 168, + 241, + 177 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 168, + 241, + 177 + ], + "spans": [ + { + "bbox": [ + 178, + 168, + 241, + 177 + ], + "type": "interline_equation", + "content": "\\mathrm {W i d t h} = 5 - (- 5) = 1 0", + "image_path": "d0e8f5c83c09eb3f74d88a58572429c88fe7ef9810aa4a364a80d0c3f482a10a.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 182, + 301, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 182, + 301, + 196 + ], + "spans": [ + { + "bbox": [ + 115, + 182, + 301, + 196 + ], + "type": "text", + "content": "- The height of the rectangle can be calculated as the distance between the y-coordinates of the vertices. The y-coordinates are 4 and -4." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 179, + 201, + 240, + 209 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 201, + 240, + 209 + ], + "spans": [ + { + "bbox": [ + 179, + 201, + 240, + 209 + ], + "type": "interline_equation", + "content": "\\mathrm {H e i g h t} = 4 - (- 4) = 8", + "image_path": "eff8fc9f0b90286a3dd92044dfef48dcd65fbd0038ac94780d15266e585245c2.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 114, + 214, + 299, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 214, + 299, + 250 + ], + "spans": [ + { + "bbox": [ + 114, + 214, + 299, + 250 + ], + "type": "text", + "content": "2. Calculate the total number of integer points: - The total number of integer points in the rectangle (including the boundary) can be calculated by considering the range of x and y values: - The x-values range from " + }, + { + "bbox": [ + 114, + 214, + 299, + 250 + ], + "type": "inline_equation", + "content": "-5" + }, + { + "bbox": [ + 114, + 214, + 299, + 250 + ], + "type": "text", + "content": " to 5, giving us 11 integer values " + }, + { + "bbox": [ + 114, + 214, + 299, + 250 + ], + "type": "inline_equation", + "content": "(-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)" + }, + { + "bbox": [ + 114, + 214, + 299, + 250 + ], + "type": "text", + "content": ". 
- The y-values range from " + }, + { + "bbox": [ + 114, + 214, + 299, + 250 + ], + "type": "inline_equation", + "content": "-4" + }, + { + "bbox": [ + 114, + 214, + 299, + 250 + ], + "type": "text", + "content": " to 4, giving us 9 integer values (" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 251, + 294, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 251, + 294, + 264 + ], + "spans": [ + { + "bbox": [ + 115, + 251, + 294, + 264 + ], + "type": "inline_equation", + "content": "-4, -3, -2, -1, 0, 1, 2, 3, 4)" + }, + { + "bbox": [ + 115, + 251, + 294, + 264 + ], + "type": "text", + "content": ". The total number of points in the rectangle (including the boundary) is:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 174, + 270, + 246, + 277 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 174, + 270, + 246, + 277 + ], + "spans": [ + { + "bbox": [ + 174, + 270, + 246, + 277 + ], + "type": "interline_equation", + "content": "\\text {T o t a l p o i n t s} = 1 1 \\times 9 = 9 9", + "image_path": "9b53fdc6f47f6ed75eda062dd07c79e825f26afc0ac0d0fb1f7fa2587d332132.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 282, + 304, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 282, + 304, + 319 + ], + "spans": [ + { + "bbox": [ + 114, + 282, + 304, + 319 + ], + "type": "text", + "content": "3. Calculate the number of points on the boundary: - The boundary consists of the four sides of the rectangle. - The top and bottom sides (y = 4 and y = -4) each have 11 integer points (from -5 to 5). - The left and right sides (x = 5 and x = -5) each have 9 integer points (from -4 to 4). 
- Therefore, the total number of points on the boundary is:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 144, + 323, + 275, + 331 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 323, + 275, + 331 + ], + "spans": [ + { + "bbox": [ + 144, + 323, + 275, + 331 + ], + "type": "interline_equation", + "content": "\\text {B o u n d a r y p o i n t s} = 2 \\times 1 1 + 2 \\times 9 = 2 2 + 1 8 = 4 0", + "image_path": "a645dd71a3993817a902a8e8a4efdcf103b1d6ffd7933658ca0a24594ac96eed.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 336, + 298, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 336, + 298, + 358 + ], + "spans": [ + { + "bbox": [ + 114, + 336, + 298, + 358 + ], + "type": "text", + "content": "4. Calculate the number of strictly interior points: - To find the number of points strictly in the interior, we subtract the points on the boundary from the total number of points:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 124, + 363, + 295, + 372 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 363, + 295, + 372 + ], + "spans": [ + { + "bbox": [ + 124, + 363, + 295, + 372 + ], + "type": "interline_equation", + "content": "I n t e r i o r \\text {p o i n t s} = \\text {T o t a l p o i n t s - B o u n d a r y p o i n t s} = 9 9 - 4 0 = 5 9", + "image_path": "7837d615d6c9a9750b72afac02ea2aa4e31641292385be12d2392f3f35fddae4.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 115, + 376, + 299, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 376, + 299, + 392 + ], + "spans": [ + { + "bbox": [ + 115, + 376, + 299, + 392 + ], + "type": "text", + "content": "Thus, the number of points with integer coordinates that will be strictly in the interior of this rectangular region is 59." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 310, + 100, + 493, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 100, + 493, + 116 + ], + "spans": [ + { + "bbox": [ + 310, + 100, + 493, + 116 + ], + "type": "text", + "content": "Answer with high-level planning identifies the optimal sub-tasks for the LLM to solve, increasing the chance of answering the question correctly" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 309, + 118, + 492, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 118, + 492, + 141 + ], + "spans": [ + { + "bbox": [ + 309, + 118, + 492, + 141 + ], + "type": "text", + "content": "High-level policy planning: First, identify the boundaries of the rectangular region. Then, determine the integer coordinates that lie strictly within these boundaries. Finally, count the number of such points." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 309, + 149, + 486, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 149, + 486, + 164 + ], + "spans": [ + { + "bbox": [ + 309, + 149, + 486, + 164 + ], + "type": "text", + "content": "Low-level policy reasoning following the high-policy plan: To solve the problem, we will follow the steps outlined in the instructions." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 170, + 490, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 170, + 490, + 207 + ], + "spans": [ + { + "bbox": [ + 308, + 170, + 490, + 207 + ], + "type": "text", + "content": "Step 1: Identify the boundaries of the rectangular region. 
The vertices of the rectangular region are: " + }, + { + "bbox": [ + 308, + 170, + 490, + 207 + ], + "type": "inline_equation", + "content": "(-5,4)" + }, + { + "bbox": [ + 308, + 170, + 490, + 207 + ], + "type": "inline_equation", + "content": "(-5,4)" + }, + { + "bbox": [ + 308, + 170, + 490, + 207 + ], + "type": "inline_equation", + "content": "(-5, - 4)" + }, + { + "bbox": [ + 308, + 170, + 490, + 207 + ], + "type": "inline_equation", + "content": "(5, - 4)" + }, + { + "bbox": [ + 308, + 170, + 490, + 207 + ], + "type": "text", + "content": " From these vertices, we can determine the boundaries:- The left boundary is " + }, + { + "bbox": [ + 308, + 170, + 490, + 207 + ], + "type": "inline_equation", + "content": "x = -5" + }, + { + "bbox": [ + 308, + 170, + 490, + 207 + ], + "type": "text", + "content": " The right boundary is " + }, + { + "bbox": [ + 308, + 170, + 490, + 207 + ], + "type": "inline_equation", + "content": "x = 5" + }, + { + "bbox": [ + 308, + 170, + 490, + 207 + ], + "type": "text", + "content": " .- The bottom boundary is " + }, + { + "bbox": [ + 308, + 170, + 490, + 207 + ], + "type": "inline_equation", + "content": "y = -4" + }, + { + "bbox": [ + 308, + 170, + 490, + 207 + ], + "type": "text", + "content": " .- The top boundary is " + }, + { + "bbox": [ + 308, + 170, + 490, + 207 + ], + "type": "inline_equation", + "content": "y = 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 213, + 477, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 213, + 477, + 235 + ], + "spans": [ + { + "bbox": [ + 308, + 213, + 477, + 235 + ], + "type": "text", + "content": "Step 2: Determine the integer coordinates that lie strictly within these boundaries. 
For a point " + }, + { + "bbox": [ + 308, + 213, + 477, + 235 + ], + "type": "inline_equation", + "content": "(x,y)" + }, + { + "bbox": [ + 308, + 213, + 477, + 235 + ], + "type": "text", + "content": " to be strictly inside the rectangle, it must satisfy: " + }, + { + "bbox": [ + 308, + 213, + 477, + 235 + ], + "type": "inline_equation", + "content": "-5 < x < 5 - 4 < y < 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 241, + 494, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 241, + 494, + 262 + ], + "spans": [ + { + "bbox": [ + 308, + 241, + 494, + 262 + ], + "type": "text", + "content": "Step 3: Count the integer coordinates that satisfy these conditions. Now, we will find the integer values for " + }, + { + "bbox": [ + 308, + 241, + 494, + 262 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 308, + 241, + 494, + 262 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 241, + 494, + 262 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 308, + 241, + 494, + 262 + ], + "type": "text", + "content": " that fall within these boundaries. 
For " + }, + { + "bbox": [ + 308, + 241, + 494, + 262 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 308, + 241, + 494, + 262 + ], + "type": "text", + "content": " : The integer values that satisfy " + }, + { + "bbox": [ + 308, + 241, + 494, + 262 + ], + "type": "inline_equation", + "content": "-5 < x < 5" + }, + { + "bbox": [ + 308, + 241, + 494, + 262 + ], + "type": "text", + "content": " are:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 262, + 490, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 262, + 490, + 284 + ], + "spans": [ + { + "bbox": [ + 308, + 262, + 490, + 284 + ], + "type": "inline_equation", + "content": "-4, -3, -2, -1, 0, 1, 2, 3, 4" + }, + { + "bbox": [ + 308, + 262, + 490, + 284 + ], + "type": "text", + "content": " This gives us 9 possible integer values. For " + }, + { + "bbox": [ + 308, + 262, + 490, + 284 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 308, + 262, + 490, + 284 + ], + "type": "text", + "content": " The integer values that satisfy " + }, + { + "bbox": [ + 308, + 262, + 490, + 284 + ], + "type": "inline_equation", + "content": "-4 < y < 4" + }, + { + "bbox": [ + 308, + 262, + 490, + 284 + ], + "type": "text", + "content": " are: " + }, + { + "bbox": [ + 308, + 262, + 490, + 284 + ], + "type": "inline_equation", + "content": "-3, -2, -1, 0, 1, 2, 3" + }, + { + "bbox": [ + 308, + 262, + 490, + 284 + ], + "type": "text", + "content": ". This gives us 7 possible integer values." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 290, + 493, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 290, + 493, + 305 + ], + "spans": [ + { + "bbox": [ + 308, + 290, + 493, + 305 + ], + "type": "text", + "content": "Step 4: Calculate the total number of points. 
Now, we multiply the number of possible integer values for " + }, + { + "bbox": [ + 308, + 290, + 493, + 305 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 308, + 290, + 493, + 305 + ], + "type": "text", + "content": " by the number of possible integer values for " + }, + { + "bbox": [ + 308, + 290, + 493, + 305 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 308, + 290, + 493, + 305 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 367, + 309, + 437, + 318 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 309, + 437, + 318 + ], + "spans": [ + { + "bbox": [ + 367, + 309, + 437, + 318 + ], + "type": "interline_equation", + "content": "\\text {T o t a l p o i n t s} = 9 \\times 7 = 6 3", + "image_path": "7cbee802c4e75cd55acfba9c2483cb9c2aae890a4cb8099c03a05a2668bd8ebf.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 323, + 487, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 323, + 487, + 338 + ], + "spans": [ + { + "bbox": [ + 308, + 323, + 487, + 338 + ], + "type": "text", + "content": "Finally, the number of points with integer coordinates that lie strictly in the interior of the rectangular region is " + }, + { + "bbox": [ + 308, + 323, + 487, + 338 + ], + "type": "inline_equation", + "content": "\\boxed{63}" + }, + { + "bbox": [ + 308, + 323, + 487, + 338 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 136, + 408, + 472, + 421 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 408, + 472, + 421 + ], + "spans": [ + { + "bbox": [ + 136, + 408, + 472, + 421 + ], + "type": "text", + "content": "Figure 9: Case Study comparing with and without high-level metacognition results." 
+ } + ] + } + ], + "index": 26, + "type": "text" + }, + { + "bbox": [ + 105, + 455, + 173, + 470 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 455, + 173, + 470 + ], + "spans": [ + { + "bbox": [ + 105, + 455, + 173, + 470 + ], + "type": "text", + "content": "G Prompts" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 490, + 251, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 490, + 251, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 490, + 251, + 502 + ], + "type": "text", + "content": "G.1 Single-turn ReMA prompts" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 517, + 285, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 517, + 285, + 529 + ], + "spans": [ + { + "bbox": [ + 105, + 517, + 285, + 529 + ], + "type": "text", + "content": "G.1.1 Prompts for JSON data collection" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 543, + 296, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 543, + 296, + 555 + ], + "spans": [ + { + "bbox": [ + 105, + 543, + 296, + 555 + ], + "type": "text", + "content": "Prompt for metacognition reasoning to rewrite:" + } + ] + } + ], + "index": 30 + }, + { + "type": "code", + "bbox": [ + 108, + 561, + 500, + 705 + ], + "blocks": [ + { + "bbox": [ + 108, + 561, + 500, + 705 + ], + "lines": [ + { + "bbox": [ + 108, + 561, + 500, + 705 + ], + "spans": [ + { + "bbox": [ + 108, + 561, + 500, + 705 + ], + "type": "text", + "content": "System prompt: \nYou are a math expert trying to solve mathematical problems. \nBefore answering a question, your task is to rewrite the original question to make it clearer. \nProvide your rewritten content in JSON format: \n{ \"action\": \"REWRITE\", \"output\": \"{clearer question content}\" \n} \nRespond only with valid JSON. Do not write an introduction or summary. 
\nUser prompt: \nHere is the question: [problem_text]" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "code_body" + } + ], + "index": 31, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 105, + 711, + 312, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 711, + 312, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 312, + 723 + ], + "type": "text", + "content": "Prompt for metacognition reasoning to decompose:" + } + ] + } + ], + "index": 32 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 113, + 79, + 480, + 87 + ], + "blocks": [ + { + "bbox": [ + 113, + 79, + 480, + 87 + ], + "lines": [ + { + "bbox": [ + 113, + 79, + 480, + 87 + ], + "spans": [ + { + "bbox": [ + 113, + 79, + 480, + 87 + ], + "type": "text", + "content": "Question: The equation " + }, + { + "bbox": [ + 113, + 79, + 480, + 87 + ], + "type": "inline_equation", + "content": "x^{10} + (13x - 1)^{10} = 0" + }, + { + "bbox": [ + 113, + 79, + 480, + 87 + ], + "type": "text", + "content": " has 10 complex roots " + }, + { + "bbox": [ + 113, + 79, + 480, + 87 + ], + "type": "inline_equation", + "content": "r_1,\\overline{r}_1,r_2,\\overline{r}_2,r_3,\\overline{r}_3,r_4,\\overline{r}_4,r_5,\\overline{r}_5" + }, + { + "bbox": [ + 113, + 79, + 480, + 87 + ], + "type": "text", + "content": " , where the bar denotes complex conjugation. 
Find the value of " + }, + { + "bbox": [ + 113, + 79, + 480, + 87 + ], + "type": "inline_equation", + "content": "\\frac{1}{r_1\\overline{r}_1} +\\frac{1}{r_2\\overline{r}_2} +\\frac{1}{r_3\\overline{r}_3} +\\frac{1}{r_4\\overline{r}_4} +\\frac{1}{r_5\\overline{r}_5}" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_body" + } + ], + "index": 0, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 180, + 90, + 233, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 90, + 233, + 95 + ], + "spans": [ + { + "bbox": [ + 180, + 90, + 233, + 95 + ], + "type": "text", + "content": "High-Level Agent's Output" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 114, + 97, + 264, + 102 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 97, + 264, + 102 + ], + "spans": [ + { + "bbox": [ + 114, + 97, + 264, + 102 + ], + "type": "text", + "content": "To solve this problem, let's break it down into steps that a low-level math-solving agent can follow:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 106, + 170, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 106, + 170, + 111 + ], + "spans": [ + { + "bbox": [ + 115, + 106, + 170, + 111 + ], + "type": "text", + "content": "1. 
Understand the given equation:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 112, + 178, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 112, + 178, + 116 + ], + "spans": [ + { + "bbox": [ + 115, + 112, + 178, + 116 + ], + "type": "text", + "content": "The equation is " + }, + { + "bbox": [ + 115, + 112, + 178, + 116 + ], + "type": "inline_equation", + "content": "x^{10} + (13x - 1)^{10} = 0" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 117, + 153, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 117, + 153, + 121 + ], + "spans": [ + { + "bbox": [ + 115, + 117, + 153, + 121 + ], + "type": "text", + "content": "2. Rewrite the equation:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 121, + 184, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 121, + 184, + 125 + ], + "spans": [ + { + "bbox": [ + 115, + 121, + 184, + 125 + ], + "type": "text", + "content": "Notice that the equation can be rewritten as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 189, + 131, + 223, + 137 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 131, + 223, + 137 + ], + "spans": [ + { + "bbox": [ + 189, + 131, + 223, + 137 + ], + "type": "interline_equation", + "content": "x ^ {1 0} = - (1 3 x - 1) ^ {1 0}.", + "image_path": "4e685cd1fc6e7e386b1f144221571ae899897bfc8d69063160055ba364dfd977.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 114, + 141, + 149, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 141, + 149, + 145 + ], + "spans": [ + { + "bbox": [ + 114, + 141, + 149, + 145 + ], + "type": "text", + "content": "3. 
Consider the roots:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 114, + 146, + 293, + 165 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 114, + 146, + 293, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 146, + 293, + 154 + ], + "spans": [ + { + "bbox": [ + 114, + 146, + 293, + 154 + ], + "type": "text", + "content": "The roots of the equation are given as " + }, + { + "bbox": [ + 114, + 146, + 293, + 154 + ], + "type": "inline_equation", + "content": "r_1, \\overline{r}_1, r_2, \\overline{r}_2, r_3, \\overline{r}_3, r_4, \\overline{r}_4, r_5, \\overline{r}_5" + }, + { + "bbox": [ + 114, + 146, + 293, + 154 + ], + "type": "text", + "content": ", where the bar denotes complex conjugation. 4. Use the property of complex conjugates:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 155, + 286, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 155, + 286, + 161 + ], + "spans": [ + { + "bbox": [ + 114, + 155, + 286, + 161 + ], + "type": "text", + "content": "For any complex number " + }, + { + "bbox": [ + 114, + 155, + 286, + 161 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 114, + 155, + 286, + 161 + ], + "type": "text", + "content": " , the product of " + }, + { + "bbox": [ + 114, + 155, + 286, + 161 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 114, + 155, + 286, + 161 + ], + "type": "text", + "content": " and its conjugate " + }, + { + "bbox": [ + 114, + 155, + 286, + 161 + ], + "type": "inline_equation", + "content": "\\overline{z}" + }, + { + "bbox": [ + 114, + 155, + 286, + 161 + ], + "type": "text", + "content": " is the square of the modulus of " + }, + { + "bbox": [ + 114, + 155, + 286, + 161 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 114, + 155, + 286, + 161 + ], + "type": "text", + "content": " i.e., " + }, + { + "bbox": [ + 114, + 155, + 286, + 161 + ], 
+ "type": "inline_equation", + "content": "z\\overline{z} = |z|^2" + }, + { + "bbox": [ + 114, + 155, + 286, + 161 + ], + "type": "text", + "content": " 5. Calculate the required sum:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 115, + 161, + 162, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 161, + 162, + 165 + ], + "spans": [ + { + "bbox": [ + 115, + 161, + 162, + 165 + ], + "type": "text", + "content": "3. Calculate the required sum " + }, + { + "bbox": [ + 115, + 161, + 162, + 165 + ], + "type": "inline_equation", + "content": "\\mathbf{W} = \\mathbf{w}_{1} + \\mathbf{w}_{2} + \\dots + \\mathbf{w}_{n}" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 175, + 175, + 238, + 186 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 175, + 238, + 186 + ], + "spans": [ + { + "bbox": [ + 175, + 175, + 238, + 186 + ], + "type": "interline_equation", + "content": "\\frac {1}{r _ {1} \\overline {{F _ {1}}}} + \\frac {1}{r _ {2} \\overline {{F _ {2}}}} + \\frac {1}{r _ {3} \\overline {{F _ {3}}}} + \\frac {1}{r _ {4} \\overline {{F _ {4}}}} + \\frac {1}{r _ {5} \\overline {{F _ {5}}}}", + "image_path": "ab388587275a1e57a8348ee40749023ad5049d876dde96e5d113fa279263566f.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 116, + 188, + 201, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 188, + 201, + 193 + ], + "spans": [ + { + "bbox": [ + 116, + 188, + 201, + 193 + ], + "type": "text", + "content": "Using the property from step 4, this can be rewritten as:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 175, + 200, + 239, + 211 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 200, + 239, + 211 + ], + "spans": [ + { + "bbox": [ + 175, + 200, + 239, + 211 + ], + "type": "interline_equation", + "content": "\\frac {1}{| r _ {1} | ^ {2}} + \\frac {1}{| r _ {2} | ^ {2}} + \\frac 
{1}{| r _ {3} | ^ {2}} + \\frac {1}{| r _ {4} | ^ {2}} + \\frac {1}{| r _ {5} | ^ {2}},", + "image_path": "1a3854038f19d195ed185616c3f2b9e4fa543e0ca650c9a6ea7d1989a49d00de.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 214, + 297, + 241 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 114, + 214, + 157, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 214, + 157, + 218 + ], + "spans": [ + { + "bbox": [ + 114, + 214, + 157, + 218 + ], + "type": "text", + "content": "6. Simplify the expression:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "spans": [ + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "inline_equation", + "content": "r_1, r_2, r_3, r_4, r_5" + }, + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "text", + "content": " are roots of the equation " + }, + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "inline_equation", + "content": "x^{10} = -(13x - 1)^{10}" + }, + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "text", + "content": ", the magnitudes " + }, + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "inline_equation", + "content": "|r_i|" + }, + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "text", + "content": " are the same for each pair " + }, + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "inline_equation", + "content": "r_i" + }, + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "inline_equation", + "content": "\\overline{r}_i" + }, + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "inline_equation", + "content": "|r_i| = |r_i|" + }, + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "inline_equation", + "content": "i, j" + }, + { + "bbox": [ + 114, + 219, + 295, + 226 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 115, + 227, + 166, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 227, + 166, + 231 + ], + "spans": [ + { + "bbox": [ + 115, + 227, + 166, + 231 + ], + "type": "text", + "content": "7. Find the common magnitude:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 232, + 297, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 232, + 297, + 241 + ], + "spans": [ + { + "bbox": [ + 114, + 232, + 297, + 241 + ], + "type": "text", + "content": "To find " + }, + { + "bbox": [ + 114, + 232, + 297, + 241 + ], + "type": "inline_equation", + "content": "|r_{1}|" + }, + { + "bbox": [ + 114, + 232, + 297, + 241 + ], + "type": "text", + "content": ", consider the magnitudes of the terms on both sides of the equation " + }, + { + "bbox": [ + 114, + 232, + 297, + 241 + ], + "type": "inline_equation", + "content": "x^{10} = -(13x - 1)^{10}" + }, + { + "bbox": [ + 114, + 232, + 297, + 241 + ], + "type": "text", + "content": ". The magnitude of the left side is " + }, + { + "bbox": [ + 114, + 232, + 297, + 241 + ], + "type": "inline_equation", + "content": "|x|^{10}" + }, + { + "bbox": [ + 114, + 232, + 297, + 241 + ], + "type": "text", + "content": " and the magnitude of the right side is " + }, + { + "bbox": [ + 114, + 232, + 297, + 241 + ], + "type": "inline_equation", + "content": "|13x - 1|^{10}" + }, + { + "bbox": [ + 114, + 232, + 297, + 241 + ], + "type": "text", + "content": ". 
Therefore:" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 190, + 248, + 222, + 254 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 248, + 222, + 254 + ], + "spans": [ + { + "bbox": [ + 190, + 248, + 222, + 254 + ], + "type": "interline_equation", + "content": "\\left| x \\right| ^ {1 0} = \\left| 1 3 x - 1 \\right| ^ {1 0}.", + "image_path": "3e06b9d59ab4e557586a3566be6437c22310f94ff0f6d0828c903eee7d02ad79.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 116, + 258, + 181, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 258, + 181, + 262 + ], + "spans": [ + { + "bbox": [ + 116, + 258, + 181, + 262 + ], + "type": "text", + "content": "Taking the 10th root of both sides, we get:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 194, + 270, + 220, + 275 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 270, + 220, + 275 + ], + "spans": [ + { + "bbox": [ + 194, + 270, + 220, + 275 + ], + "type": "interline_equation", + "content": "| x | = | 1 3 x - 1 |.", + "image_path": "36e0a413692113a7e783656a749d6c396724839d2dbe2992be4b5dfc8d667660.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 278, + 145, + 287 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 114, + 278, + 139, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 278, + 139, + 282 + ], + "spans": [ + { + "bbox": [ + 114, + 278, + 139, + 282 + ], + "type": "text", + "content": "8. 
Solve for " + }, + { + "bbox": [ + 114, + 278, + 139, + 282 + ], + "type": "inline_equation", + "content": "|x|" + }, + { + "bbox": [ + 114, + 278, + 139, + 282 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 115, + 283, + 145, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 283, + 145, + 287 + ], + "spans": [ + { + "bbox": [ + 115, + 283, + 145, + 287 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 115, + 283, + 145, + 287 + ], + "type": "inline_equation", + "content": "|x| = a" + }, + { + "bbox": [ + 115, + 283, + 145, + 287 + ], + "type": "text", + "content": ". Then:" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 114, + 289, + 168, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 289, + 168, + 293 + ], + "spans": [ + { + "bbox": [ + 114, + 289, + 168, + 293 + ], + "type": "inline_equation", + "content": "\\therefore m - 1 \\neq 0" + }, + { + "bbox": [ + 114, + 289, + 168, + 293 + ], + "type": "text", + "content": " ;" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 194, + 295, + 218, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 295, + 218, + 300 + ], + "spans": [ + { + "bbox": [ + 194, + 295, + 218, + 300 + ], + "type": "interline_equation", + "content": "a = \\left| 1 3 a - 1 \\right|.", + "image_path": "07fc65b6c33535685fdd786bf88fd2c3719bf9a2836ea2cfdd87b86c9fbb436d.jpg" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 115, + 304, + 154, + 322 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 115, + 304, + 154, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 304, + 154, + 308 + ], + "spans": [ + { + "bbox": [ + 115, + 304, + 154, + 308 + ], + "type": "text", + "content": "This gives us two cases:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 115, + 309, + 130, 
+ 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 309, + 130, + 313 + ], + "spans": [ + { + "bbox": [ + 115, + 309, + 130, + 313 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 115, + 309, + 130, + 313 + ], + "type": "inline_equation", + "content": "a = 13" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 115, + 314, + 145, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 314, + 145, + 318 + ], + "spans": [ + { + "bbox": [ + 115, + 314, + 145, + 318 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 115, + 314, + 145, + 318 + ], + "type": "inline_equation", + "content": "a = -(13a - 1)" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 115, + 319, + 150, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 319, + 150, + 322 + ], + "spans": [ + { + "bbox": [ + 115, + 319, + 150, + 322 + ], + "type": "text", + "content": "Solving the first case:" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 172, + 329, + 241, + 339 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 172, + 329, + 241, + 339 + ], + "spans": [ + { + "bbox": [ + 172, + 329, + 241, + 339 + ], + "type": "interline_equation", + "content": "a = 1 3 a - 1 \\Longrightarrow 1 2 a = 1 \\Longrightarrow a = \\frac {1}{1 2}.", + "image_path": "0237a522e20431ebc766f44dcd6ea2566b93869ecb82778c21cee24038e74754.jpg" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 116, + 342, + 154, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 342, + 154, + 346 + ], + "spans": [ + { + "bbox": [ + 116, + 342, + 154, + 346 + ], + "type": "text", + "content": "Solving the second case:" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 171, + 353, + 242, + 363 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 353, + 242, + 363 + ], + "spans": [ + { + "bbox": [ + 
171, + 353, + 242, + 363 + ], + "type": "interline_equation", + "content": "a = - 1 3 a + 1 \\Longrightarrow 1 4 a = 1 \\Longrightarrow a = \\frac {1}{1 4}.", + "image_path": "ca970fefe3377e52dda01105b9874b3532e444010e03bc6c2807c37e7459b450.jpg" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 365, + 297, + 383 + ], + "type": "list", + "angle": 0, + "index": 40, + "blocks": [ + { + "bbox": [ + 114, + 365, + 297, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 365, + 297, + 373 + ], + "spans": [ + { + "bbox": [ + 114, + 365, + 297, + 373 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 114, + 365, + 297, + 373 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 114, + 365, + 297, + 373 + ], + "type": "text", + "content": " must be the same for all roots, we need to check which value is consistent. Given the nature of the problem, we can assume " + }, + { + "bbox": [ + 114, + 365, + 297, + 373 + ], + "type": "inline_equation", + "content": "a = \\frac{1}{2}" + }, + { + "bbox": [ + 114, + 365, + 297, + 373 + ], + "type": "text", + "content": " as the correct magnitude." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 374, + 156, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 374, + 156, + 378 + ], + "spans": [ + { + "bbox": [ + 114, + 374, + 156, + 378 + ], + "type": "text", + "content": "9. 
Calculate the final sum:" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 379, + 168, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 379, + 168, + 383 + ], + "spans": [ + { + "bbox": [ + 114, + 379, + 168, + 383 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 114, + 379, + 168, + 383 + ], + "type": "inline_equation", + "content": "|r_i| = \\frac{1}{2}" + }, + { + "bbox": [ + 114, + 379, + 168, + 383 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 114, + 379, + 168, + 383 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 114, + 379, + 168, + 383 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 39 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 187, + 391, + 225, + 403 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 391, + 225, + 403 + ], + "spans": [ + { + "bbox": [ + 187, + 391, + 225, + 403 + ], + "type": "interline_equation", + "content": "\\frac {1}{| r _ {1} | ^ {2}} = \\frac {1}{(\\frac {1}{4}) ^ {2}} = 1 4 4.", + "image_path": "d1ac43fc68ceb3c6df31a21c8f515a2c7a6eab60ec12eb55a3eb012f71308f76.jpg" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 116, + 407, + 150, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 407, + 150, + 411 + ], + "spans": [ + { + "bbox": [ + 116, + 407, + 150, + 411 + ], + "type": "text", + "content": "Therefore, the sum is:" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 168, + 419, + 246, + 424 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 419, + 246, + 424 + ], + "spans": [ + { + "bbox": [ + 168, + 419, + 246, + 424 + ], + "type": "interline_equation", + "content": "1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 = 5 \\times 1 4 4 = 7 2 0.", + "image_path": "622da8514c8776b09adf1944ffa4c54f42ccb3c0dc87f16462c8dd56fc63f652.jpg" + } + ] + } + ], + "index": 43 + }, + { + 
"bbox": [ + 114, + 428, + 142, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 428, + 142, + 432 + ], + "spans": [ + { + "bbox": [ + 114, + 428, + 142, + 432 + ], + "type": "text", + "content": "10. Final answer:" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 149, + 432, + 200, + 437 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 432, + 200, + 437 + ], + "spans": [ + { + "bbox": [ + 149, + 432, + 200, + 437 + ], + "type": "interline_equation", + "content": "\\frac {1}{\\frac {1}{n} + \\frac {1}{n}} + \\frac {1}{\\frac {1}{n} + \\frac {1}{n}} + \\frac {1}{\\frac {1}{n} + \\frac {1}{n}} \\text {i s} 7 2 0.", + "image_path": "1c302497f77a8108527d48c628148c9c17719bb8a4d6440b23231a5f2df60a61.jpg" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 374, + 90, + 426, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 374, + 90, + 426, + 95 + ], + "spans": [ + { + "bbox": [ + 374, + 90, + 426, + 95 + ], + "type": "text", + "content": "Low-Level Agent's Output" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 304, + 97, + 425, + 102 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 97, + 425, + 102 + ], + "spans": [ + { + "bbox": [ + 304, + 97, + 425, + 102 + ], + "type": "text", + "content": "Let's go through the solution step by step to ensure clarity and correctness." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 304, + 106, + 373, + 124 + ], + "type": "list", + "angle": 0, + "index": 52, + "blocks": [ + { + "bbox": [ + 304, + 106, + 359, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 106, + 359, + 110 + ], + "spans": [ + { + "bbox": [ + 304, + 106, + 359, + 110 + ], + "type": "text", + "content": "1. 
Understand the given equation:" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 304, + 110, + 367, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 110, + 367, + 114 + ], + "spans": [ + { + "bbox": [ + 304, + 110, + 367, + 114 + ], + "type": "text", + "content": "The equation is " + }, + { + "bbox": [ + 304, + 110, + 367, + 114 + ], + "type": "inline_equation", + "content": "x^{10} + (13x - 1)^{10} = 0" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 304, + 114, + 343, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 114, + 343, + 119 + ], + "spans": [ + { + "bbox": [ + 304, + 114, + 343, + 119 + ], + "type": "text", + "content": "2. Rewrite the equation:" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 304, + 120, + 373, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 120, + 373, + 124 + ], + "spans": [ + { + "bbox": [ + 304, + 120, + 373, + 124 + ], + "type": "text", + "content": "Notice that the equation can be rewritten as:" + } + ] + } + ], + "index": 51 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 383, + 131, + 417, + 137 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 383, + 131, + 417, + 137 + ], + "spans": [ + { + "bbox": [ + 383, + 131, + 417, + 137 + ], + "type": "interline_equation", + "content": "x ^ {1 0} = - (1 3 x - 1) ^ {1 0}.", + "image_path": "6525765c83f841fa74203734033cb3929d8d0bde8888d550d515f348b36a5f29.jpg" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 304, + 141, + 339, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 141, + 339, + 145 + ], + "spans": [ + { + "bbox": [ + 304, + 141, + 339, + 145 + ], + "type": "text", + "content": "3. 
Consider the roots:" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 304, + 146, + 482, + 167 + ], + "type": "list", + "angle": 0, + "index": 58, + "blocks": [ + { + "bbox": [ + 304, + 146, + 482, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 146, + 482, + 154 + ], + "spans": [ + { + "bbox": [ + 304, + 146, + 482, + 154 + ], + "type": "text", + "content": "The roots of the equation are given as " + }, + { + "bbox": [ + 304, + 146, + 482, + 154 + ], + "type": "inline_equation", + "content": "r_1, \\overline{r}_1, r_2, \\overline{r}_2, r_3, \\overline{r}_3, r_4, \\overline{r}_4, r_5, \\overline{r}_5" + }, + { + "bbox": [ + 304, + 146, + 482, + 154 + ], + "type": "text", + "content": ", where the bar denotes complex conjugation. 4. Use the property of complex conjugates:" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 304, + 155, + 476, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 155, + 476, + 162 + ], + "spans": [ + { + "bbox": [ + 304, + 155, + 476, + 162 + ], + "type": "text", + "content": "For any complex number " + }, + { + "bbox": [ + 304, + 155, + 476, + 162 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 304, + 155, + 476, + 162 + ], + "type": "text", + "content": ", the product of " + }, + { + "bbox": [ + 304, + 155, + 476, + 162 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 304, + 155, + 476, + 162 + ], + "type": "text", + "content": " and its conjugate " + }, + { + "bbox": [ + 304, + 155, + 476, + 162 + ], + "type": "inline_equation", + "content": "\\overline{z}" + }, + { + "bbox": [ + 304, + 155, + 476, + 162 + ], + "type": "text", + "content": " is the square of the modulus of " + }, + { + "bbox": [ + 304, + 155, + 476, + 162 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 304, + 155, + 476, + 162 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 304, + 155, + 476, + 162 + 
], + "type": "inline_equation", + "content": "z\\overline{z} = |z|^2" + }, + { + "bbox": [ + 304, + 155, + 476, + 162 + ], + "type": "text", + "content": ". 5. Calculate the required sum:" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 304, + 163, + 351, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 163, + 351, + 167 + ], + "spans": [ + { + "bbox": [ + 304, + 163, + 351, + 167 + ], + "type": "text", + "content": "We need to find the value of:" + } + ] + } + ], + "index": 57 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 369, + 175, + 432, + 186 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 175, + 432, + 186 + ], + "spans": [ + { + "bbox": [ + 369, + 175, + 432, + 186 + ], + "type": "interline_equation", + "content": "\\frac {1}{r _ {1} F _ {1}} + \\frac {1}{r _ {2} F _ {2}} + \\frac {1}{r _ {3} F _ {3}} + \\frac {1}{r _ {4} F _ {4}} + \\frac {1}{r _ {5} F _ {5}}", + "image_path": "28eba59bcd1627af7ee1dbdd7fe9a9861b579d0aebd07602fe6fed78017ca843.jpg" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 306, + 188, + 391, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 188, + 391, + 193 + ], + "spans": [ + { + "bbox": [ + 306, + 188, + 391, + 193 + ], + "type": "text", + "content": "Using the property from step 4, this can be rewritten as:" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 368, + 199, + 432, + 210 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 368, + 199, + 432, + 210 + ], + "spans": [ + { + "bbox": [ + 368, + 199, + 432, + 210 + ], + "type": "interline_equation", + "content": "\\frac {1}{| r _ {1} | ^ {2}} + \\frac {1}{| r _ {2} | ^ {2}} + \\frac {1}{| r _ {3} | ^ {2}} + \\frac {1}{| r _ {4} | ^ {2}} + \\frac {1}{| r _ {5} | ^ {2}} + \\frac {1}{| r _ {6} | ^ {2}}", + "image_path": "b6835da4e0c1c0b9108bd67a52f6f1b3fa00f0b7df3bbbd73a4d0bfb0e85389d.jpg" + } + ] + } + ], + "index": 61 + }, + 
{ + "bbox": [ + 304, + 214, + 495, + 241 + ], + "type": "list", + "angle": 0, + "index": 66, + "blocks": [ + { + "bbox": [ + 304, + 214, + 347, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 214, + 347, + 218 + ], + "spans": [ + { + "bbox": [ + 304, + 214, + 347, + 218 + ], + "type": "text", + "content": "6. Simplify the expression:" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "spans": [ + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "inline_equation", + "content": "r_1, r_2, r_3, r_4, r_5" + }, + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "text", + "content": " are roots of the equation " + }, + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "inline_equation", + "content": "x^{10} = -(13x - 1)^{10}" + }, + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "text", + "content": ", the magnitudes " + }, + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "inline_equation", + "content": "|r_i|" + }, + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "text", + "content": " are the same for each pair " + }, + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "inline_equation", + "content": "r_1" + }, + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "inline_equation", + "content": "\\overline{r}_i" + }, + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "inline_equation", + "content": "|r_i| = |r_i|" + }, + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "inline_equation", + "content": "i, j" + }, + { + "bbox": [ + 304, + 218, + 495, + 226 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 304, + 227, + 356, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 227, + 356, + 231 + ], + "spans": [ + { + "bbox": [ + 304, + 227, + 356, + 231 + ], + "type": "text", + "content": "7. Find the common magnitude:" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 304, + 232, + 493, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 232, + 493, + 241 + ], + "spans": [ + { + "bbox": [ + 304, + 232, + 493, + 241 + ], + "type": "text", + "content": "To find " + }, + { + "bbox": [ + 304, + 232, + 493, + 241 + ], + "type": "inline_equation", + "content": "|\\boldsymbol{r}_i|" + }, + { + "bbox": [ + 304, + 232, + 493, + 241 + ], + "type": "text", + "content": ", consider the determinants of the terms on both sides of the equation " + }, + { + "bbox": [ + 304, + 232, + 493, + 241 + ], + "type": "inline_equation", + "content": "x^{10} = -(13x - 1)^{10}" + }, + { + "bbox": [ + 304, + 232, + 493, + 241 + ], + "type": "text", + "content": ". The magnitude of the left side is " + }, + { + "bbox": [ + 304, + 232, + 493, + 241 + ], + "type": "inline_equation", + "content": "|z|^10" + }, + { + "bbox": [ + 304, + 232, + 493, + 241 + ], + "type": "text", + "content": " and the magnitude of the right side is " + }, + { + "bbox": [ + 304, + 232, + 493, + 241 + ], + "type": "inline_equation", + "content": "|13z - 1|^{10}" + }, + { + "bbox": [ + 304, + 232, + 493, + 241 + ], + "type": "text", + "content": ". 
Therefore:" + } + ] + } + ], + "index": 65 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 384, + 247, + 416, + 254 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 384, + 247, + 416, + 254 + ], + "spans": [ + { + "bbox": [ + 384, + 247, + 416, + 254 + ], + "type": "interline_equation", + "content": "| x | ^ {1 0} = | 1 3 x - 1 | ^ {1 0}.", + "image_path": "9f10fb34be2f1c3eccf6de5c2a2b698ca17ec6df1df591b2e8bee7ea6c659fd4.jpg" + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 306, + 257, + 371, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 257, + 371, + 262 + ], + "spans": [ + { + "bbox": [ + 306, + 257, + 371, + 262 + ], + "type": "text", + "content": "Taking the 10th root of both sides, we get:" + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 387, + 269, + 413, + 274 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 387, + 269, + 413, + 274 + ], + "spans": [ + { + "bbox": [ + 387, + 269, + 413, + 274 + ], + "type": "interline_equation", + "content": "\\left| x \\right| = \\left| 1 3 x - 1 \\right|.", + "image_path": "ba93ff6ca809911552f39438514acafb5a346d56b0ab4f63663e3f4afc26565a.jpg" + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 304, + 277, + 329, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 277, + 329, + 282 + ], + "spans": [ + { + "bbox": [ + 304, + 277, + 329, + 282 + ], + "type": "text", + "content": "8. 
Solve for " + }, + { + "bbox": [ + 304, + 277, + 329, + 282 + ], + "type": "inline_equation", + "content": "|x|" + }, + { + "bbox": [ + 304, + 277, + 329, + 282 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 304, + 283, + 335, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 283, + 335, + 287 + ], + "spans": [ + { + "bbox": [ + 304, + 283, + 335, + 287 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 304, + 283, + 335, + 287 + ], + "type": "inline_equation", + "content": "|x| = a" + }, + { + "bbox": [ + 304, + 283, + 335, + 287 + ], + "type": "text", + "content": ". Then:" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 388, + 295, + 412, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 295, + 412, + 300 + ], + "spans": [ + { + "bbox": [ + 388, + 295, + 412, + 300 + ], + "type": "interline_equation", + "content": "a = | 1 3 x - 1 |.", + "image_path": "14f1085c1bd170ec01d7d192516fafed37c24536c8291207e594c8f5bd827dd5.jpg" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 304, + 304, + 343, + 322 + ], + "type": "list", + "angle": 0, + "index": 77, + "blocks": [ + { + "bbox": [ + 304, + 304, + 343, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 304, + 343, + 308 + ], + "spans": [ + { + "bbox": [ + 304, + 304, + 343, + 308 + ], + "type": "text", + "content": "This gives us two cases:" + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 304, + 309, + 335, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 309, + 335, + 314 + ], + "spans": [ + { + "bbox": [ + 304, + 309, + 335, + 314 + ], + "type": "text", + "content": "-α=13a-1" + } + ] + } + ], + "index": 74 + }, + { + "bbox": [ + 304, + 314, + 335, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 314, + 335, + 318 + ], + "spans": [ + { + "bbox": [ + 304, + 314, + 335, + 318 
+ ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 304, + 314, + 335, + 318 + ], + "type": "inline_equation", + "content": "a = -(13a - 1)" + } + ] + } + ], + "index": 75 + }, + { + "bbox": [ + 304, + 319, + 340, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 319, + 340, + 322 + ], + "spans": [ + { + "bbox": [ + 304, + 319, + 340, + 322 + ], + "type": "text", + "content": "Solving the first case:" + } + ] + } + ], + "index": 76 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 365, + 329, + 434, + 339 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 329, + 434, + 339 + ], + "spans": [ + { + "bbox": [ + 365, + 329, + 434, + 339 + ], + "type": "interline_equation", + "content": "a = 1 3 a - 1 \\Longrightarrow 1 2 a = 1 \\Longrightarrow a = \\frac {1}{1 2}.", + "image_path": "3c25888f2b698f2efd7b4ef136864d3430e01e3a7ee444879308ad71c36e297e.jpg" + } + ] + } + ], + "index": 78 + }, + { + "bbox": [ + 306, + 342, + 345, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 342, + 345, + 346 + ], + "spans": [ + { + "bbox": [ + 306, + 342, + 345, + 346 + ], + "type": "text", + "content": "Solving the second case:" + } + ] + } + ], + "index": 79 + }, + { + "bbox": [ + 365, + 353, + 435, + 363 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 353, + 435, + 363 + ], + "spans": [ + { + "bbox": [ + 365, + 353, + 435, + 363 + ], + "type": "interline_equation", + "content": "a = - 1 3 a + 1 \\Rightarrow 1 4 a = 1 \\Rightarrow a = \\frac {1}{1 4}.", + "image_path": "15bca33a252a7d7747e1b41007c900efc23110c386a855fa01292b21327669cf.jpg" + } + ] + } + ], + "index": 80 + }, + { + "bbox": [ + 304, + 365, + 493, + 384 + ], + "type": "list", + "angle": 0, + "index": 84, + "blocks": [ + { + "bbox": [ + 304, + 365, + 493, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 365, + 493, + 373 + ], + "spans": [ 
+ { + "bbox": [ + 304, + 365, + 493, + 373 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 304, + 365, + 493, + 373 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 304, + 365, + 493, + 373 + ], + "type": "text", + "content": " must be the same for all roots, we need to check which value is consistent. Given the nature of the problem, we can assume " + }, + { + "bbox": [ + 304, + 365, + 493, + 373 + ], + "type": "inline_equation", + "content": "a = \\frac{1}{2}" + }, + { + "bbox": [ + 304, + 365, + 493, + 373 + ], + "type": "text", + "content": " as the correct magnitude." + } + ] + } + ], + "index": 81 + }, + { + "bbox": [ + 304, + 374, + 347, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 374, + 347, + 379 + ], + "spans": [ + { + "bbox": [ + 304, + 374, + 347, + 379 + ], + "type": "text", + "content": "9. Calculate the final sum:" + } + ] + } + ], + "index": 82 + }, + { + "bbox": [ + 304, + 379, + 359, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 379, + 359, + 384 + ], + "spans": [ + { + "bbox": [ + 304, + 379, + 359, + 384 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 304, + 379, + 359, + 384 + ], + "type": "inline_equation", + "content": "|r_i| = \\frac{1}{12}" + }, + { + "bbox": [ + 304, + 379, + 359, + 384 + ], + "type": "text", + "content": " for all i, we have:" + } + ] + } + ], + "index": 83 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 381, + 391, + 419, + 404 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 391, + 419, + 404 + ], + "spans": [ + { + "bbox": [ + 381, + 391, + 419, + 404 + ], + "type": "interline_equation", + "content": "\\frac {1}{| r _ {i} | ^ {2}} = \\frac {1}{(\\mathrm {古}) ^ {2}} = 1 4 4.", + "image_path": "6c039a69d6e8cd9522c4b7613deff397a371db9e812a8d0ec40925ac042bba22.jpg" + } + ] + } + ], + "index": 85 + }, + { + "bbox": [ + 306, + 407, + 340, + 411 + 
], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 407, + 340, + 411 + ], + "spans": [ + { + "bbox": [ + 306, + 407, + 340, + 411 + ], + "type": "text", + "content": "Therefore, the sum is:" + } + ] + } + ], + "index": 86 + }, + { + "bbox": [ + 361, + 418, + 439, + 424 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 418, + 439, + 424 + ], + "spans": [ + { + "bbox": [ + 361, + 418, + 439, + 424 + ], + "type": "interline_equation", + "content": "1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 + 1 4 4 = 5 \\times 1 4 4 = 7 2 0.", + "image_path": "05170e814c4da8f834eb3b09fcc2a7641bdce982966a108ae9e495f6791f6dd7.jpg" + } + ] + } + ], + "index": 87 + }, + { + "bbox": [ + 304, + 428, + 332, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 428, + 332, + 432 + ], + "spans": [ + { + "bbox": [ + 304, + 428, + 332, + 432 + ], + "type": "text", + "content": "10. Final answer:" + } + ] + } + ], + "index": 88 + }, + { + "bbox": [ + 306, + 432, + 392, + 437 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 432, + 392, + 437 + ], + "spans": [ + { + "bbox": [ + 306, + 432, + 392, + 437 + ], + "type": "interline_equation", + "content": "\\text {The v a l u e o f} \\frac {1}{r _ {1} r _ {1}} + \\frac {1}{r _ {2} r _ {2}} + \\frac {1}{r _ {3} r _ {3}} + \\frac {1}{r _ {4} r _ {4}} + \\frac {1}{r _ {5} r _ {5}} \\text {i s} \\boxed {7 2 0}.", + "image_path": "7241552645a9f715abbfd5856f36fe3e9bf0185fcaa8355ef1915299ed2a840d.jpg" + } + ] + } + ], + "index": 89 + }, + { + "type": "code", + "bbox": [ + 113, + 479, + 479, + 488 + ], + "blocks": [ + { + "bbox": [ + 113, + 479, + 479, + 488 + ], + "lines": [ + { + "bbox": [ + 113, + 479, + 479, + 488 + ], + "spans": [ + { + "bbox": [ + 113, + 479, + 479, + 488 + ], + "type": "text", + "content": "Question: The equation " + }, + { + "bbox": [ + 113, + 479, + 479, + 488 + ], + "type": "inline_equation", + "content": "x^{10} + (13x 
- 1)^{10} = 0" + }, + { + "bbox": [ + 113, + 479, + 479, + 488 + ], + "type": "text", + "content": " has 10 complex roots " + }, + { + "bbox": [ + 113, + 479, + 479, + 488 + ], + "type": "inline_equation", + "content": "r_1, \\overline{r}_1, r_2, \\overline{r}_2, r_3, \\overline{r}_3, r_4, \\overline{r}_4, r_5, \\overline{r}_5" + }, + { + "bbox": [ + 113, + 479, + 479, + 488 + ], + "type": "text", + "content": ", where the bar denotes complex conjugation. Find the value of " + }, + { + "bbox": [ + 113, + 479, + 479, + 488 + ], + "type": "inline_equation", + "content": "\\frac{1}{r_1r_4} + \\frac{1}{r_2r_2} + \\frac{1}{r_3r_3} + \\frac{1}{r_4r_4} + \\frac{1}{r_5r_5}" + }, + { + "bbox": [ + 113, + 479, + 479, + 488 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 91, + "angle": 0, + "type": "code_body" + } + ], + "index": 91, + "sub_type": "code", + "guess_lang": "latex" + }, + { + "bbox": [ + 114, + 490, + 171, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 490, + 171, + 494 + ], + "spans": [ + { + "bbox": [ + 114, + 490, + 171, + 494 + ], + "type": "text", + "content": "High-Level Agent's Output" + } + ] + } + ], + "index": 92 + }, + { + "bbox": [ + 217, + 490, + 283, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 490, + 283, + 495 + ], + "spans": [ + { + "bbox": [ + 217, + 490, + 283, + 495 + ], + "type": "text", + "content": "Low-Level Agent's Output (part I)" + } + ] + } + ], + "index": 93 + }, + { + "bbox": [ + 108, + 495, + 178, + 665 + ], + "type": "list", + "angle": 0, + "index": 115, + "blocks": [ + { + "bbox": [ + 108, + 495, + 171, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 495, + 171, + 506 + ], + "spans": [ + { + "bbox": [ + 108, + 495, + 171, + 506 + ], + "type": "text", + "content": "To solve this problem, we need to follow these steps:" + } + ] + } + ], + "index": 94 + }, + { + "bbox": [ + 108, + 509, + 171, + 518 + ], + 
"type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 509, + 171, + 518 + ], + "spans": [ + { + "bbox": [ + 108, + 509, + 171, + 518 + ], + "type": "text", + "content": "1. Understand the given equation: The equation is " + }, + { + "bbox": [ + 108, + 509, + 171, + 518 + ], + "type": "inline_equation", + "content": "x^{10} - (13x - 1)^{10} = 0" + }, + { + "bbox": [ + 108, + 509, + 171, + 518 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 95 + }, + { + "bbox": [ + 108, + 521, + 166, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 521, + 166, + 526 + ], + "spans": [ + { + "bbox": [ + 108, + 521, + 166, + 526 + ], + "type": "text", + "content": "2. Identify the roots: The roots of the" + } + ] + } + ], + "index": 96 + }, + { + "bbox": [ + 108, + 526, + 142, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 526, + 142, + 530 + ], + "spans": [ + { + "bbox": [ + 108, + 526, + 142, + 530 + ], + "type": "text", + "content": "equation are given as" + } + ] + } + ], + "index": 97 + }, + { + "bbox": [ + 108, + 531, + 176, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 531, + 176, + 541 + ], + "spans": [ + { + "bbox": [ + 108, + 531, + 176, + 541 + ], + "type": "inline_equation", + "content": "\\overline{r_1},\\overline{r_2},\\overline{r_3},\\overline{r_4},\\overline{r_5},\\overline{r_6},\\overline{r_7},\\overline{r_8}" + }, + { + "bbox": [ + 108, + 531, + 176, + 541 + ], + "type": "text", + "content": " where the bar denotes complex conjugation." + } + ] + } + ], + "index": 98 + }, + { + "bbox": [ + 108, + 545, + 176, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 545, + 176, + 550 + ], + "spans": [ + { + "bbox": [ + 108, + 545, + 176, + 550 + ], + "type": "text", + "content": "3. 
Use properties of complex conjugates:" + } + ] + } + ], + "index": 99 + }, + { + "bbox": [ + 108, + 551, + 177, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 551, + 177, + 563 + ], + "spans": [ + { + "bbox": [ + 108, + 551, + 177, + 563 + ], + "type": "text", + "content": "For any complex number " + }, + { + "bbox": [ + 108, + 551, + 177, + 563 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 108, + 551, + 177, + 563 + ], + "type": "text", + "content": ", the product " + }, + { + "bbox": [ + 108, + 551, + 177, + 563 + ], + "type": "inline_equation", + "content": "z \\cdot z" + }, + { + "bbox": [ + 108, + 551, + 177, + 563 + ], + "type": "text", + "content": " is a complex function of the modulus of " + }, + { + "bbox": [ + 108, + 551, + 177, + 563 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 108, + 551, + 177, + 563 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 108, + 551, + 177, + 563 + ], + "type": "inline_equation", + "content": "|z \\cdot \\overline{z}| = |z|^2" + }, + { + "bbox": [ + 108, + 551, + 177, + 563 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 100 + }, + { + "bbox": [ + 108, + 568, + 177, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 568, + 177, + 573 + ], + "spans": [ + { + "bbox": [ + 108, + 568, + 177, + 573 + ], + "type": "text", + "content": "4. 
Simplify the expression: We need to find" + } + ] + } + ], + "index": 101 + }, + { + "bbox": [ + 108, + 574, + 162, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 574, + 162, + 578 + ], + "spans": [ + { + "bbox": [ + 108, + 574, + 162, + 578 + ], + "type": "text", + "content": "the value of 1" + } + ] + } + ], + "index": 102 + }, + { + "bbox": [ + 108, + 578, + 162, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 578, + 162, + 582 + ], + "spans": [ + { + "bbox": [ + 108, + 578, + 162, + 582 + ], + "type": "text", + "content": "r1+ r2r3+ r4" + } + ] + } + ], + "index": 103 + }, + { + "bbox": [ + 108, + 584, + 162, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 584, + 162, + 588 + ], + "spans": [ + { + "bbox": [ + 108, + 584, + 162, + 588 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 104 + }, + { + "bbox": [ + 108, + 590, + 173, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 590, + 173, + 594 + ], + "spans": [ + { + "bbox": [ + 108, + 590, + 173, + 594 + ], + "type": "text", + "content": "Apply an property of complex conjugates. 
Since " + }, + { + "bbox": [ + 108, + 590, + 173, + 594 + ], + "type": "inline_equation", + "content": "\\pi_1 \\cdot \\overline{\\pi}_1 = |\\pi_1|^2" + }, + { + "bbox": [ + 108, + 590, + 173, + 594 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 105 + }, + { + "bbox": [ + 108, + 594, + 126, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 594, + 126, + 598 + ], + "spans": [ + { + "bbox": [ + 108, + 594, + 126, + 598 + ], + "type": "text", + "content": "1+" + } + ] + } + ], + "index": 106 + }, + { + "bbox": [ + 108, + 600, + 126, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 600, + 126, + 604 + ], + "spans": [ + { + "bbox": [ + 108, + 600, + 126, + 604 + ], + "type": "text", + "content": "A" + } + ] + } + ], + "index": 107 + }, + { + "bbox": [ + 108, + 605, + 171, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 605, + 171, + 609 + ], + "spans": [ + { + "bbox": [ + 108, + 605, + 171, + 609 + ], + "type": "text", + "content": "6.Sum the reciprocals: Sum the values" + } + ] + } + ], + "index": 108 + }, + { + "bbox": [ + 108, + 610, + 162, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 610, + 162, + 615 + ], + "spans": [ + { + "bbox": [ + 108, + 610, + 162, + 615 + ], + "type": "text", + "content": "1+1 1 1" + } + ] + } + ], + "index": 109 + }, + { + "bbox": [ + 108, + 620, + 178, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 620, + 178, + 639 + ], + "spans": [ + { + "bbox": [ + 108, + 620, + 178, + 639 + ], + "type": "text", + "content": "7. Use Vieta's formulas: To find the sum of the reciprocals of the moduli squared, we can use the fact that the sum of the roots of the polynomial and their properties." 
+ } + ] + } + ], + "index": 110 + }, + { + "bbox": [ + 108, + 643, + 172, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 643, + 172, + 647 + ], + "spans": [ + { + "bbox": [ + 108, + 643, + 172, + 647 + ], + "type": "text", + "content": "8. Finalize the solution: Combine all the" + } + ] + } + ], + "index": 111 + }, + { + "bbox": [ + 108, + 647, + 151, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 647, + 151, + 651 + ], + "spans": [ + { + "bbox": [ + 108, + 647, + 151, + 651 + ], + "type": "text", + "content": "steps to find the final value" + } + ] + } + ], + "index": 112 + }, + { + "bbox": [ + 108, + 655, + 175, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 655, + 175, + 659 + ], + "spans": [ + { + "bbox": [ + 108, + 655, + 175, + 659 + ], + "type": "text", + "content": "Now, let's guide the low-level math solving" + } + ] + } + ], + "index": 113 + }, + { + "bbox": [ + 108, + 660, + 157, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 660, + 157, + 665 + ], + "spans": [ + { + "bbox": [ + 108, + 660, + 157, + 665 + ], + "type": "text", + "content": "agent to implement these steps." + } + ] + } + ], + "index": 114 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 182, + 495, + 316, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 495, + 316, + 498 + ], + "spans": [ + { + "bbox": [ + 182, + 495, + 316, + 498 + ], + "type": "text", + "content": "To solve the given problem, we start by analyzing the equation " + }, + { + "bbox": [ + 182, + 495, + 316, + 498 + ], + "type": "inline_equation", + "content": "x^{10} + (13x - 1)^{10} = 0" + }, + { + "bbox": [ + 182, + 495, + 316, + 498 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 116 + }, + { + "bbox": [ + 182, + 499, + 219, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 499, + 219, + 503 + ], + "spans": [ + { + "bbox": [ + 182, + 499, + 219, + 503 + ], + "type": "text", + "content": "This can be rewritten as:" + } + ] + } + ], + "index": 117 + }, + { + "bbox": [ + 234, + 507, + 268, + 511 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 234, + 507, + 268, + 511 + ], + "spans": [ + { + "bbox": [ + 234, + 507, + 268, + 511 + ], + "type": "interline_equation", + "content": "x ^ {1 0} = - (1 3 x - 1) ^ {1 0}.", + "image_path": "f5bf38bd49abe8bfdb677b4b0f81847c247b0abe866c361f4b13aeab02a794fc.jpg" + } + ] + } + ], + "index": 118 + }, + { + "bbox": [ + 182, + 515, + 247, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 515, + 247, + 520 + ], + "spans": [ + { + "bbox": [ + 182, + 515, + 247, + 520 + ], + "type": "text", + "content": "Taking the 10th root on both sides, we get:" + } + ] + } + ], + "index": 119 + }, + { + "bbox": [ + 220, + 524, + 264, + 528 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 524, + 264, + 528 + ], + "spans": [ + { + "bbox": [ + 220, + 524, + 264, + 528 + ], + "type": "interline_equation", + "content": "x = \\omega (1 3 x - 1),", + "image_path": "50ccdd806d3c2fbb81d2905bb78d2552273c33edff77da4aad98cefd9ca79a0c.jpg" + } + ] + } + ], + "index": 120 + }, + { + "bbox": [ + 182, + 532, + 310, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 532, + 310, + 541 + ], + "spans": [ + { + "bbox": [ + 182, + 532, + 310, + 541 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 182, + 532, + 310, + 541 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 182, + 532, + 310, + 541 + ], + "type": "text", + "content": " is a 10th root of " + }, + { + "bbox": [ + 182, + 532, + 310, + 541 + ], + 
"type": "inline_equation", + "content": "-1" + }, + { + "bbox": [ + 182, + 532, + 310, + 541 + ], + "type": "text", + "content": ". The 10th roots of " + }, + { + "bbox": [ + 182, + 532, + 310, + 541 + ], + "type": "inline_equation", + "content": "-1" + }, + { + "bbox": [ + 182, + 532, + 310, + 541 + ], + "type": "text", + "content": " are given by " + }, + { + "bbox": [ + 182, + 532, + 310, + 541 + ], + "type": "inline_equation", + "content": "\\omega = e^{\\pi (2k + 1) / 10}" + }, + { + "bbox": [ + 182, + 532, + 310, + 541 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 182, + 532, + 310, + 541 + ], + "type": "inline_equation", + "content": "k = 0, 1, 2, \\ldots, 9" + }, + { + "bbox": [ + 182, + 532, + 310, + 541 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 121 + }, + { + "bbox": [ + 182, + 545, + 233, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 545, + 233, + 550 + ], + "spans": [ + { + "bbox": [ + 182, + 545, + 233, + 550 + ], + "type": "text", + "content": "Rewriting the equation, we have:" + } + ] + } + ], + "index": 122 + }, + { + "bbox": [ + 183, + 553, + 317, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 183, + 553, + 317, + 563 + ], + "spans": [ + { + "bbox": [ + 183, + 553, + 317, + 563 + ], + "type": "inline_equation", + "content": "x = \\omega (13x - 1)\\Rightarrow x - 13\\omega x = -\\omega \\Rightarrow x(1 - 13\\omega) = -\\omega \\Rightarrow x = \\frac{-\\omega}{1 - 13\\omega}" + } + ] + } + ], + "index": 123 + }, + { + "bbox": [ + 182, + 564, + 235, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 564, + 235, + 568 + ], + "spans": [ + { + "bbox": [ + 182, + 564, + 235, + 568 + ], + "type": "text", + "content": "Thus, the roots of the equation are:" + } + ] + } + ], + "index": 124 + }, + { + "bbox": [ + 183, + 574, + 264, + 582 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 183, 
+ 574, + 264, + 582 + ], + "spans": [ + { + "bbox": [ + 183, + 574, + 264, + 582 + ], + "type": "interline_equation", + "content": "r _ {k} = \\frac {- \\omega_ {k}}{1 - 1 3 \\omega_ {k}}", + "image_path": "528333c7c549e5b59e02481e7570b1ab4ebdcf2fe735070193c07c2a4fc150ac.jpg" + } + ] + } + ], + "index": 125 + }, + { + "bbox": [ + 182, + 585, + 241, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 585, + 241, + 589 + ], + "spans": [ + { + "bbox": [ + 182, + 585, + 241, + 589 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 182, + 585, + 241, + 589 + ], + "type": "inline_equation", + "content": "\\omega_{k} = e^{i\\pi (2k + 1) / 10}" + }, + { + "bbox": [ + 182, + 585, + 241, + 589 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 182, + 585, + 241, + 589 + ], + "type": "inline_equation", + "content": "k = 0,1,2" + } + ] + } + ], + "index": 126 + }, + { + "bbox": [ + 182, + 594, + 315, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 594, + 315, + 603 + ], + "spans": [ + { + "bbox": [ + 182, + 594, + 315, + 603 + ], + "type": "text", + "content": "The roots come in conjugate pairs, so the roots are " + }, + { + "bbox": [ + 182, + 594, + 315, + 603 + ], + "type": "inline_equation", + "content": "r_1, \\overline{r_1}, r_2, \\overline{r_2}, r_3, \\overline{r_3}, r_4, \\overline{r_4}, r_5, \\overline{r_5}" + }, + { + "bbox": [ + 182, + 594, + 315, + 603 + ], + "type": "text", + "content": ". 
We need to find the value of:" + } + ] + } + ], + "index": 127 + }, + { + "bbox": [ + 219, + 605, + 282, + 616 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 605, + 282, + 616 + ], + "spans": [ + { + "bbox": [ + 219, + 605, + 282, + 616 + ], + "type": "interline_equation", + "content": "\\frac {1}{r _ {1} \\overline {{r _ {1}}}} + \\frac {1}{r _ {2} \\overline {{r _ {2}}}} + \\frac {1}{r _ {3} \\overline {{r _ {3}}}} + \\frac {1}{r _ {4} \\overline {{r _ {4}}}} + \\frac {1}{r _ {5} \\overline {{r _ {5}}}}", + "image_path": "c6e921c75e514affe23ba47738a4334e7ae564f4ed6caa716d917d2741943324.jpg" + } + ] + } + ], + "index": 128 + }, + { + "bbox": [ + 182, + 618, + 227, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 618, + 227, + 623 + ], + "spans": [ + { + "bbox": [ + 182, + 618, + 227, + 623 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 182, + 618, + 227, + 623 + ], + "type": "inline_equation", + "content": "r_k\\overline{r_k} = |r_k|^2" + }, + { + "bbox": [ + 182, + 618, + 227, + 623 + ], + "type": "text", + "content": " we have:" + } + ] + } + ], + "index": 129 + }, + { + "bbox": [ + 239, + 626, + 263, + 637 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 626, + 263, + 637 + ], + "spans": [ + { + "bbox": [ + 239, + 626, + 263, + 637 + ], + "type": "interline_equation", + "content": "\\frac {1}{r _ {k} r _ {k}} = \\frac {1}{| r _ {k} | ^ {2}}", + "image_path": "4c2a5c3a0dfb0eb00cd8928e39973a9df874ba2cf7234966644df2fb2cd4cdb1.jpg" + } + ] + } + ], + "index": 130 + }, + { + "bbox": [ + 182, + 640, + 231, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 640, + 231, + 644 + ], + "spans": [ + { + "bbox": [ + 182, + 640, + 231, + 644 + ], + "type": "text", + "content": "We need to find " + }, + { + "bbox": [ + 182, + 640, + 231, + 644 + ], + "type": "inline_equation", + "content": "|r_k|^2" + }, + { + 
"bbox": [ + 182, + 640, + 231, + 644 + ], + "type": "text", + "content": ". Note that:" + } + ] + } + ], + "index": 131 + }, + { + "bbox": [ + 237, + 648, + 263, + 657 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 648, + 263, + 657 + ], + "spans": [ + { + "bbox": [ + 237, + 648, + 263, + 657 + ], + "type": "interline_equation", + "content": "r _ {k} = \\frac {- \\omega_ {k}}{1 - 1 3 \\omega_ {k}}", + "image_path": "938abda3f283b1d544ed098377c88573b548599d2d006a47e793f5c697c16537.jpg" + } + ] + } + ], + "index": 132 + }, + { + "bbox": [ + 377, + 491, + 446, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 377, + 491, + 446, + 495 + ], + "spans": [ + { + "bbox": [ + 377, + 491, + 446, + 495 + ], + "type": "text", + "content": "Low-Level Agent's Output (part II)" + } + ] + } + ], + "index": 133 + }, + { + "bbox": [ + 323, + 498, + 359, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 498, + 359, + 503 + ], + "spans": [ + { + "bbox": [ + 323, + 498, + 359, + 503 + ], + "type": "text", + "content": "The magnitude of " + }, + { + "bbox": [ + 323, + 498, + 359, + 503 + ], + "type": "inline_equation", + "content": "r_k" + }, + { + "bbox": [ + 323, + 498, + 359, + 503 + ], + "type": "text", + "content": " is:" + } + ] + } + ], + "index": 134 + }, + { + "bbox": [ + 372, + 506, + 449, + 517 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 506, + 449, + 517 + ], + "spans": [ + { + "bbox": [ + 372, + 506, + 449, + 517 + ], + "type": "interline_equation", + "content": "| r _ {k} | = \\left| \\frac {- \\omega_ {k}}{1 - 1 3 \\omega_ {k}} \\right| = \\left| \\frac {| \\omega_ {k} |}{1 - 1 3 \\omega_ {k}} \\right| = \\frac {1}{1 - 1 3 \\omega_ {k}}", + "image_path": "4d01cf958b1cef0ac5f73fe67da58dfb9f7784a3720c1d5bf7c308c749b09a32.jpg" + } + ] + } + ], + "index": 135 + }, + { + "bbox": [ + 323, + 520, + 332, + 524 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 323, + 520, + 332, + 524 + ], + "spans": [ + { + "bbox": [ + 323, + 520, + 332, + 524 + ], + "type": "text", + "content": "Thus," + } + ] + } + ], + "index": 136 + }, + { + "bbox": [ + 394, + 527, + 428, + 537 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 394, + 527, + 428, + 537 + ], + "spans": [ + { + "bbox": [ + 394, + 527, + 428, + 537 + ], + "type": "interline_equation", + "content": "| r _ {k} | ^ {2} = \\frac {1}{| 1 - 1 3 \\omega_ {k} | ^ {2}}", + "image_path": "a64ee91f6fec3fd7b197bb471e73dc04d0d0d12e6b2af7b48fca4a932ea681cb.jpg" + } + ] + } + ], + "index": 137 + }, + { + "bbox": [ + 323, + 540, + 339, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 540, + 339, + 544 + ], + "spans": [ + { + "bbox": [ + 323, + 540, + 339, + 544 + ], + "type": "text", + "content": "Therefore," + } + ] + } + ], + "index": 138 + }, + { + "bbox": [ + 394, + 548, + 429, + 558 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 394, + 548, + 429, + 558 + ], + "spans": [ + { + "bbox": [ + 394, + 548, + 429, + 558 + ], + "type": "interline_equation", + "content": "\\frac {1}{r _ {2} r _ {1}} = | 1 - 1 3 \\omega_ {k} | ^ {2}.", + "image_path": "3fd02be075315fbd643707dc3d3d5497904efd1829a267bf356bdc98e9fbe27f.jpg" + } + ] + } + ], + "index": 139 + }, + { + "bbox": [ + 323, + 562, + 392, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 562, + 392, + 566 + ], + "spans": [ + { + "bbox": [ + 323, + 562, + 392, + 566 + ], + "type": "text", + "content": "We need to sum this over the 5 pairs of roots:" + } + ] + } + ], + "index": 140 + }, + { + "bbox": [ + 398, + 569, + 424, + 580 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 398, + 569, + 424, + 580 + ], + "spans": [ + { + "bbox": [ + 398, + 569, + 424, + 580 + ], + "type": "interline_equation", + "content": "\\sum_ {k = 0} ^ {4} | 1 
- 1 3 \\omega_ {k} | ^ {2}", + "image_path": "80fb1f97ec83f17fbd8b560be77fe8de42b9125cf2446d0cdbeb37b3b7666a68.jpg" + } + ] + } + ], + "index": 141 + }, + { + "bbox": [ + 323, + 585, + 373, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 585, + 373, + 589 + ], + "spans": [ + { + "bbox": [ + 323, + 585, + 373, + 589 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 323, + 585, + 373, + 589 + ], + "type": "inline_equation", + "content": "\\omega_{k} = e^{i\\pi (2k + 1) / 10}" + }, + { + "bbox": [ + 323, + 585, + 373, + 589 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 142 + }, + { + "bbox": [ + 323, + 592, + 497, + 605 + ], + "type": "list", + "angle": 0, + "index": 145, + "blocks": [ + { + "bbox": [ + 324, + 592, + 497, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 592, + 497, + 597 + ], + "spans": [ + { + "bbox": [ + 324, + 592, + 497, + 597 + ], + "type": "inline_equation", + "content": "|1 - 13\\omega_{k}|^{2} = (1 - 13\\omega_{k})(1 - 13\\overline{\\omega_{k}}) = (1 - 13\\omega_{k})(1 - 13e^{-i\\pi (2k + 1) / 10}) = 1 - 13\\omega_{k} - 13e^{-i\\pi (2k + 1) / 10} + 169." 
+ } + ] + } + ], + "index": 143 + }, + { + "bbox": [ + 323, + 600, + 361, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 600, + 361, + 605 + ], + "spans": [ + { + "bbox": [ + 323, + 600, + 361, + 605 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 323, + 600, + 361, + 605 + ], + "type": "inline_equation", + "content": "\\omega_{k}\\overline{\\omega_{k}} = 1" + }, + { + "bbox": [ + 323, + 600, + 361, + 605 + ], + "type": "text", + "content": " we get:" + } + ] + } + ], + "index": 144 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 323, + 609, + 500, + 619 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 609, + 500, + 619 + ], + "spans": [ + { + "bbox": [ + 323, + 609, + 500, + 619 + ], + "type": "interline_equation", + "content": "\\left| 1 - 1 3 \\omega_ {k} \\right| ^ {2} = 1 6 9 - 1 3 (\\omega_ {k} + e ^ {- i \\pi (2 k + 1) \\cdot 1 0}) + 1 = 1 7 0 - 1 3 \\cdot 2 \\cos \\left(\\frac {\\pi (2 k + 1)}{1 0}\\right) = 1 7 0 - 2 6 \\cos \\left(\\frac {\\pi (2 k + 1)}{1 0}\\right)", + "image_path": "d51f2e94513b319fbfd3670c878bfba4fad1b14bbbb2d664cf5d96071695f0f5.jpg" + } + ] + } + ], + "index": 146 + }, + { + "bbox": [ + 323, + 622, + 383, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 622, + 383, + 627 + ], + "spans": [ + { + "bbox": [ + 323, + 622, + 383, + 627 + ], + "type": "text", + "content": "Summing over " + }, + { + "bbox": [ + 323, + 622, + 383, + 627 + ], + "type": "inline_equation", + "content": "k = 0,1,2,3,4" + }, + { + "bbox": [ + 323, + 622, + 383, + 627 + ], + "type": "text", + "content": " we get:" + } + ] + } + ], + "index": 147 + }, + { + "bbox": [ + 335, + 630, + 487, + 642 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 630, + 487, + 642 + ], + "spans": [ + { + "bbox": [ + 335, + 630, + 487, + 642 + ], + "type": "interline_equation", + "content": "\\sum_ {k = 0} ^ {n - 1} 
\\left(1 7 0 - 2 6 \\cos \\left(\\frac {\\pi (2 k + 1)}{1 0}\\right)\\right) = 5 \\cdot 1 7 0 - 2 6 \\sum_ {k = 0} ^ {n - 1} \\cos \\left(\\frac {\\pi (2 k + 1)}{1 0}\\right) = 8 5 0 - 2 6 \\cdot 0 = 8 5 0.", + "image_path": "8748dabc4481672874c3f2795f0d6264a961d9e141b9cdee0eb288d7db2dc87c.jpg" + } + ] + } + ], + "index": 148 + }, + { + "bbox": [ + 323, + 646, + 351, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 646, + 351, + 650 + ], + "spans": [ + { + "bbox": [ + 323, + 646, + 351, + 650 + ], + "type": "text", + "content": "Thus, the value is:" + } + ] + } + ], + "index": 149 + }, + { + "type": "image", + "bbox": [ + 406, + 653, + 416, + 658 + ], + "blocks": [ + { + "bbox": [ + 174, + 451, + 436, + 463 + ], + "lines": [ + { + "bbox": [ + 174, + 451, + 436, + 463 + ], + "spans": [ + { + "bbox": [ + 174, + 451, + 436, + 463 + ], + "type": "text", + "content": "Figure 10: Case Study for consistency reward of high-level agent" + } + ] + } + ], + "index": 90, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 406, + 653, + 416, + 658 + ], + "lines": [ + { + "bbox": [ + 406, + 653, + 416, + 658 + ], + "spans": [ + { + "bbox": [ + 406, + 653, + 416, + 658 + ], + "type": "image", + "image_path": "6cce98a8906c58ccf3aa6cedf13fe8485572fcdb57dd27e9b6cd696eb48335fd.jpg" + } + ] + } + ], + "index": 150, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 164, + 674, + 446, + 686 + ], + "lines": [ + { + "bbox": [ + 164, + 674, + 446, + 686 + ], + "spans": [ + { + "bbox": [ + 164, + 674, + 446, + 686 + ], + "type": "text", + "content": "Figure 11: Case Study for base correctness reward of high-level agent" + } + ] + } + ], + "index": 151, + "angle": 0, + "type": "image_caption" + } + ], + "index": 150 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 
311, + 750 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 152 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 148, + 77, + 462, + 87 + ], + "blocks": [ + { + "bbox": [ + 148, + 77, + 462, + 87 + ], + "lines": [ + { + "bbox": [ + 148, + 77, + 462, + 87 + ], + "spans": [ + { + "bbox": [ + 148, + 77, + 462, + 87 + ], + "type": "text", + "content": "Question: The product of " + }, + { + "bbox": [ + 148, + 77, + 462, + 87 + ], + "type": "inline_equation", + "content": "3t^{2} + 5t + a" + }, + { + "bbox": [ + 148, + 77, + 462, + 87 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 148, + 77, + 462, + 87 + ], + "type": "inline_equation", + "content": "4t^{2} + bt - 2" + }, + { + "bbox": [ + 148, + 77, + 462, + 87 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 148, + 77, + 462, + 87 + ], + "type": "inline_equation", + "content": "12t^{4} + 26t^{3} - 8t^{2} - 16t + 6" + }, + { + "bbox": [ + 148, + 77, + 462, + 87 + ], + "type": "text", + "content": ". What is " + }, + { + "bbox": [ + 148, + 77, + 462, + 87 + ], + "type": "inline_equation", + "content": "a + b" + }, + { + "bbox": [ + 148, + 77, + 462, + 87 + ], + "type": "text", + "content": "?" 
+ } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_body" + }, + { + "bbox": [ + 309, + 93, + 421, + 102 + ], + "lines": [ + { + "bbox": [ + 309, + 93, + 421, + 102 + ], + "spans": [ + { + "bbox": [ + 309, + 93, + 421, + 102 + ], + "type": "text", + "content": "Llama3.2-1B-Instruct training Output:" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_caption" + } + ], + "index": 0, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 108, + 106, + 304, + 144 + ], + "blocks": [ + { + "bbox": [ + 108, + 93, + 219, + 102 + ], + "lines": [ + { + "bbox": [ + 108, + 93, + 219, + 102 + ], + "spans": [ + { + "bbox": [ + 108, + 93, + 219, + 102 + ], + "type": "text", + "content": "Llama3.1-8B-Instruct training Output:" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 108, + 106, + 304, + 144 + ], + "lines": [ + { + "bbox": [ + 108, + 106, + 304, + 144 + ], + "spans": [ + { + "bbox": [ + 108, + 106, + 304, + 144 + ], + "type": "text", + "content": "{\n \"action\": \"DECOMPOSE\",\n \"output\": \"Decompose the problem into finding the coefficients of the product of two polynomials and then solving for a and b.\"\n}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "json" + }, + { + "type": "code", + "bbox": [ + 311, + 106, + 503, + 145 + ], + "blocks": [ + { + "bbox": [ + 311, + 106, + 503, + 145 + ], + "lines": [ + { + "bbox": [ + 311, + 106, + 503, + 145 + ], + "spans": [ + { + "bbox": [ + 311, + 106, + 503, + 145 + ], + "type": "text", + "content": "{ \"action\": \"EMPTY\", \"output\".\"\" }" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 108, + 150, + 294, + 369 + ], + "blocks": [ + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "lines": [ + { + "bbox": [ + 108, + 150, + 294, 
+ 369 + ], + "spans": [ + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": "Step 1: To find the product of the two polynomials " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "3t^{2} + 5t + a" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "4t^{2} + bt - 2" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ", we need to multiply each term of the first polynomial by each term of the second polynomial and then combine like terms. \nStep 2: The product of the two polynomials can be written as " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "(3t^{2} + 5t + a)(4t^{2} + bt - 2)" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ". \nStep 3: Using the distributive property, we multiply each term in the first polynomial by each term in the second polynomial: " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "(3t^{2})(4t^{2}) + (3t^{2})(bt) + (3t^{2})(-2) + (5t)(4t^{2}) + (5t)(bt) + (5t)(-2) + (a)(4t^{2}) + (a)(bt) + (a)(-2)" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ". \nStep 4: Simplifying the products, we get " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "12t^{4} + 3b^{3} - 6t^{2} + 20t^{3} + 5bt^{2} - 10t + 4at^{2} + abt - 2a" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ". \nStep 5: Combining like terms, we have " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "12t^{4} + (3b + 20)t^{3} + (5b - 6 + 4a)t^{2} + (ab - 10)t - 2a" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ". 
\nStep 6: We are given that the product of the two polynomials is " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "12t^{4} + 26t^{3} - 8t^{2} - 16t + 6" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ". \nWe can equate the coefficients of the corresponding powers of " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": " in the two expressions. \nStep 7: Equating the coefficients of " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "t^3" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ", we get " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "3b + 20 = 26" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ". \nStep 8: Equating the coefficients of " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "t^2" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ", we get " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "5b - 6 + 4a = -8" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ". \nStep 9: Equating the constant terms, we get " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "-2a = 6" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ". 
\nStep 10: Solving the equation " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "-2a = 6" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ", we find that " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "a = -3" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ". \nStep 11: Substituting " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "a = -3" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": " into the equation " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "5b - 6 + 4a = -8" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ", we get " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "5b - 6 + 4(-3) = -8" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ", which simplifies to " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "5b - 18 = -8" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ". \nStep 12: Solving for " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ", we find that " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "5b = 10" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ", so " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "b = 2" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ". 
\nStep 13: Now that we have found the values of " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ", we can calculate their sum: " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "a + b = -3 + 2 = -1" + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "text", + "content": ". \nThe final answer is: " + }, + { + "bbox": [ + 108, + 150, + 294, + 369 + ], + "type": "inline_equation", + "content": "\\boxed{-1}" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 310, + 154, + 496, + 177 + ], + "blocks": [ + { + "bbox": [ + 310, + 154, + 496, + 177 + ], + "lines": [ + { + "bbox": [ + 310, + 154, + 496, + 177 + ], + "spans": [ + { + "bbox": [ + 310, + 154, + 496, + 177 + ], + "type": "text", + "content": "Step 1: The problem asks us to find the values of " + }, + { + "bbox": [ + 310, + 154, + 496, + 177 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 310, + 154, + 496, + 177 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 310, + 154, + 496, + 177 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 310, + 154, + 496, + 177 + ], + "type": "text", + "content": " in the quadratic expressions " + }, + { + "bbox": [ + 310, + 154, + 496, + 177 + ], + "type": "inline_equation", + "content": "3t^2 + 5t + a" + }, + { + "bbox": [ + 310, + 154, + 496, + 177 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 310, + 154, + 496, + 177 + ], + "type": "inline_equation", + "content": "4t^2 + bt - 2" + }, + { + 
"bbox": [ + 310, + 154, + 496, + 177 + ], + "type": "text", + "content": ", respectively, given that their product is " + }, + { + "bbox": [ + 310, + 154, + 496, + 177 + ], + "type": "inline_equation", + "content": "12t^4 + 26t^3 - 8t^2 - 16t + 6" + }, + { + "bbox": [ + 310, + 154, + 496, + 177 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 310, + 184, + 501, + 207 + ], + "blocks": [ + { + "bbox": [ + 310, + 184, + 501, + 207 + ], + "lines": [ + { + "bbox": [ + 310, + 184, + 501, + 207 + ], + "spans": [ + { + "bbox": [ + 310, + 184, + 501, + 207 + ], + "type": "text", + "content": "Step 2: To solve this, we need to multiply the two quadratic expressions and equate the resulting expression to the given product " + }, + { + "bbox": [ + 310, + 184, + 501, + 207 + ], + "type": "inline_equation", + "content": "12t^4 + 26t^3 - 8t^2 - 16t + 6" + }, + { + "bbox": [ + 310, + 184, + 501, + 207 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_body" + } + ], + "index": 7, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 310, + 214, + 466, + 230 + ], + "blocks": [ + { + "bbox": [ + 310, + 214, + 466, + 230 + ], + "lines": [ + { + "bbox": [ + 310, + 214, + 466, + 230 + ], + "spans": [ + { + "bbox": [ + 310, + 214, + 466, + 230 + ], + "type": "text", + "content": "Step 3: We start by multiplying the two quadratic expressions: " + }, + { + "bbox": [ + 310, + 214, + 466, + 230 + ], + "type": "inline_equation", + "content": "(3t^{2} + 5t + a)(4t^{2} + bt - 2)" + }, + { + "bbox": [ + 310, + 214, + 466, + 230 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_body" + } + ], + "index": 8, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 310, + 237, + 477, + 252 + ], + "blocks": [ + { + "bbox": [ + 310, + 237, + 477, + 252 + ], + "lines": [ + { + "bbox": [ + 310, + 237, + 477, + 252 + ], + "spans": [ + { + "bbox": [ + 310, + 237, + 477, + 252 + ], + "type": "text", + "content": "Step 4: This gives us " + }, + { + "bbox": [ + 310, + 237, + 477, + 252 + ], + "type": "inline_equation", + "content": "12t^4 + (3b + 5)t^3 + (4a + 5b - 6)t^2 + (-2b + 10t + 2a)t - 2a." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 310, + 258, + 499, + 274 + ], + "blocks": [ + { + "bbox": [ + 310, + 258, + 499, + 274 + ], + "lines": [ + { + "bbox": [ + 310, + 258, + 499, + 274 + ], + "spans": [ + { + "bbox": [ + 310, + 258, + 499, + 274 + ], + "type": "text", + "content": "Step 5: We know that the coefficients of the resulting expression must be equal to the coefficients of the given product " + }, + { + "bbox": [ + 310, + 258, + 499, + 274 + ], + "type": "inline_equation", + "content": "12t^4 + 26t^3 - 8t^2 - 16t + 6" + }, + { + "bbox": [ + 310, + 258, + 499, + 274 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "code_body" + } + ], + "index": 10, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 310, + 281, + 495, + 296 + ], + "blocks": [ + { + "bbox": [ + 310, + 281, + 495, + 296 + ], + "lines": [ + { + "bbox": [ + 310, + 281, + 495, + 296 + ], + "spans": [ + { + "bbox": [ + 310, + 281, + 495, + 296 + ], + "type": "text", + "content": "Step 6: Equating the coefficients of " + }, + { + "bbox": [ + 310, + 281, + 495, + 296 + ], + "type": "inline_equation", + "content": "t^3" + }, + { + "bbox": [ + 310, + 281, + 495, + 296 + ], + "type": "text", + "content": " gives us the equation " + }, + { + "bbox": [ + 310, + 281, + 495, + 296 + ], + "type": "inline_equation", + "content": "3b + 5 = 26" + }, + { + "bbox": [ + 310, + 281, + 495, + 296 + ], + "type": "text", + "content": ", which simplifies to " + }, + { + "bbox": [ + 310, + 281, + 495, + 296 + ], + "type": "inline_equation", + "content": "3b = 21" + }, + { + "bbox": [ + 310, + 281, + 495, + 296 + ], + "type": "text", + "content": " and thus " + }, + { + "bbox": [ + 310, + 281, + 495, + 296 + ], + "type": "inline_equation", + "content": "b = 7" + }, + { + "bbox": [ + 310, + 281, + 495, + 296 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_body" + } + ], + "index": 11, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 310, + 303, + 496, + 319 + ], + "blocks": [ + { + "bbox": [ + 310, + 303, + 496, + 319 + ], + "lines": [ + { + "bbox": [ + 310, + 303, + 496, + 319 + ], + "spans": [ + { + "bbox": [ + 310, + 303, + 496, + 319 + ], + "type": "text", + "content": "Step 7: Equating the constant terms gives us " + }, + { + "bbox": [ + 310, + 303, + 496, + 319 + ], + "type": "inline_equation", + "content": "-2a = -6" + }, + { + "bbox": [ + 310, + 303, + 496, + 319 + ], + "type": "text", + "content": ", which simplifies to " + }, + { + "bbox": [ + 310, + 303, + 496, + 319 + ], + "type": "inline_equation", + "content": "a = 3" + }, + { + "bbox": [ + 310, + 303, + 496, + 319 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_body" + } + ], + "index": 12, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 310, + 326, + 499, + 334 + ], + "blocks": [ + { + "bbox": [ + 310, + 326, + 499, + 334 + ], + "lines": [ + { + "bbox": [ + 310, + 326, + 499, + 334 + ], + "spans": [ + { + "bbox": [ + 310, + 326, + 499, + 334 + ], + "type": "text", + "content": "Step 8: Now that we have the values of " + }, + { + "bbox": [ + 310, + 326, + 499, + 334 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 310, + 326, + 499, + 334 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 310, + 326, + 499, + 334 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 310, + 326, + 499, + 334 + ], + "type": "text", + "content": ", we can find the sum " + }, + { + "bbox": [ + 310, + 326, + 499, + 334 + ], + "type": "inline_equation", + "content": "a + b" + }, + { + "bbox": [ + 310, + 326, + 499, + 334 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "code_body" + } + ], + "index": 13, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 310, + 340, + 446, + 348 + ], + "blocks": [ + { + "bbox": [ + 310, + 340, + 446, + 348 + ], + "lines": [ + { + "bbox": [ + 310, + 340, + 446, + 348 + ], + "spans": [ + { + "bbox": [ + 310, + 340, + 446, + 348 + ], + "type": "text", + "content": "Step 9: Adding " + }, + { + "bbox": [ + 310, + 340, + 446, + 348 + ], + "type": "inline_equation", + "content": "a = 3" + }, + { + "bbox": [ + 310, + 340, + 446, + 348 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 310, + 340, + 446, + 348 + ], + "type": "inline_equation", + "content": "b = 7" + }, + { + "bbox": [ + 310, + 340, + 446, + 348 + ], + "type": "text", + "content": " gives us " + }, + { + "bbox": [ + 310, + 340, + 446, + 348 + ], + "type": "inline_equation", + "content": "a + b = 10" + }, + { + "bbox": [ + 310, + 340, + 446, + 348 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "code_body" + } + ], + "index": 14, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 310, + 355, + 373, + 364 + ], + "blocks": [ + { + "bbox": [ + 310, + 355, + 373, + 364 + ], + "lines": [ + { + "bbox": [ + 310, + 355, + 373, + 364 + ], + "spans": [ + { + "bbox": [ + 310, + 355, + 373, + 364 + ], + "type": "text", + "content": "The final answer is: " + }, + { + "bbox": [ + 310, + 355, + 373, + 364 + ], + "type": "inline_equation", + "content": "\\boxed{10}" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "code_body" + } + ], + "index": 15, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 162, + 377, + 447, + 389 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 377, + 447, + 389 + ], + "spans": [ + { + "bbox": [ + 162, + 377, + 447, + 389 + ], + "type": "text", + "content": "Figure 12: Case Study for interpretability experiments in Section 4.2.2" + } + ] + } + ], + "index": 16, + "type": "text" + }, + { + "type": "code", + "bbox": [ + 108, + 423, + 476, + 565 + ], + "blocks": [ + { + "bbox": [ + 108, + 411, + 175, + 422 + ], + "lines": [ + { + "bbox": [ + 108, + 411, + 175, + 422 + ], + "spans": [ + { + "bbox": [ + 108, + 411, + 175, + 422 + ], + "type": "text", + "content": "System prompt:" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 108, + 423, + 476, + 565 + ], + "lines": [ + { + "bbox": [ + 108, + 423, + 476, + 565 + ], + "spans": [ + { + "bbox": [ + 108, + 423, + 476, + 565 + ], + "type": "text", + "content": "You are a math expert trying to solve mathematical problems. Before answering a question, your task is to decompose the original question to make it clearer. \nProvide your rewritten content in JSON format: \n{\"action\": \"DECOMPOSE\", \"output\": \"{decomposed question content}}\" \n}} \nRespond only with valid JSON. Do not write an introduction or summary. 
\nUser prompt: \nHere is the question: [problem.text]" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "code_body" + } + ], + "index": 18, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 106, + 571, + 458, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 571, + 458, + 582 + ], + "spans": [ + { + "bbox": [ + 106, + 571, + 458, + 582 + ], + "type": "text", + "content": "Prompt for generating final answers using on the question and metacognition reasoning:" + } + ] + } + ], + "index": 19 + }, + { + "type": "code", + "bbox": [ + 108, + 602, + 500, + 646 + ], + "blocks": [ + { + "bbox": [ + 108, + 590, + 174, + 601 + ], + "lines": [ + { + "bbox": [ + 108, + 590, + 174, + 601 + ], + "spans": [ + { + "bbox": [ + 108, + 590, + 174, + 601 + ], + "type": "text", + "content": "System prompt:" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 108, + 602, + 500, + 646 + ], + "lines": [ + { + "bbox": [ + 108, + 602, + 500, + 646 + ], + "spans": [ + { + "bbox": [ + 108, + 602, + 500, + 646 + ], + "type": "text", + "content": "You are a math expert tasked with solving problems step by step. Follow the provided instructions precisely, showing all reasoning and intermediate steps. Present the final answer within \\boxed{\\{\\}}\\}." 
+ } + ] + } + ], + "index": 21, + "angle": 0, + "type": "code_body" + }, + { + "bbox": [ + 109, + 646, + 164, + 655 + ], + "lines": [ + { + "bbox": [ + 109, + 646, + 164, + 655 + ], + "spans": [ + { + "bbox": [ + 109, + 646, + 164, + 655 + ], + "type": "text", + "content": "User prompt:" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "code_caption" + } + ], + "index": 21, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 108, + 656, + 337, + 710 + ], + "blocks": [ + { + "bbox": [ + 108, + 656, + 337, + 710 + ], + "lines": [ + { + "bbox": [ + 108, + 656, + 337, + 710 + ], + "spans": [ + { + "bbox": [ + 108, + 656, + 337, + 710 + ], + "type": "text", + "content": "Here is the question and instructions: \nQuestion \n[problem_text] \nProvided Instruction \n[instruction_text]" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "code_body" + } + ], + "index": 23, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 261, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 261, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 261, + 85 + ], + "type": "text", + "content": "G.1.2 Prompts for Math problems" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 91, + 167, + 103 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 91, + 167, + 103 + ], + "spans": [ + { + "bbox": [ + 106, + 91, + 167, + 103 + ], + "type": "text", + "content": "VRP prompt:" + } + ] + } + ], + "index": 1 + }, + { + "type": "code", + "bbox": [ + 106, + 104, + 493, + 173 
+ ], + "blocks": [ + { + "bbox": [ + 106, + 104, + 493, + 173 + ], + "lines": [ + { + "bbox": [ + 106, + 104, + 493, + 173 + ], + "spans": [ + { + "bbox": [ + 106, + 104, + 493, + 173 + ], + "type": "text", + "content": "System prompt: \nYou are a math expert tasked with solving problems step by step. Present the final answer within \\boxed{}?. \nUser prompt: \nHere is the question: \n{Question}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 106, + 178, + 169, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 178, + 169, + 190 + ], + "spans": [ + { + "bbox": [ + 106, + 178, + 169, + 190 + ], + "type": "text", + "content": "MRP prompt:" + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 106, + 191, + 501, + 303 + ], + "blocks": [ + { + "bbox": [ + 106, + 191, + 501, + 303 + ], + "lines": [ + { + "bbox": [ + 106, + 191, + 501, + 303 + ], + "spans": [ + { + "bbox": [ + 106, + 191, + 501, + 303 + ], + "type": "text", + "content": "System prompt: \nYou are a math expert tasked with solving problems. When solving a problem, your first task is to provide a high-level solution plan as an instruction. Then you need to follow the provided instructions precisely, showing all reasoning and intermediate steps. Finally, you must present the final answer within boxed}. 
\nUser prompt: \nHere is the question: {Question}" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 106, + 308, + 186, + 320 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 308, + 186, + 320 + ], + "spans": [ + { + "bbox": [ + 106, + 308, + 186, + 320 + ], + "type": "text", + "content": "MAMRP prompt:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 320, + 175, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 320, + 175, + 331 + ], + "spans": [ + { + "bbox": [ + 106, + 320, + 175, + 331 + ], + "type": "text", + "content": "high-level agent:" + } + ] + } + ], + "index": 6 + }, + { + "type": "code", + "bbox": [ + 106, + 332, + 507, + 487 + ], + "blocks": [ + { + "bbox": [ + 106, + 332, + 507, + 487 + ], + "lines": [ + { + "bbox": [ + 106, + 332, + 507, + 487 + ], + "spans": [ + { + "bbox": [ + 106, + 332, + 507, + 487 + ], + "type": "text", + "content": "System prompt: \nYou are a math expert specialized in solving mathematical problems, you need to teach a weaker agent with minimal capability in math how to solve a problem step-by-step. \nYour task is to provide a high-level solution plan for the given problem, in order to guide a low-level math solving agent to solve the problem. \nYou can not directly answer the question. You'll be punished if you include any answer in your response. \nYou need to first think deeply in mind and output your final instruction. 
\nUser prompt: \nHere is the question: \n{Question}" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_body" + } + ], + "index": 7, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 106, + 489, + 171, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 489, + 171, + 500 + ], + "spans": [ + { + "bbox": [ + 106, + 489, + 171, + 500 + ], + "type": "text", + "content": "low-level agent:" + } + ] + } + ], + "index": 8 + }, + { + "type": "code", + "bbox": [ + 106, + 501, + 501, + 655 + ], + "blocks": [ + { + "bbox": [ + 106, + 501, + 501, + 655 + ], + "lines": [ + { + "bbox": [ + 106, + 501, + 501, + 655 + ], + "spans": [ + { + "bbox": [ + 106, + 501, + 501, + 655 + ], + "type": "text", + "content": "System prompt: \nYou are a math expert tasked with solving problems step by step. Follow the provided instructions precisely, showing all reasoning and intermediate steps. Present the final answer within \\boxed{}/. User prompt: Here is the question and instructions: [Question] {Question} [End of Question] [Provided Instruction] {instruction} [End of Instruction]" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 309, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 309, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 309, + 85 + ], + "type": "text", + "content": "G.1.3 Prompts for LLM-as-a-Judge problems" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 91, + 
297, + 102 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 91, + 297, + 102 + ], + "spans": [ + { + "bbox": [ + 105, + 91, + 297, + 102 + ], + "type": "text", + "content": "We adopt the prompts from Saha et al. [2025a]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 102, + 167, + 114 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 102, + 167, + 114 + ], + "spans": [ + { + "bbox": [ + 105, + 102, + 167, + 114 + ], + "type": "text", + "content": "VRP prompt:" + } + ] + } + ], + "index": 2 + }, + { + "type": "code", + "bbox": [ + 106, + 115, + 507, + 412 + ], + "blocks": [ + { + "bbox": [ + 106, + 115, + 507, + 412 + ], + "lines": [ + { + "bbox": [ + 106, + 115, + 507, + 412 + ], + "spans": [ + { + "bbox": [ + 106, + 115, + 507, + 412 + ], + "type": "text", + "content": "System prompt: \nPlease act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. You should choose the assistant that follows the user's instructions and answers the user's question better. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of their responses. Begin your evaluation by comparing the two responses and provide a short explanation. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision.. \nDo not allow the length of the responses to influence your evaluation.. \nDo not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[A]\" if assistant A is better, \"[B]\" if assistant B is better.. 
\nUser prompt: \n[User Question] {instruction} [End of User Question] [The Start of Assistant A's Answer] {response_A} [The End of Assistant A's Answer] [The Start of Assistant B's Answer] {response_B} [The End of Assistant B's Answer]" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 168, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 168, + 84 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 168, + 84 + ], + "type": "text", + "content": "MRP prompt:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 108, + 86, + 175, + 97 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 86, + 175, + 97 + ], + "spans": [ + { + "bbox": [ + 108, + 86, + 175, + 97 + ], + "type": "text", + "content": "System prompt:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 108, + 98, + 505, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 98, + 505, + 502 + ], + "spans": [ + { + "bbox": [ + 108, + 98, + 505, + 502 + ], + "type": "text", + "content": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. You should choose the assistant that follows the user's instructions and answers the user's question better. First of your task is to build an evaluation plan that can then be executed to assess the response quality. 
Whenever appropriate, you can choose to also include a step-by-step reference answer as part of the evaluation plan. Enclose your evaluation plan between the tags \"[Start of Evaluation Plan]\" and \"[End of Evaluation Plan)\". After that, please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. You should choose the assistant that follows the user's instructions and answers the user's question better. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of their responses. Begin your evaluation by comparing the two responses and provide a short explanation. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[A]\" if assistant A is better, \"[B]\" if assistant B is better. 
User prompt: [User Question] {instruction} [End of User Question] [The Start of Assistant A's Answer] {response_A} [The End of Assistant A's Answer] [The Start of Assistant B's Answer] {response_B} [The End of Assistant B's Answer]" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 507, + 254, + 519 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 507, + 254, + 519 + ], + "spans": [ + { + "bbox": [ + 106, + 507, + 254, + 519 + ], + "type": "text", + "content": "MAMRP prompt: high-level agent:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 108, + 521, + 175, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 521, + 175, + 533 + ], + "spans": [ + { + "bbox": [ + 108, + 521, + 175, + 533 + ], + "type": "text", + "content": "System prompt:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 108, + 533, + 488, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 533, + 488, + 598 + ], + "spans": [ + { + "bbox": [ + 108, + 533, + 488, + 598 + ], + "type": "text", + "content": "We want to evaluate the quality of the responses provided by AI assistants to the user question displayed below. For that, your task is to help us build an evaluation plan that can then be executed to assess the response quality. Whenever appropriate, you can choose to also include a step-by-step reference answer as part of the evaluation plan." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 109, + 598, + 164, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 598, + 164, + 609 + ], + "spans": [ + { + "bbox": [ + 109, + 598, + 164, + 609 + ], + "type": "text", + "content": "User prompt:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 110, + 609, + 241, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 609, + 241, + 641 + ], + "spans": [ + { + "bbox": [ + 110, + 609, + 241, + 641 + ], + "type": "text", + "content": "[User Question] \n{Question} \n[End of User Question]" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "36" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 73, + 171, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 73, + 171, + 83 + ], + "spans": [ + { + "bbox": [ + 106, + 73, + 171, + 83 + ], + "type": "text", + "content": "low-level agent:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 109, + 87, + 174, + 97 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 87, + 174, + 97 + ], + "spans": [ + { + "bbox": [ + 109, + 87, + 174, + 97 + ], + "type": "text", + "content": "System prompt:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 108, + 98, + 507, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 98, + 507, + 150 + ], + "spans": [ + { + "bbox": [ + 108, + 98, + 507, + 150 + ], + "type": "text", + "content": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. 
Your evaluation should be performed by following the provided evaluation plan step-by-step. Avoid copying the plan when doing the evaluation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 108, + 152, + 495, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 152, + 495, + 174 + ], + "spans": [ + { + "bbox": [ + 108, + 152, + 495, + 174 + ], + "type": "text", + "content": "Please also only stick to the given plan and provide explanation of how the plan is executed to compare the two responses." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 108, + 175, + 494, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 175, + 494, + 195 + ], + "spans": [ + { + "bbox": [ + 108, + 175, + 494, + 195 + ], + "type": "text", + "content": "Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 108, + 196, + 484, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 196, + 484, + 228 + ], + "spans": [ + { + "bbox": [ + 108, + 196, + 484, + 228 + ], + "type": "text", + "content": "Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 108, + 228, + 504, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 228, + 504, + 260 + ], + "spans": [ + { + "bbox": [ + 108, + 228, + 504, + 260 + ], + "type": "text", + "content": "After providing your evaluation, output your final verdict by strictly following this format: \"[A]\" if assistant A is better, \"[B]\" if assistant B is better." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 110, + 262, + 164, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 262, + 164, + 272 + ], + "spans": [ + { + "bbox": [ + 110, + 262, + 164, + 272 + ], + "type": "text", + "content": "User prompt:" + } + ] + } + ], + "index": 7 + }, + { + "type": "code", + "bbox": [ + 109, + 272, + 319, + 403 + ], + "blocks": [ + { + "bbox": [ + 109, + 272, + 319, + 403 + ], + "lines": [ + { + "bbox": [ + 109, + 272, + 319, + 403 + ], + "spans": [ + { + "bbox": [ + 109, + 272, + 319, + 403 + ], + "type": "text", + "content": "[User Question] \n{instruction} \n{End of User Question] \n{The Start of Assistant A's Answer} \n{response_A} \n{The End of Assistant A's Answer} \n{The Start of Assistant B's Answer} \n{response_B} \n{The End of Assistant B's Answer} \n{The Start of Evaluation Plan} \n{evaluation計劃} \n{The End of Evaluation Plan}" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_body" + } + ], + "index": 8, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 36 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 250, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 250, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 250, + 85 + ], + "type": "text", + "content": "G.2 Multi-turn ReMA prompts" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 92, + 324, + 103 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 92, + 324, + 103 + ], + "spans": [ + { + "bbox": [ + 105, + 92, + 324, + 103 + ], + "type": "text", + "content": "G.2.1 SFT data collection 
of multi-turn MAMRP" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 108, + 112, + 175, + 123 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 112, + 175, + 123 + ], + "spans": [ + { + "bbox": [ + 108, + 112, + 175, + 123 + ], + "type": "text", + "content": "System prompt:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 108, + 123, + 498, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 123, + 498, + 177 + ], + "spans": [ + { + "bbox": [ + 108, + 123, + 498, + 177 + ], + "type": "text", + "content": "You are classifying reasoning process data into two types of thinking. You will be given a question-answer pair from a reasoning dataset. Your task is to split all words into two parts. These words are crucial for analyzing reasoning patterns, so do not skip any details." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 108, + 178, + 488, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 178, + 488, + 232 + ], + "spans": [ + { + "bbox": [ + 108, + 178, + 488, + 232 + ], + "type": "text", + "content": "- **Meta-Thinking Agent (MTA):** Responsible for high-level thought processes. This includes planning, evaluating steps, expressing uncertainty, making observations, or setting goals. Avoid detailed calculations. The content should be enclosed in `` and ``." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 108, + 232, + 494, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 232, + 494, + 276 + ], + "spans": [ + { + "bbox": [ + 108, + 232, + 494, + 276 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 108, + 232, + 494, + 276 + ], + "type": "inline_equation", + "content": "\\star \\star" + }, + { + "bbox": [ + 108, + 232, + 494, + 276 + ], + "type": "text", + "content": " Reasoning Agent (RA): " + }, + { + "bbox": [ + 108, + 232, + 494, + 276 + ], + "type": "inline_equation", + "content": "\\star \\star" + }, + { + "bbox": [ + 108, + 232, + 494, + 276 + ], + "type": "text", + "content": " Responsible for detailed problem-solving steps, such as calculations, logical deductions, or breaking down a problem into subproblems. The content should be enclosed in `` and ``." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 108, + 277, + 231, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 277, + 231, + 285 + ], + "spans": [ + { + "bbox": [ + 108, + 277, + 231, + 285 + ], + "type": "text", + "content": "\\*\\*Rules to follow: \\*\\*" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 108, + 287, + 493, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 287, + 493, + 330 + ], + "spans": [ + { + "bbox": [ + 108, + 287, + 493, + 330 + ], + "type": "text", + "content": "1. **Do not assign large chunks of text to a single type of thinking.** The reasoning process consists of small, nonlinear thinking steps, so alternate appropriately between Meta-Thinking and Reasoning steps." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 108, + 331, + 462, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 331, + 462, + 363 + ], + "spans": [ + { + "bbox": [ + 108, + 331, + 462, + 363 + ], + "type": "text", + "content": "2. 
**Keep the words from the original solution unmodified whenever possible.** Words like \"Wait,\" \"Hmm,\" \"But,\" etc., typically indicate Meta-Thinking and should be preserved." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 108, + 364, + 321, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 364, + 321, + 373 + ], + "spans": [ + { + "bbox": [ + 108, + 364, + 321, + 373 + ], + "type": "text", + "content": "3. " + }, + { + "bbox": [ + 108, + 364, + 321, + 373 + ], + "type": "inline_equation", + "content": "\\star \\star" + }, + { + "bbox": [ + 108, + 364, + 321, + 373 + ], + "type": "text", + "content": " When finalizing the answer: " + }, + { + "bbox": [ + 108, + 364, + 321, + 373 + ], + "type": "inline_equation", + "content": "\\star \\star" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 108, + 374, + 488, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 374, + 488, + 396 + ], + "spans": [ + { + "bbox": [ + 108, + 374, + 488, + 396 + ], + "type": "text", + "content": "- The \\*\\*Meta-Thinking Agent (MTA) \\*\\* must explicitly confirm the answer before completion and output '[FINISH]'." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 108, + 396, + 470, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 396, + 470, + 416 + ], + "spans": [ + { + "bbox": [ + 108, + 396, + 470, + 416 + ], + "type": "text", + "content": "- The " + }, + { + "bbox": [ + 108, + 396, + 470, + 416 + ], + "type": "inline_equation", + "content": "\\star \\star" + }, + { + "bbox": [ + 108, + 396, + 470, + 416 + ], + "type": "text", + "content": " Reasoning Agent (RA) " + }, + { + "bbox": [ + 108, + 396, + 470, + 416 + ], + "type": "inline_equation", + "content": "\\star \\star" + }, + { + "bbox": [ + 108, + 396, + 470, + 416 + ], + "type": "text", + "content": " should then provide the final answer in the correct format." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 108, + 418, + 447, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 418, + 447, + 438 + ], + "spans": [ + { + "bbox": [ + 108, + 418, + 447, + 438 + ], + "type": "text", + "content": "4. **Do not skip any reasoning steps, even if they seem redundant, incorrect or irrelevant**" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 108, + 439, + 500, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 439, + 500, + 472 + ], + "spans": [ + { + "bbox": [ + 108, + 439, + 500, + 472 + ], + "type": "text", + "content": "5. **Do not modify or remove any part of the original reasoning process**, even if it seems redundant or repetitive. The goal is to **preserve the exact flow of thought** as it naturally occurs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 108, + 472, + 500, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 472, + 500, + 505 + ], + "spans": [ + { + "bbox": [ + 108, + 472, + 500, + 505 + ], + "type": "text", + "content": "6. **Retain all expressions such as \"Wait,\" \"Hmm,\" \"But wait,\" etc., exactly as they appear. These indicate important cognitive processes and should not be skipped or altered.**" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 108, + 506, + 265, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 506, + 265, + 526 + ], + "spans": [ + { + "bbox": [ + 108, + 506, + 265, + 526 + ], + "type": "text", + "content": "Here are examples for you: [Examples] ..." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 108, + 527, + 164, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 527, + 164, + 537 + ], + "spans": [ + { + "bbox": [ + 108, + 527, + 164, + 537 + ], + "type": "text", + "content": "User prompt:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 108, + 538, + 223, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 538, + 223, + 603 + ], + "spans": [ + { + "bbox": [ + 108, + 538, + 223, + 603 + ], + "type": "text", + "content": "[Begin of Question] \n{question} \n[End of Question] \n[Begin of Solution] \n{solution} \n[End of Solution]" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "38" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 37 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 255, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 255, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 255, + 85 + ], + "type": "text", + "content": "G.2.2 Prompt for math problems" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 91, + 235, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 91, + 235, + 102 + ], + "spans": [ + { + "bbox": [ + 106, + 91, + 235, + 102 + ], + "type": "text", + "content": "Meta-Thinking Agent (MTA):" + } + ] + } + ], + "index": 1 + }, + { + "type": "code", + "bbox": [ + 106, + 109, + 506, + 255 + ], + "blocks": [ + { + "bbox": [ + 106, + 109, + 506, + 255 + ], + "lines": [ + { + "bbox": [ + 106, + 109, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 106, + 109, + 506, + 255 + ], + "type": "text", + "content": "System prompt: \nYou are a 
meta-think agent that represents human high-level think process, when solving a question, you will have a discussion with human, each time you think about what to do next: e.g. \n- Exploring multiple angles and approaches \n- Breaking down the solution into clear steps \n- Continuously reflecting on intermediate results honestly and adapt your strategy as you progress \n- Backtracking when necessary \n- Requesting exploration of multiple solutions individually \n- Finally confirm the answer with the tag [FINISH] \nUser prompt: \n{question}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 106, + 259, + 207, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 259, + 207, + 272 + ], + "spans": [ + { + "bbox": [ + 106, + 259, + 207, + 272 + ], + "type": "text", + "content": "Reasoning Agent (RA):" + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 106, + 277, + 494, + 346 + ], + "blocks": [ + { + "bbox": [ + 106, + 277, + 494, + 346 + ], + "lines": [ + { + "bbox": [ + 106, + 277, + 494, + 346 + ], + "spans": [ + { + "bbox": [ + 106, + 277, + 494, + 346 + ], + "type": "text", + "content": "System prompt: Please reason step by step follow the given instruction, when asked to finalize your answer, put your answer within \\boxed{} User prompt: {question} {instruction}" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "txt" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "39" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 38 + } + ], + "_backend": "vlm", + "_version_name": 
"2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09567/17e53201-29b3-43fd-8f2e-78d7b00a58a6_content_list.json b/data/2025/2503_09xxx/2503.09567/17e53201-29b3-43fd-8f2e-78d7b00a58a6_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..8db224c478eb7a4aea02af3611e9532fc9581bfb --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/17e53201-29b3-43fd-8f2e-78d7b00a58a6_content_list.json @@ -0,0 +1,9025 @@ +[ + { + "type": "text", + "text": "Qiguang Chen† Libo Qin‡ Jinhao Liu† Dengyun Peng† Jiannan Guan† Peng Wang‡ Mengkang Hu◇ Yuhang Zhou Te Gao† Wanxiang Che† LARG,", + "bbox": [ + 228, + 262, + 787, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$\\dagger$ Research Center for Social Computing and Interactive Robotics, $\\dagger$ Harbin Institute of Technology", + "bbox": [ + 290, + 309, + 723, + 338 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{\\ddagger}$ School of Computer Science and Engineering, Central South University", + "bbox": [ + 267, + 339, + 750, + 354 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The University of Hong Kong", + "bbox": [ + 401, + 354, + 614, + 368 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Fudan University", + "bbox": [ + 444, + 368, + 573, + 382 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{qgchen,car}@ir.hit.edu.cn,lbqin@csu.edu.cn", + "bbox": [ + 300, + 383, + 717, + 396 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Project: https://long-cot.github.io/", + "bbox": [ + 344, + 409, + 669, + 425 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Github: LightChen233/Awesome-Long-Chain-of-Thought-Reasoning", + "bbox": [ + 223, + 436, + 790, + 452 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/3bcec7826e1fbcdb6dfe89578c968d54d801a27312caf0fc018f86ba59fa632d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 184, + 492, + 831, + 825 + ], + 
"page_idx": 0 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 233, + 119, + 299, + 164 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Towards Reasoning Era: A Survey of Chain-of-Thought for Reasoning Large Language Models", + "bbox": [ + 307, + 133, + 769, + 212 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.09567v5 [cs.AI] 18 Jul 2025", + "bbox": [ + 22, + 285, + 58, + 710 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 89, + 540, + 107 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent advancements in reasoning with large language models (RLLMs), such as OpenAI-o1 and DeepSeek-R1, have demonstrated their impressive capabilities in complex domains like mathematics and coding. A central factor in their success lies in the application of long chain-of-thought (Long CoT) characteristics, which enhance reasoning abilities and enable the solution of intricate problems. However, despite these developments, a comprehensive survey on Long CoT is still lacking, limiting our understanding of its distinctions from traditional short chain-of-thought (Short CoT) and complicating ongoing debates on issues like \"overthinking\" and \"inference-time scaling\". This survey seeks to fill this gap by offering a unified perspective on Long CoT. Specifically, (1) We first distinguish Long CoT from Short CoT and introduce a novel taxonomy to categorize current reasoning paradigms. (2) Next, we explore the key characteristics of Long CoT: deep reasoning, extensive exploration, and feasible reflection, which enable models to handle more complex tasks and produce more efficient, coherent outcomes compared to the shallower Short CoT. (3) We then investigate key phenomena such as the emergence of Long CoT with these characteristics, including overthinking, and inference-time scaling, offering insights into how these processes manifest in practice. 
(4) Finally, we identify significant research gaps and highlight promising future directions, including the integration of multi-modal reasoning, efficiency improvements, and enhanced knowledge frameworks. By providing a structured overview, this survey aims to inspire future research and further the development of reasoning large language models1.", + "bbox": [ + 228, + 121, + 769, + 428 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 453, + 316, + 469 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In recent years, as shown in Figure 1, the emergence of reasoning large language models (RLLMs) such as OpenAI o1 [307] and DeepSeek R1 [227] has sparked a growing body of research into Long Chain-of-Thought (Long CoT) reasoning, greatly improving their mathematical reasoning, programming tasks, and multidisciplinary knowledge reasoning capabilities [696, 980, 722, 79, 961, 200, 1113, 793], even passing Turing Test [334]. This shift marks a significant departure from traditional approaches to task handling in large language models (LLMs) [1147, 619, 622, 599]. Unlike the shorter chain-of-thought (Short CoT) used in traditional LLMs, Long CoT reasoning entails a more detailed, iterative process of exploration and reflection within a given problem space by inference-time scaling [419, 733, 524]. This process has led to notable advancements in mathematical and logical reasoning, as well as in exploring how supervised fine-tuning (SFT) and reinforcement learning (RL) techniques can enhance the learning and exploration of extended reasoning chains [623, 550].", + "bbox": [ + 169, + 484, + 826, + 652 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, there is no comprehensive survey to systematically understand the main factors and recent efforts of Long CoT for RLLMs, which hinders the development of RLLMs. 
As a result, there are ongoing debates about the effectiveness of simple \"inference-time scaling\" for Longer CoT [864, 486] versus the argument that \"over-thinking\" from excessively long scaling can harm LLMs and introduce unnecessary complexity [103, 142, 357]. Moreover, some researchers argue that, when solving specific problems, there is no clear relationship between length and accuracy [886].", + "bbox": [ + 169, + 657, + 826, + 742 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address this gap, we provide an extensive and comprehensive survey of Long CoT. Specifically, as illustrated in Figure 2, we first define and examine the distinctions between Long CoT and traditional Short CoT, focusing on the following key aspects: (1) Deep Reasoning, which requires a sufficient depth of logical processing to manage an extensive set of logical nodes; (2) Extensive Exploration, which involves generating parallel uncertain nodes and transitioning from known to unknown logic; and (3) Feasible Reflection, which involves feedback and refinement of logical connections. These characteristics enable Long CoT paradigms to integrate more intricate reasoning and accommodate a broader range of logical structures, ultimately leading to more efficient and coherent outcomes. Subsequently, we systematically explore the underlying explanations for key phenomena associated with Long CoT, such as its emergence, the overthinking phenomenon,", + "bbox": [ + 169, + 746, + 828, + 887 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "Our logo refers to a cute cartoon image - Snake Puppy. Header Image is inspired by Yaoting et al. 
[959]", + "bbox": [ + 191, + 897, + 812, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/be56637e4c051b7d3d3a17014777899ed5d63d7f01c713db6f624e8d8196114d.jpg", + "image_caption": [ + "Figure 1: Evolution of selected Long CoT over the past three years, where colored branches represent different characteristics: deep reasoning, feasible reflection, and extensive exploration. Each characteristic is further divided into key areas: Deep reasoning includes its format and learning methods. Feasible reflection focuses on feedback and refinement techniques during reflection process as optimization strategies. Extensive exploration addresses scaling, internal, and external exploration as key improvements to Long CoT." + ], + "image_footnote": [], + "bbox": [ + 181, + 92, + 823, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "inference-time scaling during testing, and the \"Aha Moment,\" among others. To our knowledge, This is the first comprehensive survey dedicated to these specific topics. 
Finally, considering the extensive body of literature, we highlight promising areas for future research and suggest valuable open-resource frameworks and datasets that can serve as a foundation for future investigations.", + "bbox": [ + 169, + 455, + 823, + 512 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The main contributions of this work are as follows:", + "bbox": [ + 171, + 517, + 509, + 531 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Systematic Distinction: In this work, we first introduce the concept of Long CoT reasoning and distinguish it from the traditional Short CoT, thereby providing a clear framework for understanding both paradigms and their respective characteristics.", + "- Explanation of Hot Phenomena: We systematically investigate the notable phenomena associated with Long CoT reasoning, such as overthinking, inference-time scaling, and the \"Aha Moment\", offering valuable insights into the cognitive processes involved in complex reasoning.", + "- Emerging Challenges and Frontiers: We explore the emerging challenges within the field of Long CoT reasoning and identify key research frontiers. Given the vast body of literature, we highlight areas where further inquiry could significantly advance the development of Long CoT methodologies." + ], + "bbox": [ + 174, + 539, + 823, + 685 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Discussion of Long CoT v.s. Short CoT", + "text_level": 1, + "bbox": [ + 171, + 708, + 529, + 724 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This section formalizes the key differences between Long Chain-of-Thought (Long CoT) and Short Chain-of-Thought (Short CoT), emphasizing reasoning depth, revisiting connections, and logical node exploration [858]. These distinctions are clearly separate from System 1 and System 2 thinking. 
The comparison between Long CoT and Short CoT is framed within System 2, with Long CoT involving more thorough reasoning, reflection, and exploration, while Short CoT generally prioritizes shallow and efficient logic over exhaustive reasoning.", + "bbox": [ + 169, + 739, + 826, + 825 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Overview of Short CoT", + "text_level": 1, + "bbox": [ + 171, + 843, + 375, + 857 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As illustrated by Figure 2, Short CoT is typically characterized by a shallow, linear reasoning process, where conclusions are drawn sequentially, often relying on a limited number of logical nodes [551]. This reasoning is usually rapid and straightforward, with simple, surface-level transitions and minimal", + "bbox": [ + 169, + 869, + 826, + 912 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 230, + 59, + 305, + 68 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Proof of Number Theory Problem: For any positive integer $n$ , there exists a positive integer $m$ such that $m^2 + 1$ is divisible by $n$ .", + "bbox": [ + 240, + 94, + 774, + 107 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/f0eadf51633e7fb658ee5728c1487fe0163f55fb50102d3c0b34bdb3de0da945.jpg", + "image_caption": [ + "Figure 2: The differences between advanced Long CoT and traditional Short CoT are characterized by three key characteristics: deep reasoning, feasible reflection, and extensive exploration. Moreover, Long CoT integrates all these characteristics to achieve substantial logical efficacy." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 113, + 816, + 271 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "exploration of alternative paths, which restricts its generalizability [683]. Formally, given a reasoning model $\\mathcal{R}$ , we can define the rationale of Short CoT $(\\mathsf{C}\\circ \\mathsf{T}_S)$ as follows:", + "bbox": [ + 169, + 349, + 823, + 378 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {C o T} _ {S} = \\mathcal {R} \\left(\\left\\{n _ {i} \\right\\} _ {i = 1} ^ {k} | (k \\leq \\mathcal {B} _ {s}) \\wedge (j = 1 \\Leftrightarrow \\forall i \\leq k, n _ {i} \\rightarrow n _ {i + j}) \\wedge (\\forall i \\neq j \\leq k, n _ {i} \\neq n _ {j})\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 388, + 825, + 406 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $n_1$ to $n_k$ represent a sequence of logical nodes, which naturally satisfy that $\\forall i, n_i \\to n_{i+1}$ . Here, $\\mathcal{B}_s$ denotes the upper boundary on the number of logical nodes, as defined by Chen et al. [90]. In this paradigm, the reasoning progresses sequentially from one node to the next, with minimal revisitation of previous nodes and little exploration of alternative logical paths.", + "bbox": [ + 169, + 415, + 826, + 472 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 Overview of Long CoT", + "text_level": 1, + "bbox": [ + 171, + 491, + 374, + 506 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In contrast, Long CoT involves deeper reasoning, reflective analysis, and a broader exploration of logical structures. It facilitates reasoning across a wider range of logical steps, addressing both known and unknown elements of a problem [194, 858]. 
Building on this, Long CoT expands upon the constraints presented in Equation 1 based on tree structures by incorporating three critical components: deep reasoning, exploration, and reflection.", + "bbox": [ + 169, + 518, + 826, + 588 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "These components play distinct yet complementary roles in enhancing cognitive processes. Deep reasoning ensures each logical step is executed rigorously, even within complex structures, fostering robust logic across intricate relationships. Exploration encourages the identification of new pathways, revealing potential avenues that may not be immediately obvious. Reflection enables iterative analysis and reassessment of conclusions, allowing reasoning to evolve throughout problem-solving. By distinguishing these three categories, Long CoT enhances its ability to address a broader range of problems with precision and depth. As shown in Figure 3, we will now discuss these key differences in detail.", + "bbox": [ + 169, + 594, + 826, + 705 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2.1 Deep Reasoning for Long CoT", + "text_level": 1, + "bbox": [ + 171, + 723, + 437, + 739 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As shown by Figure 2, deep reasoning refers to the capability to perform deep and thorough logical analysis across multiple interconnected logical nodes, where Short CoT generally can never achieve. This capability is essential when tackling complex problems that require a massive number of logical deductions to arrive at a valid conclusion. 
To better define and understand deep reasoning, we frame it as a capability that primarily relaxes the first constraint in Equation 1, as expressed by the following:", + "bbox": [ + 169, + 750, + 826, + 819 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nk \\leq \\mathcal {B} _ {s} \\mapsto k \\leq \\mathcal {B} _ {l} \\wedge \\mathcal {B} _ {s} \\ll \\mathcal {B} _ {l}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 392, + 829, + 823, + 844 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{B}_l$ represents the upper boundary for Long CoT reasoning, which can accommodate much more intricate logical nodes compared to the smaller boundary $\\mathcal{B}_s$ for Short CoT. The larger boundary $\\mathcal{B}_l$ alleviates issues related to insufficient depth in reasoning, thereby reducing the risk of generating unresolved answers or hallucinated responses in short-form reasoning.", + "bbox": [ + 169, + 854, + 825, + 912 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 41, + 225, + 70 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 64 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 230, + 64, + 305, + 69 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/fa1bbeeb7a7a9e97707e957eb9cfc744f2a2eba4ab0e5a7c5f73282936c28213.jpg", + "image_caption": [ + "Figure 3: Taxonomy of Long CoT, which includes deep reasoning, feasible reflection, and extensive exploration methodologies." 
+ ], + "image_footnote": [], + "bbox": [ + 173, + 78, + 831, + 539 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Key Difference: Reasoning Depth", + "text_level": 1, + "bbox": [ + 204, + 604, + 429, + 619 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Short CoT typically addresses a limited set of logical nodes, involving shallow reasoning, and struggles with problems requiring complex or intricate logical structures.", + "- Long CoT is designed to accommodate a significantly larger set of logical nodes, allowing for deeper logic and more thorough analysis during the reasoning process." + ], + "bbox": [ + 199, + 627, + 800, + 688 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.2.2 Extensive Exploration for Long CoT", + "text_level": 1, + "bbox": [ + 171, + 715, + 478, + 729 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown by Figure 2, Long CoT encourages branching out to extensively explore uncertain or unknown logical nodes, thereby expanding the potential set of reasoning paths. This exploration is particularly critical when solving problems characterized by ambiguity, incomplete information, or multiple possible solutions [43, 1016, 871]. More specifically, we describe how extensive exploration primarily addresses the relaxation of the second constraint in Equation 1, which can be formalized as follows:", + "bbox": [ + 169, + 741, + 823, + 823 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nj = 1 \\Leftrightarrow \\forall i \\leq k, n _ {i} \\rightarrow n _ {i + j} \\mapsto \\exists m, \\forall i, \\forall j \\leq m, n _ {i} \\rightarrow n _ {i + j}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 290, + 830, + 823, + 847 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the condition indicates that for a logical node $n_i$ , there are $m$ nodes that are explored in parallel. 
The acceptability of parallel exploration allows for a more systematic approach, enabling the exploration of previously unconsidered logical paths. This, in turn, helps maximize the understanding of all possible solutions, ultimately leading to the correct final answer.", + "bbox": [ + 169, + 854, + 823, + 912 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 41, + 225, + 69 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 65 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 230, + 65, + 305, + 69 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Key Difference: Exploration of Logical Nodes", + "text_level": 1, + "bbox": [ + 204, + 90, + 511, + 106 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Short CoT generally restricts exploration to a fixed set of logical nodes, often resulting in oversimplified reasoning and limited exploration.", + "- Long CoT explores more various paths, including uncertain or uncharted areas, fostering more nuanced and comprehensive problem-solving." + ], + "bbox": [ + 199, + 114, + 797, + 175 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2.2.3 Feasible Reflection for Long CoT", + "text_level": 1, + "bbox": [ + 171, + 198, + 455, + 214 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As shown by Figure 2, Long CoT involves revisiting previous logical nodes to verify their connections are valid and accurate, and then correcting them or selecting an alternative logical path. 
Formally, feasible reflection relaxes the third constraint in Equation 1, which originally requires acyclic reasoning such that $n_i \\neq n_j$ for all $i \\neq j \\leq k$ . In contrast, feasible reflection permits the reasoning path to return to a previously visited node, captured as:", + "bbox": [ + 169, + 223, + 823, + 292 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\forall i \\neq j \\leq k, n _ {i} \\neq n _ {j} \\mapsto \\exists i < j \\leq k, n _ {i} = n _ {j}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 301, + 825, + 318 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where this condition indicates that, for a logical node $n_{j-1}$ , the subsequent node is not limited to the original next node $\\hat{n}_j$ . Instead, it may transition to $n_i$ (i.e., the next logical node becomes $n_j$ , where $n_j = n_i$ ). Practically, reflection implementation consists of two components:", + "bbox": [ + 169, + 325, + 823, + 369 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Feedback refers to evaluating both overall and intermediate outputs for correctness and quality, also known as critique or verification. It can be derived from external sources, validation checks, or by reflecting on prior conclusions within the reasoning process. Formally, at each step $n_i$ , a verification process $\\mathcal{V}_i$ ensures the correctness, feasibility, and consistency of the reasoning. If an issue is identified, the process redirects $n_i$ to the nearest correct node $n_j$ , where $j < i$ . 
This relationship is formalized as:", + "bbox": [ + 169, + 378, + 825, + 462 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {F} _ {i}, n _ {j} \\leftarrow \\operatorname {F e e d b a c k} \\left(\\mathrm {C o T} _ {L} ^ {i}\\right) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 463, + 823, + 479 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\mathrm{CoT}_L^i = \\{n_1,\\dots ,n_i\\}$ represents the current logical path up to the $i$ -th logical node for Long CoT.", + "bbox": [ + 169, + 484, + 823, + 513 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Refinement involves adjusting intermediate steps or modifying the logical flow to correct inconsistencies or address gaps based on the given feedback. This process can be expressed mathematically as follows:", + "bbox": [ + 169, + 525, + 826, + 566 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\widetilde {n} _ {i + 1} \\leftarrow \\operatorname {R e f i n e} \\left(n _ {i + 1} \\mid \\mathrm {C o T} _ {L} ^ {i}, \\mathcal {F} _ {i}, n _ {j}\\right), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 566, + 823, + 584 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\widetilde{n}_{i+1}$ represents the refined version of the subsequent logical node $n_{i+1}$ , according to the current logic $\\mathbb{C} \\circ \\mathbb{T}_L^i$ , feedback result $\\mathcal{F}_i$ , and previous logical node $n_j$ .", + "bbox": [ + 169, + 588, + 823, + 618 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Overall, incorporating reflection ensures that errors are identified and corrected promptly. This capability enables LLMs to quickly shift to alternative reasoning paths or correct their current trajectory. 
By doing so, error propagation is minimized, resulting in more accurate conclusions.", + "bbox": [ + 169, + 623, + 825, + 666 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Key Difference: Feedback & Refinement", + "text_level": 1, + "bbox": [ + 204, + 675, + 475, + 690 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Short CoT typically moves in a straightforward, non-repetitive manner from one node to the next, so that cannot correct their logic.", + "- Long CoT allows for revisiting and revising earlier decisions by feedback and refinement, ensuring that estimizable and prior logical conclusions during the reasoning progress." + ], + "bbox": [ + 200, + 698, + 799, + 758 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2.2.4 Unified Application and Development History of Three Capabilities", + "text_level": 1, + "bbox": [ + 171, + 782, + 694, + 797 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The Long CoT discussed here represents a unified reasoning system that seamlessly integrates and applies three key capabilities: deep reasoning, reflective mechanisms, and exploration capabilities. In contrast, during the Short CoT era, these capabilities developed independently, each evolving in isolation.", + "bbox": [ + 169, + 806, + 825, + 863 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As shown in Figure 2, early efforts primarily focused on enhancing deep reasoning within traditional CoT paradigms. This was followed by the gradual introduction of reflective mechanisms, which were initially based on human-designed pipelines. 
Over time, exploration capabilities were added, and", + "bbox": [ + 169, + 869, + 825, + 912 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 41, + 225, + 70 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 64 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 230, + 64, + 305, + 69 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "these components were ultimately merged, giving rise to the modern concept of Long CoT, a unified approach to reasoning that seeks to enhance all three capabilities in harmony.", + "bbox": [ + 169, + 90, + 823, + 119 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The progression of Long CoT is gradual, rather than a sudden emergence through isolated models like o1 [307] and R1 [227]. Instead, it develops gradually. For example, earlier systems, such as ToT [955], enhance exploration but lack reflective mechanisms, disqualifying them as Long CoT [95]. While GoT [48] incorporates self-reflection based on ToT, its original model still lacked robust deep reasoning, preventing it from qualifying as Long CoT at that time. It is also notable that modern Long CoT systems, often neglect earlier technologies. This article addresses this gap by tracing the evolution of each capability, with the final section offering a comprehensive analysis of the integrated Long CoT system.", + "bbox": [ + 169, + 126, + 826, + 238 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In summary, Long CoT and Short CoT represent distinct paradigms. Long CoT features a deeper, broader, and more reflective reasoning process, enhancing both accuracy and coherence. 
Short CoT, by contrast, is better suited to simpler, well-defined problems. This distinction highlights the scalability and adaptability of Long CoT, making it particularly effective for more complex reasoning.", + "bbox": [ + 169, + 243, + 826, + 300 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Key Difference: Unified Application of Three Capabilities", + "text_level": 1, + "bbox": [ + 204, + 308, + 591, + 324 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "It is important to highlight that Long CoT integrates these three distinct capabilities to perform complex reasoning. In contrast, traditional Short CoT optimization typically focuses on only one of these characteristics.", + "bbox": [ + 194, + 329, + 800, + 372 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3 Long CoT Analysis & Evaluation", + "text_level": 1, + "bbox": [ + 171, + 397, + 483, + 415 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.1 Analysis & Explanation for Long CoT", + "text_level": 1, + "bbox": [ + 171, + 428, + 478, + 444 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Research on Long CoT has significantly enhanced RLLMs by improving reasoning accuracy, reducing errors, and supporting dynamic decision-making. However, several phenomena and their corresponding mechanisms remain inadequately summarized. This section addresses key topics, including the mechanisms of Long CoT and their underlying principles [644, 63, 545, 642]. Methodologically, two main perspectives have emerged to explain Long CoT: (1) External Behavior Analysis (§ 3.1.1) and (2) Internal Mechanism Analysis (§ 3.1.2).", + "bbox": [ + 169, + 454, + 826, + 537 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.1.1 Long CoT External Behavior Analysis", + "text_level": 1, + "bbox": [ + 171, + 551, + 491, + 566 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The primary research stream focuses on explaining RLLM behaviors for Long CoT [25]. 
As illustrated in Figure 4, six key phenomena are identified and discussed for Long CoT in this part.", + "bbox": [ + 169, + 575, + 823, + 604 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Long CoT Emergence Phenomenon Research shows that contextual examples improve large models' generative abilities by guiding the formation of reasoning chains [1012, 671, 417, 343, 532, 846, 1017, 1141]. Wang et al. [759] and Lippmann and Yang [461] demonstrate that these examples standardize reasoning chain generation relevant to the answers both in in-context-learning and supervised-finetuning. In an experiment by Madaan et al. [538], removing problem-specific entities from contextual examples, while retaining only the logical structure, led to similar performance as using complete examples, highlighting the logical structure imitation of Long CoT during inference. From a learning perspective, Ye et al. [963] analyzes and reveals the three-stage developmental trajectory of Long CoT: early memorization, followed by in-distribution generalization, and ultimately cross-distribution generalization, thereby enabling the model to exhibit Long CoT capabilities.", + "bbox": [ + 169, + 614, + 826, + 753 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "More recently, Stechly et al. [688] and Wang and Zhou [815] have shown that modifying the decoding process or designing specific prompts can activate the Long CoT within pre-trained models. They propose that CoT is embedded during pre-training and requires specific activation [941]. Further, Sadr et al. [642] focus the Long CoT source from the training data, and build on this with the notion of \"model attribution\", to specifically identify the training data most influential for specific outputs. Building on this, Guo et al. [227] and Xie et al. [886] investigate using rule-based reinforcement learning to directly activate Long CoT during pre-training, aiming to enhance performance [881]. Furthermore, Gandhi et al. 
[194] identify four key cognitive behaviors, including verification, backtracking, sub-target setting, and backward chaining, which successfully facilitate Long CoT. Qwen series models [926] inherently demonstrate these behaviors, which can be easily triggered by rule-based reinforcement. In contrast, the models of Llama series [168] lack these", + "bbox": [ + 169, + 758, + 828, + 912 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 64 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "REASONING GROU", + "bbox": [ + 233, + 66, + 300, + 70 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/5a5b622a5ef9a492838399c45ff5d29022e17e93ea38f8784aa310a395d4009d.jpg", + "image_caption": [ + "Figure 4: Analysis of the six classic phenomena of Long CoT external behavior: (a) emergence of Long CoT in current RLLMs; (b) reasoning boundaries and limitations of current Long CoT systems; (c) overthinking caused by scaling beyond RLLMs' reasoning boundaries, leading to performance decay; (d) inference-time scaling, discussing mainstream scaling methods, corresponding scaling laws and their limitations; (e) use of process reward model (PRM) or outcome reward model (ORM); (f) exploration of the \"aha\" moment and its underlying causes." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 89, + 338, + 239 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b58ea3cfdd162d9e8dc0a98bea568dac497bec49f930188fabfb39d4a8af9188.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 343, + 90, + 473, + 239 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7574cfd5bdc73debbbe23c4cd13dc43c38b3f705661075e1c69e68c8876576bc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 480, + 90, + 656, + 239 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7acf371a734b42dff8be38ed39013e080e5d0020e7a712fdcc41abb09ba80b65.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 661, + 90, + 813, + 239 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "capabilities and thus requires example-based reinforcement learning to improve significantly [65]. Moreover, Wang et al. [812] identify a pretraining scaling law that explains how increasing calculation size in RLLMs enhances their reasoning capabilities. Wang et al. [796] further explore the scaling law of Long CoT, showing that more fine-grained Long CoT granularity leads to more efficient and effective generalization performance.", + "bbox": [ + 169, + 367, + 823, + 436 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Reasoning Boundary Phenomenon Recent research has highlighted the upper bounds and limitations of RLLMs across various reasoning tasks [303, 283, 684, 261, 185, 252]. Specifically, Bi et al. [53] investigate these bounds in code generation, showing that RLLMs struggle with tasks that exceed certain complexity thresholds [600], especially when imitating Long CoT samples of varying complexity. In the context of upper-bound performance, Merrill and Sabharwal [548] and Li et al. [430] focus on single-step arithmetic tasks, concluding that model performance is constrained by input length. Moreover, Feng et al. 
[177] proposes a mathematical model indicating that fixed-size models cannot produce accurate numerical answers beyond specific limits. However, increasing the number of reasoning steps improves a model's capability requirements to solve more complex problems.", + "bbox": [ + 169, + 454, + 826, + 592 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Inspired by these explorations, Chen et al. [90] first define the \"reasoning boundary\" phenomenon and quantify these limits, showing that surpassing an RLLM's reasoning capacity leads to performance decline [92]. Similarly, Zhou et al. [1130] introduce GSM-Infinite, linking different upper limits to accuracy levels. Chen et al. [90] also examine the interaction between these boundaries across tasks of varying complexity, providing insights into the effectiveness of Long CoT strategies [1085]. Moreover, Amiri et al. [12] propose a \"tight lower bound\" for Long CoT further guiding reasoning error reductions. Further, Baeumel et al. [28] suggest that due to its reliance on a single-digit lookahead heuristic, there are inherent boundaries in performing addition with multiple operands, which thus hinders the fundamental limitation of LLMs in scaling to more complex numerical reasoning. Liu et al. [483] further investigate the role of reinforcement learning in expanding these reasoning boundaries instead of relying solely on pretraining capabilities.", + "bbox": [ + 169, + 599, + 826, + 752 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Overthinking Phenomenon Research has highlighted the overthinking phenomenon [103, 330, 574, 142, 357, 595], where performance improves with longer reasoning chains up to a threshold, after which it declines. In contrast, Xie et al. [886] and Ma et al. [534] find no significant correlation between reasoning length and accuracy. To explain this, one line of research suggests that Long CoT strategies [21, 441], like avoiding \"snowball errors\" [192]. 
Alternatively, Chen et al. [90], Wolf et al. [851] highlight a performance drop when the reasoning boundaries are exceeded, providing an explanation for the overthinking phenomenon. This suggests that reasoning length and logical complexity should be kept below a certain boundary [1080]. Building on this, Wu et al. [867] mathematically determine the feasible reasoning length for Long CoT. Finally, Chen et al. [93] introduces Ohm's law of Long CoT, which accurately predicts and controls performance.", + "bbox": [ + 169, + 772, + 826, + 912 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 232, + 59, + 305, + 65 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 232, + 65, + 305, + 69 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Inference-Time Scaling Phenomenon Recent advances in inference-time scaling algorithms [524, 843] have garnered significant attention, particularly for their ability to extend reasoning length and improve performance [524, 455, 875]. Specifically, Brown et al. [57] identify a phenomenon called \"Large Language Monkeys\", in which a series of reasoning tasks show that with enough trials, a correct result can be achieved. Additionally, o1 [307] and R1 [227] demonstrated that directly scaling the length of model inference improves final performance.", + "bbox": [ + 169, + 90, + 826, + 175 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To understand inference-time scaling, we will discuss these two paradigms: (1) Sequential Scaling: Sequential scaling involves increasing the reasoning path length. 
While this can enhance performance, studies by Jin et al. [330] show that, beyond a certain point, longer reasoning paths can degrade performance due to error accumulation. They suggest an optimal path length that depends on the model's capabilities and task complexity [15, 652, 31]. Furthermore, Chen et al. [90] and Wu et al. [867] explain that excessive exploration lengths beyond the RLLM's inherent reasoning boundary lead to performance decay, which guides RLLMs for deeper reasoning capabilities [32]. (2) Parallel Scaling: Parallel scaling involves performing multiple reasoning steps and verifying the results. While it shows promise, Parashar et al. [583] and Wang et al. [820] argue that simply increasing inference time does not guarantee improved performance. Wu et al. [864] show that the computational FLOPs $N$ of inference are correlated with the lower bound of performance error, which scales with $\\log N$ . Additionally, Chen et al. [93] establish an upper bound for parallel scaling, showing that RLLMs cannot exceed Pass@k verification through various verifiers. They further argue that sampling optimization cannot exceed the model's internal reasoning limitations, demonstrating that for $N$ samples, accuracy is proportional to $\\frac{m}{(k / \\log N + b)^2}$ , where $m$ , $n$ , and $b$ are model-dependent constants.", + "bbox": [ + 169, + 181, + 826, + 393 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "PRM & ORM Selection Phenomenon As RLLMs evolve, it is crucial to navigate the debate around the selection between process supervision and outcome supervision, two key reinforcement learning paradigms for complex reasoning tasks. The phenomenon of choosing between these two approaches has become a pivotal issue, as it is essential to differentiate and decide which supervision strategy is more suitable for specific tasks [899, 187, 1059]. 
While process supervision is intuitively advantageous for long-term reward assignments, the exact relationship between the two approaches remains unclear. It is commonly believed that process supervision is more challenging due to the trajectory-level coverage problem, which demands significant effort to collect fine-grained supervision data [1102, 679]. Additionally, PRM faces the issue of reward hacking [13, 152, 573, 30, 399], where agents exploit flaws in the reward function to produce unintended behaviors [227]. Addressing this to surpass rule-based reward systems has become an important research area [227, 886, 594]. Furthermore, Lampinen et al. [368] and Tan [708] establish a causal link between intermediate steps and final answers in qualitative experiments. Building on this, Jia et al. [317] demonstrate that, under the standard data coverage assumption, reinforcement learning with outcome supervision is not statistically more challenging than process supervision, aside from polynomial factors. More strictly, He et al. [247] mathematically demonstrate that outcome-level rewards suffice for online reinforcement learning in RLLMs.", + "bbox": [ + 169, + 402, + 826, + 638 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Aha Moment Phenomenon Earlier, Guo et al. [227] demonstrated that direct RL using rule-based rewards can trigger the aha moment, fostering natural self-reflection without supervision [172]. Following this, Team [721], Xie et al. [886] replicate this phenomenon. Further, Zhou et al. [1119] and Meng et al. [547] further extend this phenomenon to multimodal scenarios. However, Liu et al. [498] argue that the aha moment may not emerge in R1-Zero-like training. Instead, they observe that self-reflection patterns, such as superficial self-reflection (SSR), appear at epoch 0, the stage of base models. In this case, self-reflections do not necessarily lead to correct answers. 
Upon closer examination of R1-Zero training via RL, they find that the increasing response length results not from self-reflection, but from RL optimizing well-designed rule-based rewards. Moreover, Yang et al. [939] demonstrate that the \"aha moment\" is externally marked by increased use of anthropomorphic language during self-reflection and a dynamic adjustment of uncertainty in response to problem difficulty. This process enables the model to maintain reasoning without succumbing to \"Reasoning Collapse.\" Internally, it is characterized by a clear distinction between anthropomorphic traits and logical reasoning, with anthropomorphic language intensifying as the problem becomes more complex.", + "bbox": [ + 169, + 647, + 826, + 857 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Reinforcement Learning Entropy Phenomenon In reinforcement learning for Long CoT, the entropy mechanism is a crucial factor influencing the performance of RLLMs. Policy entropy measures the diversity and exploratory strength of a model's outputs. By managing this entropy", + "bbox": [ + 169, + 869, + 825, + 912 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 42, + 308, + 71 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "effectively, a model preserves exploration and thus excels on complex reasoning tasks. Earlier, Jang and Kim [310] investigate how initial entropy affects exploration in deep RL and proposed an entropy-aware initialization to encourage effective exploration. Building on this, Zhang et al. [1036] developed an Entropy-Regularized PRM that balances policy updates against large deviations from the starting distribution, thereby improving reasoning. Cheng et al. 
[116] found that high-entropy regions correlate positively with three exploratory reasoning behaviors: (1) key tokens linking logical steps, (2) self-verification and correction, and (3) rare behaviors underrepresented in the base model. Most recently, Agarwal et al. [5] introduced an Entropy Minimization method and demonstrated its strong impact on LLM performance in mathematical, physical, and coding tasks.", + "bbox": [ + 169, + 90, + 826, + 217 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "However, recent research indicates that, during early training, policy entropy declines sharply, causing the model to converge prematurely on specific output patterns and limiting further reasoning improvement [144]. In reinforcement learning, policy entropy $(H)$ and downstream task performance $(R)$ follow an exponential relation: $R = -a\\cdot e^{H} + b$ , so a drop in entropy produces a rapid performance decline until saturation. This \"policy entropy collapse\" is common without entropy control, as reduced entropy constrains exploration and stalls reasoning gains [144]. To counter this collapse, two methods, Clip-Cov and KL-Cov, regulate entropy by constraining updates on high-covariance tokens. Clip-Cov clips their update magnitudes, whereas KL-Cov imposes a Kullback-Leibler penalty. 
Empirical results show both techniques prevent collapse and enhance reasoning performance [144].", + "bbox": [ + 169, + 222, + 826, + 348 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "3.1.2 Long CoT Internal Mechanism Analysis", + "text_level": 1, + "bbox": [ + 171, + 363, + 504, + 378 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The second stream of research investigates the internal mechanisms of Long CoT-related RLLMs.", + "bbox": [ + 169, + 387, + 815, + 402 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Reasoning Internal Mechanism Recent studies have explored the internal mechanisms underlying the coherent rationale outputs of Long CoT, with particular emphasis on attention mechanisms [675, 632]. These studies primarily examine neural substructures in RLLMs, framing CoT reasoning from a white-box perspective [819, 992, 233, 169]. Weston and Sukhbaatar [849] introduces the concept of System 2 Attention (S2A), which demonstrates Long CoT generation by selectively focusing attention on relevant information. Additionally, Li et al. [407] explore gradient distributions between direct output and Long CoT layers, revealing that Long CoT layers help maintain stability by distinguishing relevant from irrelevant reasoning [840]. Finally, Zhang et al. [1068] conceptualize RLLMs as finite state automata, offering further insight into how internal dynamics influence external behavior. Despite Short CoT's struggles with self-correction, Bertolazzi et al. 
[47] show that these models rely on consistency heads (attention heads) to assess the alignment of numerical values in arithmetic solutions through internal shortcuts.", + "bbox": [ + 169, + 415, + 826, + 582 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Knowledge Incorporating Mechanism Current RLLMs primarily focus on mathematics and coding but have shown potential for generalization to other knowledge-rich domains, sparking growing interest in the mechanism for integrating domain-specific knowledge into Long CoT [860, 886, 1105]. Prystawski et al. [609] suggest that generative models store entity knowledge learned during pre-training independently, with the reasoning process in Long CoT linking this knowledge across entities. Radha and Goktas [630] recently introduced the Probabilistic Mixture Model (PMM), which categorizes model outputs into reasoning, memorization, and guessing. They also propose an Information-Theoretic Consistency (ITC) analysis to quantify the relationship between model confidence and strategy selection. Additionally, Jin et al. [331] define \"Concept Depth\" as the lowest layers at which complex concepts are understood, demonstrating varying levels of knowledge integration in RLLMs. Ou et al. 
[572] examine RLLM knowledge internalization through knowledge loop evolution, arguing that new knowledge acquisition is shaped by its connection to existing knowledge, with the loop evolving from formation to optimization and from shallow to deep.", + "bbox": [ + 169, + 593, + 826, + 776 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "3.2 Long CoT Evaluations", + "text_level": 1, + "bbox": [ + 171, + 791, + 372, + 806 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "3.2.1 Metrics", + "text_level": 1, + "bbox": [ + 171, + 816, + 279, + 830 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In benchmarking, various metrics assess model performance across reasoning tasks, each focusing on different aspects of reasoning ability. These metrics evaluate both RLLMs' effectiveness in achieving desired outcomes and their learning efficiency. As a result, metrics for RLLMs have gained increasing attention in recent research. For mathematical or code-related tasks, three key metrics are commonly used: Accuracy, Pass@k, and Cons@k based on regex extraction:", + "bbox": [ + 169, + 842, + 825, + 912 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 42, + 308, + 71 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Accuracy measures the proportion of correct outputs.", + "- Pass@k evaluates the likelihood of generating at least one correct solution within $k$ attempts.", + "- Cons@k assesses consistency by determining the model's ability to consistently produce correct or logically coherent solutions across multiple attempts." 
+ ], + "bbox": [ + 174, + 92, + 823, + 152 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In scientific or commonsense question-answering tasks, evaluation often uses Exact Match (EM) and Accuracy based on regex extraction, where EM determines whether the model's output exactly matches the expected solution.", + "bbox": [ + 169, + 157, + 825, + 200 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "For feedback techniques like ORM or PRM, Rank and Best-of-N metrics are often used:", + "bbox": [ + 171, + 205, + 782, + 220 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Rank measures whether the reward model correctly prioritizes the best reasoning processes from the top $k$ candidates.", + "- Best-of-N selects the highest-scoring solution from $N$ generated reasoning trajectories, indirectly measuring the reward model's effectiveness based on final outcomes." + ], + "bbox": [ + 173, + 227, + 825, + 285 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "3.2.2 Decoding Strategies", + "text_level": 1, + "bbox": [ + 171, + 301, + 364, + 316 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Decoding strategies are essential for controlling the inference process. Common approaches include Greedy Decoding, Beam Search, and Major@k. Both Greedy Decoding and Beam Search limit the sampling range to reduce randomness, guiding the model toward more consistent outputs. In contrast, Major@k identifies the most reliable solution by selecting the one with the highest consistency from a set of $k$ candidate solutions.", + "bbox": [ + 169, + 325, + 823, + 397 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "3.2.3 Benchmarks", + "text_level": 1, + "bbox": [ + 171, + 412, + 313, + 426 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In the realm of Benchmarks, the focus lies on assessing the reasoning capabilities of RLLMs across diverse domains. 
There are two primary categories: (1) Outcome Benchmarks, which focus on the holistic view of Long CoT reasoning, and (2) Process Benchmarks, which concentrate on the local view of the Long CoT process or individual capabilities.", + "bbox": [ + 169, + 436, + 823, + 494 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Outcome Benchmarks In the realm of Outcome Benchmarks, the first focus lies on evaluating the logical reasoning capabilities:", + "bbox": [ + 169, + 506, + 823, + 535 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Complex Mathematics: A central focus in complex mathematics is evaluating benchmarks like GSM8K [141] and MATH [253], which assess basic mathematical problem-solving abilities [1125, 1112]. Recent additions, such as AIME 2024 [8], AIME 2025 [571], MATH-500 [449], AMC 2023 [9], USAMO [598], OlympiadBench [239], and OlympiadArena [298], expand the evaluation of LLM performance in mathematics. Moreover, Putnam-AXIOM [224] and FrontierMath [210] introduce more complex problems that challenge future reasoning systems. Additionally, ThinkBench [291] and MATH-Perturb [288] focus on robust evaluation for Long CoT [38, 987].", + "- Complex Coding: Complex coding benchmarks are also vital, with competitions like Codeforces, SWEbench [327], CodeContests [427], and LiveCodeBench [309] evaluating LLM coding and problem-solving skills. Notable additions such as MHPP [148], ProBench [934], HumanEval Pro, MBPP Pro [993], and EquiBench [833] enhance the scope and complexity of coding challenges. Moreover, some studies have explored applying these benchmarks in real-world code development scenarios for automatic code generation and evaluation [243, 744].", + "- Commonsense Puzzle: Commonsense puzzle benchmarks, including LiveBench [850], BIG-Bench Hard [705] and ZebraLogic [450], assess models' ability to reason about commonsense situations. 
The ARC [131] and DRE-Bench [947] is often viewed as a challenging commonsense-based AGI test. JustLogic [87] further contributes to the evaluation of deductive reasoning and commonsense problem-solving. Moreover, Li et al. [382] introduce QuestBench, a benchmark designed to evaluate the ability of RLLMs to generate insightful and meaningful questions." + ], + "bbox": [ + 174, + 542, + 826, + 827 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The second focus area concerns Knowledge Benchmarks, essential for evaluating a model's capability in complex reasoning across various tasks for out of distribution evaluation [776]:", + "bbox": [ + 169, + 833, + 823, + 863 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "- Scientific Reasoning: Scientific Reasoning benchmarks, such as GPQA Diamond [637], MMLU-Pro [821], and SuperGPQA [165], assess multi-domain reasoning in fields like chemistry, biology, and physics [157]. These benchmarks test models' ability to not only accumulate knowledge", + "bbox": [ + 174, + 869, + 826, + 912 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 42, + 308, + 71 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "but also integrate it for problem-solving. Humanity's Last Exam (HLE) [602] further challenges models by requiring deep interdisciplinary reasoning across scientific disciplines. Further, Chung et al. [140] propose TPBench to evaluate the effectiveness of RLLMs in solving theoretical physics problems.", + "bbox": [ + 186, + 90, + 823, + 145 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "- Medical Reasoning: In the realm of Medical Reasoning, the need for complex, domain-specific, and accurate reasoning is paramount [1094, 1024, 905, 589]. 
Benchmarks, such as MedQA [328], JAMA Clinical Challenge [76], LLMEval-Med [1049] and Medbullets [76], simulate diagnostic and treatment decision-making processes, reflecting real-world medical practice. These benchmarks evaluate a model's handling of medical knowledge and reasoning, from diagnosis to treatment planning. Additionally, MedXpertQA [1150] introduces a comprehensive evaluation framework combining text and multimodal data, specifically assessing AI's reasoning capabilities in healthcare.", + "bbox": [ + 174, + 148, + 826, + 261 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "3.2.4 Process Evaluations", + "text_level": 1, + "bbox": [ + 171, + 277, + 366, + 292 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Deep Reasoning Benchmarks Recent progress in RLLMs underscores the need for specialized benchmarks to evaluate their deep reasoning abilities in Long CoT [375, 1133]. Notably, Lin et al. [450] introduces ZebraLogic, a framework for assessing logical reasoning, especially in complex non-monotonic scenarios. Similarly, BigGSM [90] and GSM-Ranges [670] focus on perturbing numerical values to test logical and arithmetic reasoning in edge cases beyond the models' training distribution. ROSCOE [212], ReCEval [606], DiVeRSe [425], HLV [71], and CoT-Kinetics [51] are designed to assess each step in the deep reasoning process during Long CoT tasks.", + "bbox": [ + 169, + 303, + 826, + 402 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Exploration Benchmarks Several studies assess RLLMs' exploration capabilities in Long CoT tasks. Specifically, Sys2Bench [583] evaluates the exploration and scaling abilities of RLLMs, emphasizing generalization across diverse tasks. BanditBench [566] extends this by testing model performance in interactive environments, offering insights into practical applications. 
Additionally, Heyman and Zylberberg [254] introduce a graph coloring problem to assess reasoning and spatial exploration in complex problem-solving scenarios.", + "bbox": [ + 169, + 414, + 826, + 501 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Reflection Benchmarks Reflection benchmarks measure RLLMs' ability to identify, reflect upon, and correct errors in Long CoT reasoning. These benchmarks fall into two categories: feedback and refinement. (1) Feedback Benchmark: These benchmarks assess the ability of LLMs to detect errors and respond to feedback for improvement. For example, Lambert et al. [367] introduces RewardBench to evaluate RLLMs' reward capabilities. This framework is extended by Multimodal RewardBench [960] and CodeCriticBench [1025] to include multimodal and code contexts, respectively. Benchmarks such as ProcessBench [1102], PRMBench [679], MR-Ben [1021], and DeltaBench [250] focus on error detection and correction across various tasks at the step level. Additionally, ReaLMistake [337] and JudgeBench [709] address more real-world error evaluation. (2) Refinement Benchmark: These benchmarks focus on error correction in complex tasks. CriticBench [456] assesses critique-correction capabilities, while MLDebugging [287] and ErrorRadar [922] specialize in coding or multimodal reasoning error detection and refinement. FinerReason [72] introduces a commonsense puzzle for broader feedback and refinement evaluations. Medec [1] adapts error correction to healthcare, addressing medical issues.", + "bbox": [ + 169, + 512, + 828, + 708 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "3.2.5 Advanced Evaluation", + "text_level": 1, + "bbox": [ + 171, + 720, + 375, + 734 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Agentic & Embodied Reasoning Agentic and Embodied reasoning requires models to demonstrate an understanding of real-world interactions, tool use, and adaptive reasoning in response to change. 
To assess real-world understanding, Wang et al. [798] introduce a benchmark that evaluates agents' ability to reason about physical concepts. Zhang et al. [1064] extend this by assessing agents' interactions with real-world physics. Additionally, realistic tasks often demand complex planning and tool usage, necessitating benchmarks to evaluate agent reasoning. These benchmarks assess agents' abilities to navigate and complete tasks in digital environments. Building on this, Huang et al. [283] propose a framework for evaluating decision-making in multi-agent, competitive settings. Nath et al. [562] introduce ToolComp, a benchmark designed to evaluate multi-step tool-use reasoning. To analyze adaptive reasoning in the face of real-world change, OSWorld [887], CogAgent [260], Mobile-Agent-E [828], WebShop [954], WebArena [1126], WGSR-Bench [972], and WebGames [735] assess AI systems across domains such as operating systems, mobile GUIs, browser tasks, and interactive", + "bbox": [ + 169, + 744, + 828, + 912 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 42, + 308, + 71 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "entertainment [1106, 780, 512, 552]. Hu et al. [272] present Text2World, which evaluates agents' ability to generate interactive environments from text to test agent adaptability [995].", + "bbox": [ + 169, + 90, + 826, + 122 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Multimodal Reasoning Multimodal reasoning refers to a system's ability to integrate and reason across diverse input types, including text, images [316]. 
This capability is crucial for solving complex problems that require information from diverse formats.", + "bbox": [ + 169, + 135, + 826, + 179 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Complex Mathematics: Mathematical reasoning often integrates both textual and visual components, such as equations, graphs, or diagrams [921]. Specifically, challenges like MathVista [508], MathVision [782], MathVerse [1054], M3CoT-Math [91], CMMaTH [433], EnigmaEval [763], CoMT-Geometry [125], and PGPS9K [1050] aim to advance multimodal reasoning in mathematics, improving the evaluation of multimodal Long CoT logic.", + "- Complex Code: The second area of focus involves code-related reasoning, where systems interpret textual descriptions and code snippets. Benchmarks like HumanEval-V [1035], Code-Vision [767], Plot2Code [852], and ChartMimic [931] evaluate systems' capabilities to generate or interpret code from natural language and multimodal inputs for assessing systems that integrate natural language processing with programming tasks.", + "- **Complex Science:** This area involves integrating scientific texts with related diagrams or experimental data. Benchmarks like ScienceQA [507], M3CoT-Science [91], BMMR [874], and ScienceBoard [698] evaluate how well models combine science information with Long CoT reasoning across various scientific domains [966]. Further, Guo et al. [229] propose MolPuzzle for the evaluation of molecular structure elucidation.", + "- Commonsense Puzzle: This area focuses on commonsense reasoning, where systems combine reasoning cues and images to make deeper conclusions. Chen et al. [91] introduce M3CoT-Commensense, which incorporates commonsense Long CoT reasoning for complex multimodal interactions. Further, PuzzleVQA [128], MMReason [953] and LEGO-Puzzles [711] focus more on abstract and spatial puzzle reasoning, respectively. Additionally, Wang et al. 
[760] propose two benchmarks: Clue-Visual Question Answering (CVQA), which tests visual comprehension through three task types, and Clue of Password-Visual Question Answering (CPVQA), which features two task types focusing on the interpretation and application of visual data." + ], + "bbox": [ + 174, + 185, + 826, + 513 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "AI for Research Recent advancements in AI have significantly advanced scientific research [94, 1124, 817, 215], with platforms like SciWorld [798] improving the research process. Simultaneously, Pricope [608] and Chan et al. [67] introduce a machine-learning platform to evaluate the potential of RLLMs in automating experiments. Several studies also examine RLLMs' ability to generate innovative research ideas. For instance, Si et al. [672] conduct evaluations with over 100 NLP researchers to assess RLLMs' creativity, revealing notable limitations [404, 856, 726]. Additionally, Li et al. [434] introduce SolutionBench, a benchmark for assessing systems' ability to generate feasible solutions for complex engineering problems.", + "bbox": [ + 169, + 530, + 828, + 643 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4 Deep Reasoning for Long CoT", + "text_level": 1, + "bbox": [ + 171, + 662, + 460, + 680 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Deep reasoning capabilities primarily require profound depth and comprehensiveness in cognitive and reasoning processes. In the absence of such capabilities, RLLMs suffer significant performance declines [758, 823]. 
Current methods for enhancing deep reasoning can be categorized into two main approaches: (1) Deep Reasoning Format ( $\\S$ 4.1), which involves utilizing various reasoning execution formats to maximize the reasoning step length $k$ within reasoning boundary $\\mathcal{B}_l$ in Equation (2), by selecting the most suitable reasoning format; and (2) Deep Reasoning Learning ( $\\S$ 4.2), which focuses on improving the model's internal capabilities to enhance its deep reasoning abilities, thereby extending the reasoning boundary $\\mathcal{B}_l$ in Equation (2) intrinsically.", + "bbox": [ + 169, + 696, + 826, + 808 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.1 Deep Reasoning Format", + "text_level": 1, + "bbox": [ + 171, + 828, + 382, + 843 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "As illustrated in Figure 5, deep reasoning formats can be categorized into three main types: natural language ( $\\S$ 4.1.1), structured language ( $\\S$ 4.1.2), and latent-space reasoning ( $\\S$ 4.1.3), the latter of which is further subdivided into token-, vector-, and manager-driven latent reasoning. The reasoning performance across these formats is presented in Table 1.", + "bbox": [ + 169, + 854, + 825, + 912 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 42, + 308, + 71 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "(a) Natural Language Deep Reasoning", + "text_level": 1, + "bbox": [ + 186, + 92, + 397, + 104 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To predict the output of the given input for Conway's Game of Life, we need to apply the rules of the game to each cell on the board. 
The rules are as follows:", + "bbox": [ + 196, + 111, + 482, + 142 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Any live cell with fewer than two live neighbors dies (underpopulation)...", + "Given Input Board: ...", + "$\\spadesuit$ Step-by-Step Analysis: ...", + "$\\spadesuit$ Final Output: After applying the rules to each cell..." + ], + "bbox": [ + 197, + 143, + 482, + 193 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/ff164d8152d0e42a100061acaca7da8e5deb82846df7779c8c7d61fa44616288.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 441, + 164, + 488, + 204 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "(b) Structured Language Deep Reasoning", + "text_level": 1, + "bbox": [ + 506, + 92, + 738, + 106 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/5627b8fe0330637f914d05c2ea3b75f4df43c678ec6ae3f0e9b7da8f94f4f43f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 112, + 537, + 131 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "import necessary packages from collections import Cou", + "bbox": [ + 539, + 111, + 692, + 130 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "import necessary packages", + "bbox": [ + 511, + 131, + 669, + 140 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "from collections import Counter", + "bbox": [ + 511, + 141, + 689, + 148 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "all class and function definitions in the code", + "bbox": [ + 511, + 148, + 785, + 159 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "file, if any", + "bbox": [ + 511, + 159, + 584, + 167 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "class Solution(object):", + "bbox": [ + 511, + 167, + 645, + 176 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "defgameOfLifeInfinite(self, live): ctr = Counter((I, J) for i, j i", + "bbox": [ + 532, + 
176, + 733, + 193 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/998c396b19ab3cb07b28b1eb72b14b5078de0675ec62ef33294a59363f34d6e2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 735, + 167, + 807, + 204 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "(c) Latent Space Deep Reasoning", + "text_level": 1, + "bbox": [ + 189, + 210, + 379, + 224 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/823ca26e30f5429ff0ae86df5e048ed2430dd9bdc62c2d874445fe64c1774d87.jpg", + "image_caption": [ + "Figure 5: Three main categories of deep reasoning formats: natural language, structured language, and latent-space reasoning (subdivided into token-, vector-, and manager-driven latent reasoning), with examples drawn from Li et al. [401]." + ], + "image_footnote": [], + "bbox": [ + 191, + 224, + 228, + 299 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/aa202666c0347e30f452afb50132bf46686ba98433600b7ee8d0e4c2f30ad8f5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 228, + 224, + 423, + 306 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/09f798353b1e4615f84c4d824a90ec1e55d3d23579c20ec2917c35a81ade4452.jpg", + "image_caption": [], + "image_footnote": [ + "Reasoning Vector Driven Latent Space Deep Reasoning" + ], + "bbox": [ + 423, + 208, + 622, + 289 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/3effed9f4c545b03b2ac2c365b4d87fe9724f58c4dddd4294def63e3d2f5672e.jpg", + "image_caption": [], + "image_footnote": [ + "Reasoning Manager Driven Latent Space Deep Reasoning" + ], + "bbox": [ + 622, + 208, + 812, + 290 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "4.1.1 Natural Language Deep Reasoning", + "text_level": 1, + "bbox": [ + 171, + 388, + 468, + 404 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Traditionally, researchers have sought to adapt natural language for intuitive and free-flowing deep 
reasoning [836, 1118, 303, 617, 1070, 765, 205]. Early work by Wei et al. [836] demonstrated that the use of natural language Long CoT significantly enhances the reasoning capabilities of RLLMs. Further, the Natural Program framework [460] allows RLLMs to engage in deeper natural language reasoning by ensuring a more structured and rigorous logical analysis. More recently, CodeI/O [401] has introduced a technique that reorganizes code-based reasoning patterns into natural language formats, further boosting the reasoning potential of RLLMs [36]. Similarly, Li et al. [387] propose CoRT, which integrates code into reasoning to facilitate a mixture of formats, resulting in improved cognitive performance.", + "bbox": [ + 169, + 412, + 826, + 539 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "4.1.2 Structured Language Deep Reasoning", + "text_level": 1, + "bbox": [ + 171, + 555, + 491, + 570 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Structured language deep reasoning encompasses various approaches designed to program [100, 464, 687, 591, 198, 845, 830, 1044] or symbolic language [605, 158, 451, 372, 933, 604, 37, 40, 797, 380] format for enhanced deep reasoning. In this context, most studies focus on utilizing code to better enhance the mathematical reasoning capabilities [389, 107, 978, 85]. Xu et al. [897] propose a neural-symbol self-training framework guided by the environment, addressing both the scarcity of symbolic data and the limitations of symbolic processing in LLMs. Additionally, Liao et al. [443] present SKIntern, which refines symbolic RLLMs through curriculum learning and linear attenuation, enabling the internalization of symbolic knowledge with fewer examples, reducing computational costs, and accelerating inference. Furthermore, Ranaldi et al. 
[634] introduce QuaSAR, a CoT variant that directs LLMs to operate at higher abstraction levels through quasi-symbolic reasoning, thus improving natural language reasoning and providing more precise structural representations.", + "bbox": [ + 169, + 579, + 826, + 733 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "4.1.3 Latent Space Deep Reasoning", + "text_level": 1, + "bbox": [ + 171, + 750, + 433, + 763 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Latent space deep reasoning encompasses techniques designed to enhance the reasoning abilities of LLMs by leveraging operations within continuous latent spaces [684, 151, 640, 324]. These approaches can be categorized into three main paradigms: (1) Reasoning Token-Driven Latent Space Deep Reasoning: Early work [810, 1013] introduce the concept of \"planning tokens\" or \"thought tokens\" to guide reasoning within latent spaces [949, 1008]. Further, Coconut [236] expands on this through the maintenance of multiple alternative reasoning paths, increasing both complexity and efficiency [1069, 706]. At the extreme, Heima [662] condenses the entire Long CoT process into a single token, yielding substantial computational savings. (2) Reasoning Vector Driven Latent Space Deep Reasoning: Building on the previous paradigm, LTM [356] conceptualizes the layers of LLMs as \"thought blocks\" and introduces the concept of \"thought vectors\" for each layer. 
This", + "bbox": [ + 169, + 772, + 826, + 912 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 64 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 230, + 64, + 305, + 69 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/ebcdb7892865c413666c63573d7f974aac12588169f830423b0bd269bf85e3b2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelBase ModelGSM8kMATHGPQAOlympiadBenchLiveCodeBench
Latent Space Deep Reasoning
No-CoT [151]Mistral-7B [318]38.0----
SQ-VAE [810]Llama-2-7B [743]40.07.0---
RecurrentBlock-3.5B [204]-42.1----
ICoT-SI [151]Mistral-7B [318]51.0----
Natural Language Deep Reasoning
Self-Rewarding [114]Llama-2-7B [743]40.010.7---
Llama-3.1-8B [168]-56.720.3---
MetaMath [983]Llama-2-7B [743]66.5----
OVM [979]Llama-2-7B [743]73.7----
NuminaMath-7B-CoT [397]-75.455.2-19.9-
Qwen2-7B [925]-79.944.2-21.3-
Qwen2-Math-7B [927]-80.450.4-38.2-
Internlm2-math-plus-7B [974]-84.054.4-18.8-
OMI2 [401]Qwen2.5-Coder-7B [301]84.172.336.2-27.2
Llama-3.1-70B [168]-85.541.4---
CODEI/O++ [401]Qwen2.5-Coder-7B [301]85.772.140.6-29.1
CODEI/O [401]Qwen2.5-Coder-7B [301]86.471.943.3-28.5
WI [401]Qwen2.5-Coder-7B [301]87.071.439.1-26.0
WI (Full) [401]Qwen2.5-Coder-7B [301]87.071.142.9-27.6
OMI2 (Full) [401]Qwen2.5-Coder-7B [301]88.573.240.9-28.4
DeepSeekMath-7B-RL [658]-88.251.7-19.0-
Llama-3.1-405B [168]-89.053.8---
CoMAT [371]GPT-4 [3]93.7-40.4--
CoT [634]GPT-4 [3]94.5-41.850.2-
FCoT [523]GPT-4 [3]95.0----
Qwen2.5-Math-7B-Instruct [927]-95.283.6-41.6-
MathPrompter [303]GPT-4 [3]95.6----
Qwen2.5-Math-72B-Instruct [927]-95.985.9-49.0-
DeepSeek-R1-Distill-Qwen-7B [227]--92.8-49.137.6
DeepSeek-R1-Distill-Qwen-32B [227]--94.3-62.157.2
Structured Language Deep Reasoning
STaR [1012]Llama-2-7B [743]58.216.0---
ENVISIONS [897]Llama-2-7B [743]59.019.0---
MAmmoTH [1006]Code-Llama-7B [639]59.4----
MathCoder-CL [783]Code-Llama-7B [639]67.830.2---
ToRA-Code [217]Llama-2-7B [743]72.6----
Brain [107]Code-Llama-7B [639]74.0----
DeepSeek-Coder-7B [226]-77.444.4---
SIaM [978]Qwen-2-Math-Base81.550---
OC-SFT-1 [401]Qwen2.5-Coder-7B [301]86.770.937.7-27.5
PyEdu [401]Qwen2.5-Coder-7B [301]85.871.440.9-25.8
Qwen2.5-Math-7B-Instruct [927]-94.685.2-55.6-
Qwen2.5-Math-72B-Instruct [927]-95.888.1-60.6-
QuaSAR [634]GPT-4 [3]96.5-55.444.6-
MathDivide [687]GPT-4 [3]96.8---
", + "bbox": [ + 181, + 88, + 815, + 559 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 1: Performance of various deep reasoning formats, sorted primarily by GSM8K scores. “-” indicates that the paper did not report this score.", + "bbox": [ + 169, + 565, + 826, + 594 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "approach allows for the scaling of inference-time computations by implicitly performing reasoning within the latent space through recurrent depth. (3) Reasoning Manager Driven Latent Space Deep Reasoning: Inspired by these, Schone et al. [647], Geiping et al. [204], and Saunshi et al. [646] propose a mechanism similar to a continuous reasoning manager, which iteratively governs a trained \"recurrent block\" as a recurrent \"thought block\" [511]. This method integrates deeper model layers during reasoning, enhancing performance without needing specialized training data, and even outperforming larger RLLMs. Additionally, ITT [109] leverages the original transformer layer as a recurrent \"thought block\", selecting key tokens via adaptive token routing and controlling reasoning depth with residual thinking connections, enabling more efficient processing of critical tokens. Further, System-1.5 Reasoning [808] defines two dynamic shortcuts. The Model Depth Shortcut (DS) lets non-critical tokens exit early via lightweight adapter branches while routing critical tokens through deeper Transformer layers, thus supporting adaptive, vertical reasoning. The Step Shortcut (SS) reuses hidden states across decoding steps to bypass trivial iterations and enable horizontal reasoning in latent space.", + "bbox": [ + 169, + 619, + 826, + 815 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "4.2 Deep Reasoning Learning", + "text_level": 1, + "bbox": [ + 171, + 830, + 393, + 845 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Insufficient deep reasoning in RLLMs can significantly degrade performance [758, 823]. 
As a result, research has focused on improving reasoning through training. Supervised fine-tuning (SFT) [1058] stabilizes model outputs by serving as a memory process [883], while reinforcement learning (RL) enables generalization and self-learning [227, 137, 276, 898]. Recent studies for deep reasoning", + "bbox": [ + 169, + 854, + 826, + 912 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 65 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 230, + 65, + 305, + 69 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/787c5674fba7b0ce5e4ca3ac3eefd20babe3c384dc807cab022b3df606b88f7a.jpg", + "image_caption": [ + "(a) Deep Reasoning Imitation" + ], + "image_footnote": [], + "bbox": [ + 187, + 112, + 483, + 188 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/04fef9422d7990eb4d902d3c902905109bd7fe0911512bee51a344a37488531e.jpg", + "image_caption": [ + "(b) Deep Reasoning Self-Learning", + "Figure 6: The different learning strategies of deep reasoning learning, including deep reasoning imitation of the data from advanced deep reasoning systems, like advanced RLLMs, MCTS, etc.; deep reasoning self-learning from preference-based RL by implicit reward." + ], + "image_footnote": [], + "bbox": [ + 496, + 112, + 810, + 186 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "learning have explored using SFT to imitate advanced reasoning in RLLMs and applying RL to enhance self-improvement in reasoning. 
As illustrated in Figure 6, this section outlines two key approaches to improve deep reasoning: (1) Deep Reasoning Imitation (§ 4.2.1), which involves learning reasoning from human-annotated or distilled data through SFT, and (2) Deep Reasoning Self-Learning (§ 4.2.2), where models improve reasoning through preference-based RL with implicit rewards. The performance of these methods is shown in Table 2.", + "bbox": [ + 169, + 272, + 823, + 354 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "4.2.1 Deep Reasoning Imitation", + "text_level": 1, + "bbox": [ + 171, + 371, + 406, + 386 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Deep reasoning in RLLMs can be effectively achieved by mimicking advanced reasoning systems, such as human reasoning [558, 61, 115, 403], advanced RLLMs [227, 58, 957, 370, 102], and scaling-augmented RLLMs [410, 1003, 596, 1136, 41]. This approach enables the model to learn complex reasoning patterns and generalize across tasks [937, 416]. Specifically, (1) Imitation from Human: Earlier, Cobbe et al. [141] first propose the deep reasoning imitation paradigm using human examples. ALT [558] improves RLLM reasoning by generating larger datasets of human-annotated logical templates, which fosters deeper reasoning [241]. To enhance diversity, EIT [61] promotes simpler human-generated plans, while LLMs contribute more nuanced reasoning, facilitating collaboration between human input and AI. (2) Imitation from Advanced RLLMs: A body of work utilizes zero-shot prompting to guide large teacher RLLMs in generating reasoning rationale, which is then used to fine-tune smaller RLLMs, marking the beginning of deep reasoning imitation [256, 352, 938, 521]. Additionally, AceMath [500] applies few-shot prompting to distill Long CoT samples from advanced LLMs, followed by multi-stage quality-guided SFT to enhance performance. Chen et al. 
[107] separate the data synthesis process into planning and reasoning stages, thereby improving reasoning quality. DART-Math [738] effectively distills complex queries requiring deeper reasoning during synthesis, advancing deep reasoning capabilities. Further, Ahmad et al. [7] propose OpenCodeReasoning, expanding this paradigm to the code scenarios. (3) Imitation from Scaling-augmented RLLMs: Earlier, Bansal et al. [34] enhance data quality by scaling the sampling size and length, boosting imitation performance [481, 1005]. Yang et al. [927] and Zhao et al. [1090] further improve data quality by scaling sampling and selecting samples through sample feature or an additional reward model. Additionally, Li et al. [410] identify optimal deep reasoning paths through MCTS, advancing imitation effectiveness.", + "bbox": [ + 169, + 393, + 826, + 699 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Recent studies [299, 550] show that distilling knowledge from advanced RLLM APIs like O1 [307] and R1 [227] significantly enhances the performance of smaller LLMs [424, 223]. This method, employing supervised fine-tuning, boosts model performance on complex mathematical reasoning tasks, sometimes surpassing the teacher models' performance. Building on these findings, LIMO [967], S1 [560], and RedStar [902] argue that a large number of imitation samples is unnecessary. They demonstrate that even a minimal set of samples can activate deep reasoning capabilities in foundational LLMs. For practical applications, Turtel et al. [747] showcase how these techniques can predict future events beyond a model's knowledge cutoff. Sun et al. [701], Yang et al. [928] and Zhao et al. 
[1093] further enhance deep reasoning imitation by selecting high-quality samples from large datasets, thereby improving the quality of the imitation data.", + "bbox": [ + 169, + 704, + 826, + 844 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "4.2.2 Deep Reasoning Self-Learning", + "text_level": 1, + "bbox": [ + 171, + 859, + 437, + 875 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "While simple imitation can yield strong performance, current models still rely heavily on human annotations or outputs from more advanced models for both imitation and distillation [502]. To", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 41, + 225, + 70 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 307, + 59 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 230, + 59, + 305, + 68 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/0a51aaca5f29b7a1155025d64a0e6be21201b8129c276ddc53c0a7fa47545014.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelData SizeBase ModelGSM8KMATHMATH-500AIME2024GPQAOlympiadBench
Deep Reasoning Imitation
SFT [938]200KLlama-3.1-8B [168]---54.13.5-
Retro-Enh [115]14MLlama-3-8B [168]45.121.7----
Query-Exp [115]24MLlama-3-8B [168]51.323.1----
Res-Div [115]14MLlama-3-8B [168]53.023.2----
MetaMath [738]0.40MMistral-7B [318]76.529.8---5.9
ALT-FLDx2 [558]100KLlama-3.1-70B [168]83.324.4----
EIT [61]15KLlama-2-70B [743]84.132.5----
MathScale [738]2.0MMistral-7B [318]74.835.2----
Tutor-Amp [115]11MLlama-3-8B [168]64.435.9----
MMIQC [738]2.3MMistral-7B [318]75.437.4---9.4
VRT [738]0.59MMistral-7B [318]82.338.7---8.7
KPMath-Plus [738]1.6MMistral-7B [318]82.146.8----
Llama-2-70B-Xwin-Math-V1.1 [385]1.4MLlama-2-70B [743]90.252.5---16.3
DART-Math-Mistral-7B [738]591KMistral-7B [318]81.145.5---14.7
DART-Math-Llama-3-70B [738]591KLlama-3-70B [168]89.656.1---20.0
Rejection Sampling [410]197KQwen2.5-7B [926]87.170.0-10.0-27.1
Evol-Instruct-7B [514]905KQwen2.5-Math-7B [927]88.5-77.416.7--
FastMCTS [410]288KQwen2.5-7B [926]88.974.0-20.0-27.5
KPDDS-7B [295]800KQwen2.5-Math-7B [927]89.9-76.010.0--
DeepSeek-R1-Distill-Qwen-7B [227]800KQwen2.5-7B-Instruct [926]91.7-91.643.3--
Openmathinstruct-7B [740]14MQwen2.5-Math-7B [927]92.0-79.610.0--
NuminaMath [967]100KQwen2.5-Math-7B [927]92.9-81.820.0--
PromptCoT-DS-7B [1090]115KDeepSeek-R1-Distill-Qwen-7B [227]92.6-93.060.0--
PromptCoT-Qwen-7B [1090]905KQwen2.5-Math-7B [927]93.3-84.026.7--
AceMath-7B-Instruct [500]1.2MQwen2-Math-7B-Instruct [927]93.783.1---42.2
AceMath-72B-Instruct [500]1.2MQwen2.5-Math-72B-Instruct [927]96.486.1---48.4
NuminaMath [967]100KQwen2.5-32B-Instruct [926]--59.26.525.836.7
OpenThoughts [967]114KQwen2.5-32B-Instruct [926]--80.650.242.956.3
Sky-T1-32B-Preview [724]17KQwen2.5-32B-Instruct [926]--82.443.356.8-
Journey Learning [299]5KQwen2.5-Math-72B [927]--87.243.3--
STILL-2 [550]3.9KQwen2.5-32B-Instruct [926]--90.246.755.1-
Bespoke-32B [362]17KQwen2.5-32B-Instruct [926]--93.063.358.1-
s1 [560]1KQwen2.5-32B-Instruct [926]--93.056.759.6-
DeepSeek-R1-Distill-Qwen-32B [227]800KQwen2.5-32B-Instruct [926]--94.372.662.1-
LIMO [967]817Qwen2.5-32B-Instruct [926]--94.815.866.766.8
Deep Reasoning Self-Learning
DPO [302]40KDeepSeek-Math-7B-Base [658]74.834.9----
RefT [302]40KDeepSeek-Math-7B-Base [658]71.436.0----
Self-Explore [302]40KDeepSeek-Math-7B-Base [658]78.637.7----
SimPO [723]10KQwen2.5-Math-7B-Instruct [927]88.840.056.6---
DPO [446]11KDeepSeek-Math-7B-Instruct [658]-48.7----
TPO [446]11KDeepSeek-Math-7B-Instruct [658]-51.3----
DPO [446]11KQwen2-7B-Instruct [925]-54.3----
TPO [446]11KQwen2-7B-Instruct [925]-55.5----
MCTS [74]15KDeepSeek-Math-7B-Base [658]83.264.0----
SBS [74]15KDeepSeek-Math-7B-Base [658]84.166.3----
FastMCTS+Branch-DPO [410]152KFastMCTS-7B [410]89.975.4-20.0-29.6
", + "bbox": [ + 178, + 90, + 818, + 481 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 2: Performance of various deep reasoning learning methods, sorted primarily by Math or Math-500 scores. “-” indicates that the paper did not report this score.", + "bbox": [ + 169, + 488, + 823, + 517 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "address this limitation, recent research has focused on enabling more advanced reasoning through techniques like self-play and self-learning [948, 1077, 409, 624]. Specifically, self-learning methods can be classified into two paradigms, differentiated by their sampling strategies:", + "bbox": [ + 169, + 599, + 823, + 643 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "(1) Self-Learning from Direct Sampling: The earliest method, STaR [1012], utilizes In-Context Learning (ICL) to sample deep reasoning results [657] and uses the correctness of the final answer as an implicit reward for self-learning [258, 581, 582, 1059, 826, 462]. Further, ReST [225] extends this by introducing a Grow-Improve paradigm, where self-generated reasoning is first annotated with rewards and then enhanced via offline RL algorithms. However, these approaches can be fragile, especially when the reward process lacks robustness. Inspired by the Expectation-Maximization (EM) algorithm, Singh et al. [674] propose a method that generates rewards and iteratively optimizes LLMs to achieve the best performance on a validation set, significantly improving robustness. To further strengthen the reward process, a series of work introduces a method to adapt incorrect solutions, training a verifier [155, 262] or utilize entropy [809, 1040] to select or refine the reward process and improve self-learning quality. 
(2) Self-Learning from Tree Search: Early deep learning methods, such as EXIT [18], combined MCTS with deep neural networks for reinforcement learning, iteratively self-training the network to guide the tree search and enhance reasoning. Building on this, CPO [1065] and TPO [446] align each step of Long CoT reasoning with the corresponding tree search path, using Tree of Thoughts (ToT) [955] preference information to support deeper reasoning [951, 302]. Li [422] propose Policy-Guided Tree Search (PGTS), integrating RL with structured tree exploration for more efficient navigation of reasoning paths. Further developments, such as AlphaMath [74], AlphaLLM-CPL [814], and TongGeometry [1029], refine MCTS behavior through stepwise trajectory pair extraction and curriculum preference learning, boosting LLM reasoning abilities [611, 412, 872].", + "bbox": [ + 169, + 648, + 826, + 912 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 230, + 59, + 305, + 68 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Takeaways: Imitation & Self-Learning", + "text_level": 1, + "bbox": [ + 204, + 90, + 460, + 107 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Imitating deep reasoning from advanced RLLMs, and scaling-augmented methods like MCTS can help models learn complex reasoning patterns with fewer samples.", + "- Self-learning techniques, including reinforcement learning and tree search, allow RLLMs to enhance their reasoning abilities over time.", + "- The combination of imitation from advanced RLLMs and self-learning techniques strengthens RLLM 
reasoning, leading to strong performance on complex tasks." + ], + "bbox": [ + 200, + 114, + 799, + 205 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "5 Feasible Reflection for Long CoT", + "text_level": 1, + "bbox": [ + 171, + 234, + 480, + 252 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Feasible Reflection is a pivotal component of Long CoT reasoning, enabling LLMs to handle complex tasks through iterative feedback and refinement [406, 192]. Specifically, it comprises two primary stages: (1) Feedback ( $\\S$ 5.1), which generates feedback signals $\\mathcal{F}_i$ to correct node $n_j$ in Equation (5); and (2) Refinement ( $\\S$ 5.2), which adjusts the subsequent node $n_{i+1}$ according to the feedback in Equation (6).", + "bbox": [ + 169, + 265, + 823, + 335 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "5.1 Feedback", + "text_level": 1, + "bbox": [ + 171, + 351, + 279, + 364 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Feedback refers to the process of providing evaluations of both overall outputs and the processes that lead to them, with the goal of assessing their accuracy and quality [394, 396, 838, 220, 862]. This process, also referred to as critique or verification, can be executed using either natural language or structured data formats, which serve as the foundation for tree-search methods [113]. Specifically, as shown in Figure 7, feedback can be categorized into three distinct types: (1) Overall Feedback ( $\\S$ 5.1.1); (2) Process Feedback ( $\\S$ 5.1.2); (3) Hybrid Feedback ( $\\S$ 5.1.3).", + "bbox": [ + 169, + 377, + 823, + 460 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "5.1.1 Overall Feedback", + "text_level": 1, + "bbox": [ + 171, + 474, + 346, + 488 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The overall feedback focuses on providing a global view of the entire process and results, rather than assessing each step individually. 
This feedback significantly enhances reasoning skills and reward modeling in reinforcement learning for RLLMs. Specifically, as shown in Figure 7 (a), the overall feedback can be categorized into three main sources: Outcome Reward Model, Rule Extraction, and RLLMs Feedback. The performance across these categories is summarized in Table 3.", + "bbox": [ + 169, + 498, + 823, + 568 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/924dba4b1d5c6d25f0eff62713bafcbf9c36e9cd21483aae275897e288afdd77.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 253, + 589, + 415, + 703 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/367cca6990189dfda7e049a2d562809a0e9869ca5351f1d2d1d1e74c0f9bcafd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 423, + 593, + 578, + 703 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/dd4b7e43b794582020a033da732daf0b1be53e45111b8e9717414d483b50896e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 581, + 593, + 738, + 703 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/6914fc78c8aeece2af825dabacd242f08c842b612001c13322264246623afb04.jpg", + "image_caption": [ + "Figure 7: The feedback capabilities framework for feasible reflection consists of Overall Feedback and Process Feedback. Overall Feedback includes the Outcome Reward Model (ORM) in a value format, rule extraction for correctness judgment, and overall RLLMs based on RLLMs. Process Feedback includes the Process Reward Model (PRM) in a value format and step-level RLLMs, also based on RLLMs." 
+ ], + "image_footnote": [], + "bbox": [ + 254, + 705, + 431, + 829 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/75c5bea65e3eccbc79affd34b429b7f444436c52e5988975f4dec0ecb68328a3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 446, + 707, + 733, + 829 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 41, + 225, + 70 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 307, + 59 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 65 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 230, + 65, + 300, + 69 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/67120b715e01e4e0a5691bdc5abb971e8ebe54977751e351a5a6b2de0ae0cf33.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelBase ModelChatChat_HardSafetyReasoningOverall
RLLMs
GPT-4o-mini [3]-95.060.780.883.780.1
Llama3.1-70B-Instruct [168]-97.270.286.082.884.0
Llama3.1-405B-Instruct [168]-97.274.687.177.684.1
GPT-4 [3]-95.374.386.987.686.0
GPT-4o [3]-96.176.186.688.186.7
Gemini-1.5-pro [719]-92.380.687.992.088.2
Self-taught Evaluator [803]Llama-3.1-70B-Instruct [168]96.684.281.091.588.3
SFR-LLMA-3.1-8B-Judge [791]Llama-3.1-70B-Instruct [168]95.577.786.295.188.7
SFR-NeMo-12B-Judge [791]Mistral-NeMo-Instruct-12B [725]97.282.286.595.190.3
SFR-LLMA-3.1-70B-Judge [791]Llama-3.1-70B-Instruct [168]96.984.891.697.692.7
Skywork-Critic-Llama-3.1-70B [791]Llama-3.1-70B-Instruct [168]96.687.993.195.593.3
LMUnit [641]Llama-3.1-70B-Instruct [168]----93.4
EvalPlanner [643]Llama-3.1-70B-Instruct [168]97.589.493.095.593.9
Outcome Reward Models
tulu-v2.5-13b-uf-rm [306]TULU-2-13B [305]39.442.355.547.446.1
Prometheus-2-7B [353]Mistral-7B-Instruct-v0.2 [318]85.549.177.176.572.0
Prometheus-8x7b-v2 [353]Mixtral-8x7B-Instruct [319]93.047.180.577.474.5
Critic-RM-Rank [991]Llama-3.1-70B-Instruct [168]97.058.084.092.082.8
RM [689]Llama-3.1-70B-Instruct [168]98.374.583.888.086.4
SynRM [968]Llama-3.1-70B-Instruct [168]97.576.886.388.587.3
CLoud [17]Llama-3-70B-Instruct [168]98.075.687.689.087.6
FLAME-RM-24B [753]PaLM-2-24B [16]92.275.789.693.887.8
SteerLM-RM 70B [829]Llama-2-70B-chat [743]91.380.390.692.888.8
Llama-3-OffsetBias-RM-8B [585]Llama-3-8B-Instruct [168]97.281.886.891.989.4
InternLM-20B-Reward [62]InternLM2-8B-Instruct [62]98.976.589.995.890.2
ArmoRM-Llama3-8B-v0.1 [771]Llama-3-8B-Instruct [168]96.976.892.297.390.8
Nemotron-4-340B-Reward [829]Nemotron-4-340B [4]95.887.192.293.692.2
Skywork-Reward-Llama-3.1-8B [466]Llama-3.1-70B-Instruct [168]95.887.390.696.292.5
Skywork-Reward-Gemma-2-27B [466]Gemma-2-27B-it [720]95.891.492.096.193.8
", + "bbox": [ + 197, + 87, + 799, + 393 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 3: Performance of various overall feedback methods, sorted primarily by Overall scores in RewardBench [367]. “-” indicates that the paper did not report this score.", + "bbox": [ + 169, + 400, + 823, + 429 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Overall Feedback from Outcome Reward Model Since many tasks cannot be directly evaluated using accuracy or other standard metrics, research has increasingly focused on Outcome Reward Models (ORM), which provide value-based rewards for more general and quantifiable feedback [1127, 986, 467]. In 2021, OpenAI [141] has proposed a \"Gen-Verifier\" paradigm, which uses a specialized ORM to evaluate the accuracy of generated rationales, showing significant progress in feedback capabilities [658]. Ji et al. [315] introduce a trained knowledge scorer to analyze hallucinations in the reasoning process, providing feedback to RLLMs and improving the accuracy of their outputs over time. Moreover, Generative Reward Models [1048] use next-token prediction for overall feedback, which seamlessly integrates with instruction adjustments, leveraging inference-time calculations to improve ORM feedback.", + "bbox": [ + 169, + 465, + 826, + 604 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "However, specifically trained ORMs are often costly and not sufficiently robust. Building on this, Self-Rewarding Language Models (SRLMs) [1129] incorporate a self-consistency framework, optimizing feedback to improve model alignment and consistency [1047]. Yu et al. [991] introduce Critic-RM, combining RLLM-generated natural language criticism with corresponding feedback. 
This method filters high-quality feedback while jointly fine-tuning reward prediction and criticism generation, optimizing ORM performance.", + "bbox": [ + 169, + 611, + 828, + 696 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Overall Feedback from Rule Extraction Although ORM has achieved significant improvements, its accuracy still falls short of $100\\%$ , preventing it from outperforming rule-based answer correction feedback [955, 234, 1079]. Previous studies, such as STaR [1012], ReST [225], and ReFT [745], have demonstrated that feedback based on final answer rewards is more effective than both PRM and ORM in mathematical scenarios [197]. Furthermore, Guo et al. [227] and Xie et al. [886] introduce a multi-stage RL framework that incorporates rule-based rewards, significantly enhancing both output accuracy and length while mitigating reward hacking through simple yet robust rules [30], such as format validation and result verification. In coding scenarios where direct rule-based feedback is difficult, OpenCodeInterpreter [1108], AceCoder [1014], O1-Coder [1076], and VerMCTS [56] address this challenge by implementing an automated test-case synthesis pipeline, deriving rewards based on program performance [564, 216, 1115]. Additionally, Ma et al. [536] propose an automated approach to training a test case generator, which alleviates the scarcity of test cases and demonstrates that increasing the number of test cases correlates with improved reward quality. Moreover, Ma et al. 
[535] decompose problem-solving into structured coding subtasks: file localization, function", + "bbox": [ + 169, + 717, + 826, + 912 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 230, + 59, + 305, + 68 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "localization, line localization, and code editing generation, and applies multi-viewed rule-based rewards.", + "bbox": [ + 169, + 90, + 823, + 119 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Overall Feedback from RLLMs Research on feedback from RLLMs centers on detecting errors and biases through natural language feedback, also known as LLM-as-Judge, self-reflection or self-critique [274, 336, 29, 638, 549, 802, 1002, 895, 529]. This method has led to significant improvements across various tasks, particularly in self-correction [848, 1109, 206, 184, 1075]. Huang et al. [286] contend that traditional LLMs struggle to generate effective feedback without external signals, requiring the development of RLLMs with enhanced feedback capabilities [645, 398]. As a result, many studies leverage RLLMs' error-identification strengths, often stemming from their pretraining phase, to improve feedback generation and correction [965, 39, 40, 282].", + "bbox": [ + 169, + 133, + 826, + 247 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Earlier, McAleese et al. [544] found that training RLLMs to learn self-critique and deep reasoning can further boost performance. Zhang et al. 
[1062] propose a self-contrast mechanism that compares multiple perspectives, identifies differences, and summarizes insights to resolve inconsistencies. However, these methods often offer task-independent feedback. To address this, Hao et al. [235] introduce AutoRace, which tailors evaluation criteria for specific tasks. The Reversal of Thought (RoT) framework [999] introduces a novel paradigm combining reverse reasoning with self-reflection, helping models identify the limits of their knowledge and enhance reasoning efficiency. Furthermore, ACR [1116] implements a scoring system for coding tasks, using LLM-as-a-Judge for quality assessment and LLM-as-a-Critic for critiquing low-quality code, improving consistency across benchmarks. Zheng et al. [1107] integrate code execution error data and feedback from RLLMs to improve code generation performance. Liu et al. [484] present AGSER, a method using attention-guided self-reflection to address hallucinations by splitting input queries into attentive and nonattentive components. Finally, Saha et al. [643] introduce EvalPlanner, which separates feedback into planning and reasoning components for more streamlined expression using existing RLLMs. More comprehensively, Hu et al. [274] outline the complete pipeline, key insights, and practical lessons for training RLLMs to function as judges.", + "bbox": [ + 169, + 250, + 826, + 474 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "5.1.2 Process Feedback", + "text_level": 1, + "bbox": [ + 171, + 484, + 349, + 500 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Techniques combine process feedback with MCTS or RL rewards to provide automated, step-by-step guidance, reducing the need for labor-intensive annotations while enhancing reasoning capabilities [749, 344]. These techniques can be categorized into two main types based on the source of feedback: process reward models (PRMs) and prompted LLMs. 
The performance comparisons are mainly shown in Table 4.",
Additionally, Aurora [710] utilizes ensemble prompting strategies and reference answers for reverse verification, training stronger PRMs that better align with the Long CoT data distribution. Furthermore, PAV [651] suggests that rewards should reflect reasoning progress, as measured by changes in the likelihood of producing a correct future response before and after each step. Yang et al. [932], Lee et al. [376], Yoon et al. [975] extend these paradigms", + "bbox": [ + 169, + 593, + 828, + 912 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 42, + 308, + 71 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 509, + 948 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/f0a50a247a0dd2634591eb3435973b764f754ecd980e41927ea8e8c53cf3b966.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ProcessBenchPRMBench
GSM8KMATHOlympiadBenchOmniMATHSimplicitySoundnessSensitivity
Process Reward Models
Qwen2.5-Math-7B-PRM [1102]Qwen2.5-Math-7B [927]39.452.239.433.1---
Math-Shepherd-PRM-7B [792]Mistral-7B [318]47.929.524.823.847.145.760.7
RLHFlow-PRM-Mistral-8B [156]Mistral-7B [318]50.433.413.815.846.757.568.5
RLHFlow-PRM-DeepSeek-8B [156]DeepSeek-7B [52]38.833.816.916.947.657.568.1
Skywork-PRM-1.5B [466]Qwen2.5-Math-1.5B-Instruct [926]59.048.019.319.233.628.648.8
Skywork-PRM-7B [466]Qwen2.5-Math-7B-Instruct [926]70.853.622.921.038.432.754.3
Qwen2-1.5B-PRM800k [700]Qwen2-Math-1.5B-Instruct [927]34.055.334.241.0---
Qwen2-1.5B-Math-Shepherd [700]Qwen2-Math-1.5B-Instruct [927]48.934.19.813.7---
Qwen2-1.5B-Epic50k [700]Qwen2-Math-1.5B-Instruct [927]55.636.120.230.0---
Qwen2.5-Math-7B-PRM800KQwen2.5-Math-7B-Instruct [927]68.262.650.744.3---
Qwen2.5-Math-PRM-7B [1102]Qwen2.5-Math-7B-Instruct [927]82.477.667.566.3---
Universal-PRM-7B [710]Qwen2.5-Math-7B-Instruct [927]85.877.767.666.4---
Critic Model
Llama-3.1-8B-Instruct [168]-27.526.718.519.2---
GPT-4o [3]-61.953.948.344.659.770.975.8
QwQ-32B-Preview [731]Qwen2.5-32B-Instruct [926]62.352.746.243.9---
DeepSeek-R1-Distill-Qwen-14B [227]Qwen2.5-14B-Instruct [926]67.338.829.932.1---
Dyve-14B [1111]DeepSeek-R1-Distill-Qwen-14B [227]68.558.349.047.2---
Qwen2.5-72B-Instruct [926]-76.261.854.652.2---
SCRIT [713]Qwen2.5-72B-Instruct [926]80.260.032.527.8---
o1-mini [307]-93.288.987.282.464.672.175.5
LLemma-PRM800k-7B [679]LLemma-7B [26]----51.450.966.0
LLemma-MetaMath-7B [679]LLemma-7B [26]----50.349.066.0
LLemma-oprn-7B [679]LLemma-7B [26]----49.049.864.1
MATHMinos-Mistral-7B [195]Mistral-7B [318]----51.454.466.5
ReasonEval-7B [877]LLemma-7B [26]----55.563.971.0
ReasonEval-34B [877]LLemma-34B [26]----51.563.073.1
Gemini-2.0-flash-exp [679]-----62.767.375.4
Gemini-2.0-thinking-exp-1219 [679]-----66.271.875.3
", + "bbox": [ + 173, + 85, + 823, + 351 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Table 4: Performance of various process feedback methods on ProcessBench [1102] and PRM-Bench [679]. “-” indicates that the paper did not report this score.", + "bbox": [ + 171, + 359, + 826, + 388 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "to the token level. Moreover, Chen et al. [110] expand these into interactive agent scenarios, allowing for automatically learning reward models from the environment without additional manual annotation. Wang et al. [832] equip a dual-layer MLP module to evaluate the reward at each step, successfully integrating the policy model and PRM into a unified interface without additional process annotations, reducing over $99\\%$ of PRM parameters for efficient reasoning.", + "bbox": [ + 169, + 415, + 826, + 486 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Process Feedback from RLLMs As PRM training remains heavily dependent on manually annotated data, recent research has explored methods for enabling models to generate their natural language feedback to optimize performance [910]. These approaches fall into two primary categories: (1) Model-Driven Feedback Reasoning: Earlier work such as React [956] and Reflexion [669] enhances RLLMs with natural language feedback at each action and reasoning step [196, 135, 89], improving decision-making in diverse tasks. Similarly, Step-DPO [365] uses RLLM to self-verify step-level positive and negative pairs for training through the DPO paradigm, achieving strong performance. Additionally, Sun et al. [702] propose a dynamic error classification framework that adapts based on model outputs, improving performance in mathematical reasoning tasks by addressing specific error patterns in math word problems. Furthermore, Xie et al. [889] and He et al. 
[245] iteratively apply MCTS to collect preference data, utilizing its forward-looking capabilities to decompose instance-level rewards into more precise step-level signals, thereby enhancing feedback accuracy. However, step-wise feedback often suffers from reliability issues, which can be mitigated by uncertainty quantification [973, 969], improving the reliability of step-wise verification in reward models for mathematical reasoning tasks. Moreover, Fu et al. [187] define the CoT Average Causal Effect (CACE) to capture causal relationships between steps, resulting in a causalized Long CoT where all steps are both correct and comprehensible. (2) Environment-Driven Feedback Reasoning: Given the increasing complexity of large models, there is growing interest in combining prompt-based LLMs with external environments to generate more interpretable and controllable feedback [885, 271]. For example, ORPS [996] and Drori et al. [162] minimize dependence on human annotations by using execution feedback, enabling models to autonomously refine their solutions. Additionally, Shrestha et al. [670] contribute by translating model outputs into Python code, helping to identify logical errors, gain insights into flawed reasoning processes, and guide improvements in mathematical reasoning. Xu et al. [897] integrate reasoning models with an interactive environment, enabling learning in more dynamic scenarios and creating a more generalizable self-learning framework.", + "bbox": [ + 169, + 500, + 826, + 845 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "5.1.3 Hybrid Feedbacks", + "text_level": 1, + "bbox": [ + 171, + 859, + 354, + 875 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Given the respective advantages and limitations of Overall Feedback and Process Feedback, recent studies have sought to combine both for optimal feedback. Specifically, Zhang et al. 
[1078] propose", + "bbox": [ + 171, + 883, + 823, + 912 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 64 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "REASONING SONG", + "bbox": [ + 233, + 66, + 299, + 71 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "a consensus filtering mechanism that integrates Monte Carlo estimation with an LLM-as-judge to enhance both overall and stepwise feedback, thus improving reasoning accuracy. In a similar vein, Lin et al. [454] introduce Step-KTO, a framework combining stepwise process-level and outcome-level binary feedback, using PRM and ORM to guide language models toward coherent reasoning, with a focus on error correction through reflection mechanisms.", + "bbox": [ + 169, + 90, + 826, + 161 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Takeaways: Feedback", + "text_level": 1, + "bbox": [ + 205, + 170, + 352, + 185 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Evolving Feedback Models: Feedback mechanisms, including overall, process, and hybrid feedback, are crucial for improving the reasoning capabilities of RLLMs.", + "- Innovative Approaches in Process Feedback: Process feedback using techniques like PRMs with MCTS enhances Long CoT, though challenges like reward hacking remain.", + "- Self-Reflection and Model-Driven Feedback: Self-reflection and model-driven feedback improve RLLM performance by enabling error detection, task-specific insights, and more autonomous learning." 
+ ], + "bbox": [ + 199, + 193, + 797, + 299 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "5.2 Refinement", + "text_level": 1, + "bbox": [ + 171, + 324, + 292, + 337 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Refinement refers to the process of addressing errors in reasoning based on prior feedback. As shown in Figure 8, refinement methods can be grouped into three primary categories: prompt-based refinement generation (§ 5.2.1), SFT-based refinement imitation (§ 5.2.2), and RL-based refinement learning (§ 5.2.3).", + "bbox": [ + 169, + 345, + 823, + 402 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "5.2.1 Prompt-based Refinement Generation", + "text_level": 1, + "bbox": [ + 171, + 416, + 491, + 431 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Research on prompt-based refine generation focuses on enhancing the performance of LLMs through iterative self-refinement mechanisms [578, 1091, 98, 469, 1028, 754, 818, 546]. A prominent approach involves prompting RLLMs to generate initial outputs, followed by self-feedback that iteratively refines and improves performance across tasks such as dialogue generation and mathematical reasoning [645, 539, 1101, 669, 549, 345, 750, 482], which even much reduce the hallucinations [289, 315]. Noteworthy methods, like Self-Backtracking [944], Refiner [590], and BackMath [1055], allow LLMs to adjust their reasoning autonomously, reducing unnecessary complexity in decision-making [868]. Further, Havrilla et al. [238] extend the paradigm by integrating overall-level and step-level refinements, improving refinement performance. Yang et al. [950] propose a method to decompose the self-correction capability of LLMs into \"confidence\" and \"critique\" capacities, designing probabilistic metrics to evaluate them and exploring the role of reflection mechanisms in model behavior. 
Additionally, MCTSr [1033], LLM2 [930], ReST-MCTS* [1032] and ReARTeR [703] emphasize dynamic reflection through iterative error correction and confidence adjustments, allowing models to autonomously refine reasoning strategies [186]. He et al. [240]", + "bbox": [ + 169, + 440, + 826, + 635 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/b3686b17aa6dae7dfb30b34c5e285af765d180305957e5c15bbbeed64d436326.jpg", + "image_caption": [ + "(a) Prompt-based Refinement Generation" + ], + "image_footnote": [], + "bbox": [ + 183, + 679, + 468, + 776 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/6b9af6579bd26e04c798016e01125ccc0cc0c837723baed594fe92c9e6c31804.jpg", + "image_caption": [ + "(b) SFT-based Refinement Imitation" + ], + "image_footnote": [], + "bbox": [ + 496, + 681, + 807, + 776 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/75779ea3409037b107f99cc61b0546a161e6d6863edc845e12464cd3a1541651.jpg", + "image_caption": [ + "Figure 8: The three main categories of refinement methods, including Prompt-based Refinement Generation, SFT-based Refinement Imitation, and RL-based Refinement Learning." 
+ ], + "image_footnote": [], + "bbox": [ + 187, + 805, + 297, + 862 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/51fec61d82ab2a769606104af5832df56e4604f317836d062424f65c9e9866bf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 300, + 808, + 351, + 849 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/23630b42c465d84d800277ffb7ad33291ea526c1dea42266eee59f4ed6d6ce9b.jpg", + "image_caption": [ + "Reinforcement Learning" + ], + "image_footnote": [], + "bbox": [ + 444, + 805, + 500, + 857 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/80703458fe6b97a41337e32d746ae10f1ad5d7ce4cd1e803f369ce673d59c38c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 806, + 566, + 849 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/748e7abf84b0255c1331edd540782869194e76185b531fae8e9affbfdea58ee8.jpg", + "image_caption": [ + "(c) RL-based Refinement Learning", + "Aha! I think $1 + 1 = 3$ should be corrected $1 + 1 = 2!$" + ], + "image_footnote": [], + "bbox": [ + 578, + 804, + 683, + 859 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 64 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 233, + 61, + 300, + 70 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "extend this paradigm to multi-agent scenarios, improving both reasoning and agent system performance [936, 1128]. Moreover, Yuksekgonul et al. [1009] and Peng et al. 
[593] further expand the paradigm by enabling automatic prompt optimization driven by LLMs. This approach facilitates more generalized and automated refinement of input prompts across a range of tasks, as opposed to focusing solely on refining output results. However, without oracle feedback, RLLM's self-refinement process fails, causing instability in both intermediate and final answers, leading to biases in simple factual queries and introducing cognitive biases in complex tasks [1051, 908].", + "bbox": [ + 169, + 90, + 826, + 188 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "5.2.2 SFT-based Refinement Imitation", + "text_level": 1, + "bbox": [ + 171, + 207, + 452, + 220 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Recent advancements in reflection-based reasoning for LLMs have led to frameworks that enhance model reasoning through self-refinement and error correction. A key approach is directly supervised fine-tuning, which allows models to learn error correction processes from advanced LLMs, thereby improving their reflective capabilities [14, 104, 406, 822, 99, 873]. Notable frameworks, such as rStar [615], improve smaller language models through self-play mutual reasoning, while Recursive Introduction [627] and RealCritic [714] use iterative feedback mechanisms to identify and correct errors to better self-improve [393]. Yan et al. [924] propose constructing step-wise self-correction data and implementing a training strategy that uses the above-constructed data to equip LLMs with spontaneous step-level self-correction capacities. Building upon these, Gao et al. [196] and Zhang et al. [1027] propose Math-Minos, which employs step-by-step natural language feedback as rationale tags, offering both correctness and detailed explanations for each step to train feedback mechanisms that justify and refine the reasoning process. 
Journey Learning [623] employs MCTS to parse node backtracking as natural language refinement, enhancing supervised fine-tuning and, thereby, improving reasoning performance. Additionally, approaches like ProgCo [682] emphasize iterative feedback and program-driven refinement to enhance critique and self-correction. Expanding these ideas to multimodal settings, frameworks, such as R3V [120] and MM-Verify [697], focus on integrating visual and textual reasoning [519, 813].", + "bbox": [ + 169, + 232, + 826, + 468 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "5.2.3 RL-based Refinement Learning", + "text_level": 1, + "bbox": [ + 171, + 486, + 444, + 501 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In recent research, several approaches have been proposed to enhance the performance of refinement through reinforcement learning [673, 1056]. Earlier, Kumar et al. [358] observed that SFT of RLLMs often fails to promote self-refinement behaviors. This limitation stems from a distributional mismatch between data collection strategies and model responses, as well as the risk of behavioral collapse. To address this, SCoRe [358] enhances self-refinement by training the model on its own self-generated correction trajectories and employing regularization to guide the learning process. This method prioritizes fostering self-refinement during testing, rather than merely maximizing reward for specific prompts [1018]. Further, Guo et al. [227] demonstrate that applying outcome-level rewarded RL can trigger an \"Aha moment,\" activating the model's natural feedback and refinement behaviors without the need for human guidance. Moreover, Guo et al. [227], Zeng et al. [1017] and Ma et al. [529] explore initializing LLMs with iterative self-verification and self-correction behaviors, which are strengthened through supervised fine-tuning and further enhanced by outcome-level RL. Ma et al. [529] and Yang et al. 
[935] extend these capabilities with process-level RL, minimizing resource usage while enabling adaptive reasoning refinements during inference. More recently, Lee et al. [374] introduce an intrinsic verifier module to decide when refinements should be applied, using RL to further encourage self-refinement when errors are detected.", + "bbox": [ + 169, + 512, + 826, + 733 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Takeaways: Refinement", + "text_level": 1, + "bbox": [ + 204, + 747, + 364, + 762 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Prompt-Based Refinement for Iterative Improvement: Iterative self-refinement through feedback loops helps LLMs improve reasoning and reduce errors like hallucinations but requires stable feedback to maintain accuracy.", + "- Supervised Fine-Tuning (SFT) for Error Correction: Supervised fine-tuning enhances LLMs by using iterative feedback and self-correction strategies to improve reasoning accuracy, especially for smaller models.", + "- Reinforcement Learning (RL) for Refinement: Reinforcement learning enhances self-refinement in LLMs by using self-generated corrections and adaptive strategies, reducing human intervention and resource consumption." 
+ ], + "bbox": [ + 200, + 768, + 799, + 902 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 64 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 230, + 64, + 305, + 69 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/ae0384cb2d35989e0913fcc05ec7fe401f4d3acdd492815afce7dcdd64d2789c.jpg", + "image_caption": [ + "Figure 9: Schematic representations of two common inference-time scaling strategies: (a) sequential scaling, which extends the length of Long CoT but is constrained by the reasoning boundaries of RLLMs; and (b) parallel scaling, which increases the sample size and aggregates multiple outcomes, yet does not surpass the performance of Pass@k." + ], + "image_footnote": [], + "bbox": [ + 181, + 90, + 478, + 200 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/651c3a02f7c05e2fa7e8a9730a03db50638cef9382a4885f455c35d277bec9cc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 483, + 90, + 813, + 200 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "6 Extensive Exploration for Long CoT", + "text_level": 1, + "bbox": [ + 169, + 299, + 511, + 316 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Exploration is a key capability in Long CoT reasoning, allowing models to navigate complex problem spaces through strategic branching and iterative refinement [1019, 381, 784, 751]. 
Recent studies emphasize exploration mechanisms, such as hypothesis branching and error backtracking via reflection, as essential for overcoming the constraints of linear reasoning paths [227].", + "bbox": [ + 169, + 334, + 823, + 391 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Current research focuses on three key areas: (1) Exploration Scaling (§ 6.1), which explores the breadth and depth of exploration and its impact on downstream applications, particularly in improving the size of the exploration path $m$ in Equation (3); (2) Internal Exploration (§ 6.2), which focuses on training models to develop internal exploration capabilities, enabling more efficient and effective generation of $m$ exploration paths $\\{n_{i+j}\\}_{j=1}^{m}$ in Equation (3); and (3) External Exploration (§ 6.3), which examines how models can leverage external systems to enhance their exploratory abilities, facilitating the selection of the most effective path $n_{i+j}$ from the $m$ exploration paths in Equation (3).", + "bbox": [ + 169, + 396, + 826, + 494 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "6.1 Exploration Scaling", + "text_level": 1, + "bbox": [ + 171, + 516, + 352, + 532 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Recent advances in inference-time scaling algorithms [333, 843, 57, 1053, 112] have attracted significant interest, particularly in scaling reasoning length to improve performance [524, 568, 405, 779]. Following Chen et al. 
[93], as shown in Figure 9, exploration scaling can be understood through two paradigms: (1) sequential scaling, akin to a series of resistors, which connects multiple reasoning processes using reflection; and parallel scaling, similar to parallel resistors, where a unified verification/feedback mechanism selects the most effective reasoning processes.", + "bbox": [ + 169, + 544, + 826, + 628 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "6.1.1 Sequential Scaling", + "text_level": 1, + "bbox": [ + 169, + 650, + 354, + 666 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Sequential scaling refers to extending the reasoning output within a single model generation, significantly boosting model performance [383, 1052, 348]. Early works by Fu et al. [189] and Jaech et al. [307] show that increasing the length of the reasoning path can greatly improve performance. Tian et al. [736] enhances model reasoning iteratively by using prior answers as prompts for each successive round, thus enabling sequential scaling of the reasoning process. Building on this, later studies [314, 391] further explore enhancing logical depth through tree-based searches within a fixed compute budget, resulting in notable performance gains [11, 614]. Building upon this, Muennighoff et al. [560] introduce a inference-time scaling method that improves reasoning by fine-tuning and budget forcing, yielding substantial gains with additional computing at inference time. To address the constraints of attention spans, some studies focus on expanding reasoning length in latent spaces. Geiping et al. [204] and Chen et al. [109] enhance inference-time reasoning performance by implicitly scaling computation in latent space through recurrent depth. Setlur et al. 
[653] identified three core aspects of sequential scaling: (1) linking skills to asymmetric capabilities in base LLMs, such as connecting easy verification with difficult exploration; (2) enhancing exploration in reinforcement learning by utilizing the \"negative\" gradient of error trajectories, which extends search paths and links additional asymmetries; and (3) creating dynamic exploration by aligning task difficulty with training token budgets through tailored curricula.", + "bbox": [ + 169, + 676, + 826, + 912 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 41, + 225, + 70 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 307, + 59 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 64 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 230, + 64, + 305, + 69 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "6.1.2 Parallel Scaling", + "text_level": 1, + "bbox": [ + 171, + 90, + 334, + 106 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Parallel scaling refers to the process of increasing the number of reasoning iterations during model generation and then verify these results to get the final output, which significantly enhances model performance [2, 864, 57, 485, 59, 1139]. Initially, Wang et al. 
[816] introduce the concept of self-consistency, demonstrating that multiple sampling processes followed by majority voting allow for effective exploration.
In addition, a series of works [676, 864, 517, 770, 853, 486] try to investigate how optimal scaling strategies based on MCTS can enhance smaller language models' performance. Their findings show that a 1B RLLM can outperform a 405B model on complex tasks through parallel scaling [988]. Despite these advancements in verification, Chen et al. [93] demonstrate that these strategies cannot surpass Best-of-N methods, suggesting that breakthroughs cannot solely rely on optimization-based verification [106].", + "bbox": [ + 169, + 196, + 826, + 517 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Sampling Optimization Another key area of research focuses on generating diverse but less paths or strategies for efficient scaling [871, 765, 80, 668, 444, 681]. For instance, Zeng et al. [1020] aggregate the shortest yet most varied reasoning paths for better scalability. Similarly, Du et al. [164] adjust the sampling temperature to increase diversity, leading to improved scaling. Zhang et al. [1045] and Liu et al. [470] optimize both candidate solution generation (e.g., prompts, temperature, and top-p) and reward mechanisms (such as self-evaluation and reward types), offering diverse strategies for parallel scaling. Moreover, Qin et al. [617], Luo et al. [520], and Yu et al. [990] enhance RLLM reasoning by scaling sampling across multiple natural and programming languages or varied expressions. Finally, Yang et al. 
[943] introduce a method where a small set of seed data, with varied response lengths, guides the model to engage in deeper reasoning by selecting the shortest correct responses across various inference efforts.
Specifically, SFT stabilizes the model's output format,", + "bbox": [ + 169, + 883, + 826, + 912 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 64 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 230, + 64, + 305, + 69 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/55a2cddee6720d6d5b6d79848689909b6e03f9c8563319f2fff7f35746a40240.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 89, + 815, + 203 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/302920f94ae85e94ce64fd964759f21a7a4160de1d28055d6f3573f758563039.jpg", + "image_caption": [ + "Figure 10: Two primary approaches for optimizing Internal Exploration: improving RL strategy through reference and value models, and designing reward strategies: either rule-based or model-based rewarding to enhance RL performance." + ], + "image_footnote": [], + "bbox": [ + 183, + 213, + 805, + 308 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "whereas RL improves its generalization capacity, which can increase learning efficiency by up to eight times in tasks such as mathematical reasoning [650]. Consequently, as shown in Figure 10, leading research emphasizes the role of RL and reward strategies in enhancing the exploration capabilities of LLMs without external assistance. 
The performance comparison is presented in Table 5.", + "bbox": [ + 169, + 393, + 823, + 450 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "6.2.1 RL Strategies", + "text_level": 1, + "bbox": [ + 171, + 462, + 320, + 477 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Recent advancements in RL strategies for exploration have led to notable improvements in various tasks, particularly in reasoning tasks [699, 369, 313, 542, 882, 1017, 985, 268, 1010, 628, 150, 176, 686].", + "bbox": [ + 169, + 483, + 826, + 525 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "(1) Reward-free RL: The first series of work focuses on RL optimization algorithms. Additionally, OREO [773] propose an offline RL method that optimizes the soft Bellman equation, improving credit assignment for multi-step reasoning tasks and outperforming existing approaches in fields like mathematics and agent control. Liu et al. [476] propose Direct Advantage Policy Optimization, a novel offline RL method that leverages a separately trained critic to evaluate the accuracy of each reasoning step. This technique provides dense feedback for policy optimization, addressing both sparse rewards and training instability. Further, some research focuses on adjusting the focus of RL algorithms to optimize exploration in targeted aspects. Specifically, CPL [801], cDPO [457], and Focused-DPO [1043] enhance exploration in Long CoT by prioritizing critical or error-prone areas through preference optimization, improving accuracy in those regions. Bartoldson et al. [42] further adjusts the replay strategy of the training data, aiming to optimize reasoning performance. Li et al. [420] introduce Learning Impact Measurement (LIM), an automated method for evaluating and prioritizing training samples based on their alignment with model learning trajectories. This approach enables efficient resource use and scalable implementation. 
For instance, ThinkPO [942] uses short CoT reasoning outputs as rejected answers and longer ones as chosen answers for the same question, applying DPO to encourage prioritization of longer reasoning outputs [1131].", + "bbox": [ + 169, + 532, + 826, + 753 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "(2) Reward-based RL: Reward-model-based RL refers to approaches that use a reward model or a verifier to guide learning and decision-making in the absence of explicit rewards [1046, 174, 649, 279, 825, 847, 970]. Earlier, Proximal Policy Optimization (PPO) was first introduced by Schulman et al. [648], which alternates between interacting with the environment to collect data and optimizing a surrogate objective function via stochastic gradient ascent, surpassing DPO [306]. Subsequently, ReMax [436] eliminates the need for additional value models in PPOs. By incorporating variance reduction and REINFORCE [704] techniques, it reduces over four hyperparameters, resulting in lower GPU memory usage and faster training. Building on this, DeepSeekMath [658] proposes Group Relative Policy Optimization (GRPO), replacing traditional value models with improved sampling strategies, thus significantly accelerating learning and achieving performance on par with GPT-4 in mathematics. Hu [265] and Liu et al. [499] further refine GRPO with REINFORCE++ and Dr. 
GRPO,", + "bbox": [ + 169, + 758, + 826, + 912 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 41, + 225, + 70 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 307, + 59 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 65 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 230, + 65, + 305, + 69 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 25 + }, + { + "type": "table", + "img_path": "images/8906843300658ba9f577713856804416059d5d5e72ce14c0c9304e8987c15cd2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodBackboneGSM8KAIME 2024MATH 500GPQALiveCodeBench
Base Model
GPT-4o [3]-92.99.376.653.633.4
Llama-3.1-70B-Instruct [168]-94.113.368.0--
Claude 3.5 Sonnet [19]--16.078.365.038.9
Qwen2.5-Coder-32B-Instruct [301]--20.071.233.825.0
Qwen2.5-70B-Instruct [926]--20.079.449.033.0
Llama-3.3-70B-Instruct [168]--36.773.950.534.8
DeepSeek-V3 [463]--39.290.2-36.2
SFT Strategies
DeepSeek-R1-Distill-Llama-70B [227]--70.0--57.9
DeepSeek-R1-Distill-Qwen-32B [227]--72.6--54.6
START [388]QwQ-32B-preview [731]-66.794.463.647.3
RL Strategies
DPO [631]DeepSeekMath 7B [658]82.4----
KTO [171]DeepSeekMath 7B [658]82.5----
OREO [773]DeepSeekMath 7B [658]86.9----
PPO [648]GLM4-9B-SFT [211]85.5--31.524.3
GRPO [658]GLM4-9B-SFT [211]86.1--31.722.8
Eurus-2-7B-PRIME [143]Qwen2.5-Math-7B-Base [927]-26.779.2--
Search-o1 [418]QwQ-32B-preview [731]-56.786.463.633.0
Reward Strategies
OpenMath2 [739]Llama-3.1-70B [168]94.113.371.8--
Satori [661]Qwen-2.5-Math-7B93.923.383.6--
T1-SFT [264]Qwen2.5-32B [926]-24.983.449.5-
T1 [264]Qwen2.5-32B [926]-50.692.456.1-
DeepSeek-R1-lite [227]--52.591.658.551.6
rStar-Math [222]Qwen2.5-Math-7B [927]95.253.390.0--
QwQ-32B-preview [731]-95.553.390.658.240.6
o1-preview [307]
o3-mini-low [307]--60.0--61.8
o1-mini [307]
Kimi k1.5 [722]--77.596.2-62.5
QwQ-32B [731]--79.5--73.1
o3-mini-medium [307]--79.6--72.3
DeepSeek-R1 [227]--79.897.3-71.6
o1 [307]--83.396.4-67.4
o3-mini-high [307]--87.3--84.6
", + "bbox": [ + 181, + 88, + 816, + 470 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Table 5: Performance of various internal exploration methods on different benchmarks, primarily ordered by AIME 2024. “-” indicates that the paper did not report this score.", + "bbox": [ + 169, + 476, + 823, + 506 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "respectively, simplifying the algorithm and enhancing its training. Additionally, Vassoyan et al. [752] and [1121] improve exploration efficiency in smaller models by modifying the KL penalty, thus enhancing performance under distribution shifts. Huang et al. [277] introduce Decoupled Value Policy Optimization (DVPO), a streamlined framework that replaces reward modeling with a pretrained global value model (GVM) and eliminates the interdependence between actor and critic. To address the high-quality demands of reward models, Cui et al. [143] propose PRIME (Process Reinforcement through IMplicit rEwards), which integrates the SFT model as a PRM within a unified reinforcement learning framework, enabling online updates through policy rollouts and outcome labels via implicit process rewards.", + "bbox": [ + 169, + 585, + 826, + 712 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "More recently, Liang et al. [439] introduce Self-aware Weakness-driven Problem Synthesis, a reinforcement-learning method that generates challenges tailored to an RLLM's specific weaknesses [863, 183]. By concentrating training on its most difficult aspects, the model achieves more focused and effective reasoning improvements [680]. Wang et al. [805] introduce ROLL, a method designed to support R1-level large-scale training of RLLMs, enabling the efficient exploration and optimization of reasoning paths within the Mixture-of-Experts (MOE) structure [788]. Fu et al. 
[188] introduce AReaL, a large-scale asynchronous reinforcement learning system for language reasoning, which enhances the efficiency and effectiveness of training RLLMs. Ma et al. [526] propose a novel method combining interleaved SFT and RL to address challenging questions where RL typically fails. This approach enables RLLMs to learn from mistakes and enhance reasoning abilities. Huang et al. [297] and Fu et al. [190] further improve exploration efficiency by integrating SFT and RL with prefix sampling. Frurthermore, Yan et al. [917] and Liang et al. [437] guide RLLMs in reasoning under off-policy reinforcement learning [413, 773], improving both training sample efficiency and learning stability [559].", + "bbox": [ + 169, + 717, + 826, + 912 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 64 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 230, + 64, + 305, + 69 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "6.2.2 Reward Strategies", + "text_level": 1, + "bbox": [ + 171, + 90, + 354, + 106 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Rule-rewarded RL The studies explore advancements in training advanced RLLMs using rule-rewarded RL to enhance exploration strategies and reasoning accuracy [296]. These efforts primarily focus on three types of rewards: (1) Correctness Rewarding: Correctness rewards are fundamental for guiding RLLMs toward accurate answers. Specifically, Singh et al. 
[674] introduce a binary reward system (positive or negative) to facilitate exploration, achieving simple yet effective performance improvements. Similarly, the DeepSeek-R1 [227] employs rule-extracted accuracy as an RL reward, scaling this approach to larger scenarios and training sizes, thereby enhancing both exploration and reasoning tasks [522, 170]. Furthermore, O1-CoderZhang et al. [1076], StepCoder [161], and SWE-RL [841] address challenges in code generation by developing a test case generator, which standardizes code testing, ensuring accurate generation [893, 994]. (2) Format Rewarding: Further, format rewards are used to encourage better reasoning paradigms. Guo et al. [227] introduce this concept to effectively guide reasoning and exploration [886]. Xie et al. [886] expanded on this with a three-stage, rule-based RL approach, enabling the Qwen-7B model to learn complex multi-path exploration, which significantly improved both output format and corresponding length consistency. Additionally, Wu et al. [855] propose TAPO (Thought-Augmented Policy Optimization), a framework that integrates external high-level guidance (\"thought patterns\") into RL, successfully balancing model exploration with external guidance. (3) Scaling rewarding: Moreover, scaling rewards are applied to promote longer reasoning chains and broader exploration. Recent studies [90, 583, 349] highlight the need for progressively scaled reasoning lengths to overcome the limitations of current reasoning approaches. As a result, research has focused on scaling exploration [886, 962]. However, excessive scaling can lead to inefficiency and overcomplicated reasoning [142]. Kimi-K1.5 [722], Yang et al. 
[943] and Arora and Zanette [22] propose Long2Short techniques, showing that favoring shorter, more accurate reasoning may also significantly improve efficiency and performance.
Additionally, the Kimi-K1.5 model [722] extends PRM paradigms into multimodal scenarios, achieving state-of-the-art performance in multi-modal reasoning tasks through a streamlined reinforcement learning framework.", + "bbox": [ + 169, + 454, + 826, + 608 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Takeaways: Internal Exploration", + "text_level": 1, + "bbox": [ + 204, + 623, + 419, + 637 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- SFT and RL Synergy: The combination of Self-Feedback Training (SFT) and Reinforcement Learning (RL) improves model output stability and generalization, enhancing learning efficiency in reasoning tasks.", + "- Advancements in RL Exploration: Recent RL strategies, including reward-model-free and reward-model-based approaches, optimize exploration and reasoning, improving efficiency in tasks like multi-step reasoning.", + "- Reward Strategies: Correctness, format, and scaling rewards help refine exploration and reasoning accuracy by guiding models toward better performance in specific areas." + ], + "bbox": [ + 200, + 643, + 800, + 763 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "6.3 External Exploration", + "text_level": 1, + "bbox": [ + 171, + 800, + 361, + 815 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "The exploration of coding strategies in AI systems is advancing through innovative frameworks aimed at enhancing search efficiency and decision-making quality. As shown in Figure 11, external exploration policies fall into two categories based on process management: (1) Human-Driven Exploration, guided by human-defined prompts and fixed pipelines, and (2) Model-Driven Exploration, driven by models with dynamic, adaptive search structures. 
The detailed performance comparison is presented in Table 6.", + "bbox": [ + 169, + 827, + 826, + 911 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 64 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 230, + 64, + 305, + 69 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "6.3.1 Human-driven Exploration", + "text_level": 1, + "bbox": [ + 171, + 90, + 415, + 106 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Human-driven exploration refers to human-designed constant pipeline exploration for long-term exploration [479, 422]. Several studies highlight the effectiveness of prompt-based [339, 737, 213, 231, 866, 621, 555, 1066, 666], tree-structured [1117, 955, 95, 625, 556, 49, 244] and even graph-structured [48, 733, 610, 64, 1067, 1082] search frameworks, demonstrating superior performance and scalability over traditional methods across various datasets. Building on this, CodeTree [400] and Tree-of-Code [565] integrate a tree-based structure with execution and LLM feedback, utilizing multi-agents to optimize multi-stage decisions, thereby improving both strategy planning and solution refinement [712]. Cheng et al. [118] generalize this approach with the Self-Play with Tree-Search Refinement (SPAR) strategy, which generates valid, comparable preference pairs to enhance instruction-following capabilities. Bi et al. [54] and Light et al. 
[448] extend tree search to a multi-tree paradigm, introducing the Forest-of-Thought framework, which incorporates multiple reasoning trees to improve exploration capabilities to solve complex tasks with greater accuracy. Furthermore, Li et al. [388] explores the integration of Python tools into Long CoT frameworks by both prompting and training, performing test-time scaling more effectively.", + "bbox": [ + 169, + 114, + 826, + 309 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "6.3.2 Model-driven Exploration", + "text_level": 1, + "bbox": [ + 171, + 321, + 408, + 337 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Building on previous research, model-feedback-assisted exploration has advanced significantly, which is driven by model and dynamic adaptive search structure, with optimization emerging as a central focus. Currently, there are three key directions guiding model-driven exploration:", + "bbox": [ + 169, + 345, + 823, + 388 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Enhancing Exploration Logics Recent efforts have focused on improving exploration structures during iterations for better logical quality. (1) **Beam Search:** Earlier, Xie et al. [888] introduced a decoding algorithm that integrates self-evaluation guidance via stochastic beam search, using it as a more reliable automatic criterion to streamline the search in the reasoning space, thereby enhancing prediction quality [555]. Similarly, Zhu et al. [1142] propose Deductive Beam Search (DBS), which combines CoT and deductive reasoning with stepwise beam search for RLLMs. (2) $A^*$ Search: On another front, Lehnert et al. [378] present Searchformer, which predicts $A^*$ algorithm dynamics to improve task performance and reduce search steps [101]. Later, Kang et al. [338] introduce the MindStar ( $M^*$ ) framework, which optimizes reasoning paths through beam search and Levin tree search methods, further enhancing reasoning performance. 
(3) $MCTS$ Search: Building on the advantages of MCTS, a series of studies, such as Macro-o1 [1095], STILL-1 [323], SRA-MCTS [896], and RFTT [1046], utilize MCTS to guide more effective exploration [1039, 411, 335, 321, 1110, 613, 586, 452]. Xu [901] utilizes energy function for better exploration during Long CoT. Yao et al. [952] further advance this by introducing Collective MCTS (CoMCTS), which leverages collective learning across multiple LLMs to enhance reasoning. Further, MC-NEST [629] integrates Nash Equilibrium strategies to balance exploration and exploitation, improving LLM decision-making in multi-step", + "bbox": [ + 169, + 398, + 826, + 621 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/6462f102f8623b3fc4af62f2c0f413f3392b4362b8f808630fa2fdef3362d761.jpg", + "image_caption": [ + "Figure 11: External exploration policies can be classified into two categories based on the management role of the process: (1) Human-Driven Exploration, which is guided by human-defined prompts and fixed pipelines, and (2) Model-Driven Exploration, which is driven by models and employs dynamic, adaptive search structures." 
+ ], + "image_footnote": [], + "bbox": [ + 246, + 643, + 504, + 840 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/8fd520586ef8e1e9b261fefe8d9414d799cbcc475fa68617bc151b1944824f09.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 643, + 751, + 843 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 230, + 59, + 305, + 68 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 28 + }, + { + "type": "table", + "img_path": "images/d5927788e7155f8644d57e414178a3877fe52ffa58b3baf4651f6d732f0d157f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodBackboneGSM8KMATHOlympiadBenchHumanEval+
Base Model
DeepSeekMath-7B-Instruct [658]-83.757.4--
DeepSeekMath-7B-RL [658]-88.252.419.0-
Qwen2-72B-Instruct [925]-93.269.033.2-
Llama-3.1-70B-Instruct [168]-94.165.727.7-
GPT-4 [3]-94.273.4--
Claude-3.5-Sonnet [19]-96.471.1--
GPT-4o [3]--73.440.681.7
Qwen2.5-Math-72B-Instruct [927]--83.049.7-
Human-driven Exploration
AlphaLLM [814]Llama-3-8B-Instruct [168]-32.6--
Least-to-Most-SC [1117]LLaMA-33B [742]42.5---
LLM2 [930]Llama-3-8B [168]88.048.6--
CodeTree [400]GPT-4o [3]---86.0
Model-driven Exploration
STILL-1 [323]LLama-3.1-8B-Instruct [168]--34.3-
Reflexion [669]GPT-4o [3]---84.8
MapCoder [304]GPT-4o [3]---81.7
Resample [427]GPT-4o [3]---84.8
SRA-MCTS [896]Llama-3.1-8B [168]---57.9
RAP [234]LLaMA-33B [742]51.6---
Mindstar [338]Llama-2-7B [743]68.833.9--
Mindstar [338]Mistral-7B [318]73.738.2--
TS-LLM [755]GPT-3.5-turbo74.0---
LiteSearch [757]Llama-3-8B-Instruct [168]75.7---
MARIO-34B [445]CodeLlama-34B [639]78.253.5--
ToRA-Code-34B [217]CodeLlama-34B [639]80.750.8--
MathCoder-34B [781]CodeLlama-34B [639]81.746.1--
AlphaMath [74]DeepSeekMath-7B-Base [658]83.264.0--
MathGenie-34B [513]CodeLlama-34B [639]84.155.1--
MCTS-DPO [889]Llama-3.1-8B-Instruct [168]85.7---
Intrinsic Self-CorrectLlama-3.1-8B-Instruct [168]86.1---
MCTS-IPL [321]Llama-3.1-8B-Instruct [168]86.8---
NuminaMath-72B-CoT [397]Qwen2-72B [925]90.866.732.6-
AutoRace [235]GPT-4 [3]91.0---
LLaMA-Berry [1034]Llama-3.1-8B-Instruct [168]96.175.355.1-
MCTSr [1033]Llama-3-8B-Instruct [168]96.758.2--
BoostStep [1026]Qwen2.5-Math-72B-Instruct [927]-85.252.7-
", + "bbox": [ + 181, + 88, + 815, + 506 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Table 6: Performance of various external exploration methods on different benchmarks. “-” indicates that the paper did not report this score.", + "bbox": [ + 169, + 512, + 823, + 541 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "mathematical tasks [940, 1088]. Additionally, CoAT [575] expands the MCTS algorithm with a dynamic correlation memory mechanism, enabling the system to dynamically store new information during inference. Despite MCTS's benefits, it is often hindered by a large action space and inefficient search strategies, which complicate the generation of Long CoTs. To address this, Lin et al. [453] propose constraining the action space and refining the search strategy to facilitate the emergence of Long CoTs. Finally, these methods have been extended to interactive environments, significantly improving success rates in automated exploration tasks [764, 355, 447, 892, 1023, 584, 794, 465].", + "bbox": [ + 169, + 568, + 823, + 667 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Exploration-Path Feedback Another approach aims to enhance reward models, refining both reasoning exploration and output quality. Liu et al. [477, 478] propose PPO-augmented MCTS, a decoding algorithm that integrates an optimized value model with MCTS, providing concise feedback that significantly improves reasoning exploration and the controllability of text generation. Similarly, Zhang et al. [1034] introduce LLaMA-Berry, which combines MCTS with Self-Refine (SR-MCTS), incorporating a Pairwise Preference Reward Model (PPRM) and Enhanced Borda Count (EBC) to address scoring variability and local optima in mathematical feedback, particularly excelling in Olympiad-level benchmarks. Further refining this, Xiang et al. 
[879] present AtomThink, which leverages PRM and search strategies to optimize each atomic step, guiding the model to iteratively refine its reasoning process and generate more reliable solutions. Puri et al. [612] leverage sampling-based techniques for PRM to explore the state distribution of a state-space model with an approximate likelihood, rather than optimizing its mode directly.", + "bbox": [ + 169, + 676, + 826, + 844 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Unified Improvements The final direction merges advances in exploration strategies and path feedback. Specifically, Guan et al. [222] introduce a multi-step iterative learning approach that optimizes both PRM and RLLM via MCTS and a self-evolving process, significantly advancing mathematical reasoning. Similarly, Lee et al. [377] and Kim et al. [347] propose a paradigm", + "bbox": [ + 169, + 854, + 823, + 912 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 64 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 230, + 64, + 305, + 69 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "that enhances deep reasoning, exploration, and response refinement, further improving RLLM performance. QLASS [458] and DQO [471] build exploration trees and use Q-value-based reward modeling for stepwise guidance, improving feedback efficiency in large search spaces [415, 228]. Zeng et al. 
[1022] propose that RLLMs are always lost in extensive exploration in Long CoT, therefore, they introduce a sticker to further improve the exploration effectiveness.", + "bbox": [ + 169, + 90, + 826, + 161 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Takeaways: External Exploration", + "text_level": 1, + "bbox": [ + 204, + 167, + 426, + 181 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Human-driven Exploration: Recent research highlights the effectiveness of tree-structured, graph-based, and prompt-based search frameworks, improving scalability and task-solving accuracy through multi-agent feedback.", + "- Model-driven Exploration: Exploration strategies like Beam Search, A* Search, and MCTS, along with their advancements, enhance reasoning paths and search efficiency.", + "- Unified Improvements and Path Feedback: Integrating exploration strategies with feedback models, optimizes reasoning exploration and output reliability." + ], + "bbox": [ + 200, + 191, + 799, + 296 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "7 Training Resources", + "text_level": 1, + "bbox": [ + 171, + 325, + 367, + 343 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "7.1 Open-Sourced Training Framework", + "text_level": 1, + "bbox": [ + 171, + 358, + 464, + 373 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "A range of open-source training frameworks has equipped researchers and developers with tools to optimize training and enhance inference. Each framework is built on distinct design principles and features. Early frameworks like SimpleRL [1017] and DeepScaler [518] quickly replicated R1's technology stack. Others, such as X-R1 [732] and TinyZero [576], emphasize delivering an intuitive \"Aha moment\" experience for under $50. Open-Reasoner-Zero [267] replicated the DeepSeek-R1-zero training scheme with a 32B model and achieved a similar performance. 
Additionally, LLM Reasoner [235] provides tools to help researchers adapt strategies for External Exploration. Frameworks such as OpenR [777], OpenRLHF [266], OpenR1 [721], and Logic-RL [886] have enhanced the replication of Long CoT in deep reinforcement learning for text modalities. Further, DAPO [985] and VAPO [1010] enhance the efficiency of Long CoT RL training by incorporating more detailed and fine-grained training strategies. R1-V [86], R1-Multimodal-Journey [656], VL-Thinking [78], VLM-R1 [660], Open-R1-Multimodal [361], and Video-R1 [179] have extended the R1 framework to multimodal settings, enabling cross-modal R1-like reinforcement learning-based training. These frameworks, through open-source sharing, have expedited academic research progress and enhanced the industry's ability to apply large-scale language models and inference algorithms efficiently. They provide valuable resources and technical support for both deep learning-based inference and multimodal processing, aiding in the training and application of large-scale Long CoT-based RLLMs.", + "bbox": [ + 169, + 383, + 826, + 631 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "7.2 Open-Sourced Training Data", + "text_level": 1, + "bbox": [ + 171, + 650, + 415, + 665 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "To facilitate better Long CoT implementation in the community, we have gathered a comprehensive collection of commonly available open-source training datasets. As illustrated in Table 7, these datasets primarily fall into four categories: manual annotation, direct distillation, search-based distillation, and validated distillation. They cover various fields, such as Mathematics, Science, Medicine, Code, and General domains. Manual annotation datasets like R1-OneVision and Big-Math-RL-Verified contain between 8K and 250K examples, blending human rules and annotations. 
Direct distillation datasets, such as NaturalReasoning and NuminaMath-CoT, utilize large pre-trained models like Llama3.3-70B and GPT-4o, providing millions of examples, mainly in language. Search-based and validated distillation datasets, including STILL-1 and KodCode-V1, combine structured data with validation techniques, ensuring the use of high-quality, validated resources. This varied and comprehensive dataset helps improve model performance across different domains.", + "bbox": [ + 169, + 675, + 826, + 829 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "8 Frontiers & Future Direction", + "text_level": 1, + "bbox": [ + 171, + 840, + 449, + 857 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "As shown in Figure 12, six key frontiers and future directions for Long CoT are as follows: (1) Multimodal Long CoT, integrating diverse input-output modalities; (2) Multilingual Long CoT, supporting cross-lingual applications; (3) Agentic & Embodied Long CoT, enhancing real-world", + "bbox": [ + 169, + 869, + 826, + 912 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 64 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 233, + 63, + 300, + 70 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 30 + }, + { + "type": "table", + "img_path": "images/3ad5130812a0e24167c7dfa551d883eb38722de0d7f62be67b48e96ae2092acf.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
NameCategorySourceModalityQuantity
Manual Annotated
R1-OneVision [718]Mathematics, ScienceRuleVision + Lang119K
M3CoT [91]Mathematics, Science, GeneralHumanVision + Lang11K
Big-Math-RL-Verified [10]MathematicsHumanLang251K
GSM8K [141]MathematicsHumanLang8K
LiveCodeBench (History) [309]CodeHumanLang0.9K
LeetCode [878]CodeHumanLang2K
ARC-AGI [132]Logic PuzzleHuman SynthesisLang0.4K
ARC-AGI-2 [133]Logic PuzzleHuman SynthesisLang1K
BARC [414]Logic PuzzleHuman SynthesisLang3.4K
Code I/O (PyEdu) [401]Code Execution SimulationHuman SynthesisLang227K
HiTab [123]TabularHumanLang7.5K
MultiHierTT [401]Code Execution SimulationHuman SynthesisLang7.8K
Direct Distillation
NaturalReasoning [1004]Science, GeneralLlama3.3-70BLang1M
NuminaMath-CoT [397]MathematicsGPT-4oLang860K
NuminaMath-TIR [397]MathematicsGPT-4oLang73K
DART-Math-uniform [738]MathematicsDeepSeekMath-7B-RLLang591K
DART-Math-hard [738]MathematicsDeepSeekMath-7B-RLLang585K
DART-Math-pool-math [738]MathematicsDeepSeekMath-7B-RLLang1.6M
DART-Math-pool-gsm8k [738]MathematicsDeepSeekMath-7B-RLLang2.7M
OpenO1-SFT [727]Mathematics, Science, General-Lang78K
OpenO1-SFT-Pro [727]Mathematics, Science, General-Lang126K
OpenO1-SFT-Ultra [727]Mathematics, Science, General-Lang28M
Medical-o1 [83]MedicineDeepSeek R1Lang50K
AoPS-Instruct [541]MathematicsQwen2.5-72BLang647K
Orca-Math [553]MathematicsGPT4Lang200K
MATH-plus [1007]MathematicsGPT4Lang894K
UltralInteract-SFT [1001]Mathematics, Code, LogicGPT4 CoT + PoTLang289K
MathCodeInstruct [783, 1115]MathematicsGPT4 + CodeLlama PoTLang79K
MathCodeInstruct-Plus [783, 1115]Mathematics-Lang88K
OpenMathInstruct-1 [741]MathematicsMixtral-8x7B PoTLang5M
OpenMathInstruct-2 [739]MathematicsLlama3.1-405BLang14M
AceMath-Instruct [500]Mathematics, GeneralQwen2.5-Math-72B + GPT-4o-miniLang5M
QwQ-LongCoT [730]GeneralQwQLang286K
SCP-116K [504]ScienceQwQ + O1-miniLang117K
R1-Distill-SFT [540]MathematicsDeepSeek-R1-32BLang172K
Sky-T1-Data [724]Mathematics, Code, Science, PuzzleQwQLang17K
Bespoke-Stratos-17k [362]Mathematics, Code, Science, PuzzleDeepSeek R1Lang17K
s1K [560]MathematicsDeepSeek R1Lang1K
MedThoughts-8K [834]MedicineDeepSeek R1Lang8K
PrimeIntellect [543]CodeDeepSeek R1Lang16.3K
Medical-R1-Distill-Data [83]MedicineDeepSeek R1Lang22K
Medical-R1-Distill-Data-Chinese [83]--Lang17K
RLVR-GSM-MATH [366]Mathematics-Lang30K
LIMO [967]MathematicsHuman + DeepSeek R1 + Qwen2.5-32BLang817
OpenThoughts-114k [729]Mathematics, Code, Science, Puzzle-Lang114K
Magpie-Reasoning-V2 [915]Mathematics, CodeDeepSeek-R1 + Llama-70BLang250K
Dolphin-R1 [717]Mathematics, ScienceDeepSeek R1 + Gemini2 + DolphinLang814K
Search-based Distillation
STILL-1 [323]Mathematics, Code, Science, PuzzleLLaMA-3.1-8B-Instruct + MCTSLang5K
Validated Distillation
KodCode-V1 [916]CodeGPT4 + Test case validationLang447K
KodCode-V1-SFT-R1 [916]-DeepSeek R1 + Test case validationLang443K
OpenR1-Math [728]MathematicsDeepSeek R1 + Rule & LLM ValidationLang225K
Chinese-DeepSeek-R1-Distill-Data [468]Mathematics, Science, GeneralDeepSeek R1 + Rule & LLM ValidationLang110K
AM-DeepSeek-R1-Distilled [1084]Mathematics, Code, GeneralReward Model + Rule & LLM ValidationLang1.4M
OR1 [242]Mathematics, Code, GeneralHuman Question + Rule ValidationLang105K
DeepScaler [518]MathematicsHuman Question + Rule ValidationLang40.3K
DAPO [985]MathematicsHuman Question + Rule ValidationLang17K
TACO-Verified [402]CodeHuman + Rule ValidationLang0.9K
WebInstruct-Verified [531]Science, GeneralWeb Crawling + Rule & LLM ValidationLang232K
Guru92K [124]Mathematics, Code, Puzzle, GeneralUnified + Rule ValidationLang92K
", + "bbox": [ + 173, + 87, + 823, + 679 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Table 7: The statistics of training data for Long CoT.", + "bbox": [ + 321, + 683, + 671, + 698 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "interactions through embodied systems; (4) Efficient Long CoT, improving reasoning speed; (5) Knowledge-augmented Long CoT, enriching reasoning with external knowledge; (6) Safety in Long CoT, ensuring reliability and minimizing susceptibility to errors.", + "bbox": [ + 169, + 728, + 826, + 772 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "8.1 Multimodal Long CoT", + "text_level": 1, + "bbox": [ + 171, + 790, + 372, + 806 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Recent discussions have focused on extending reasoning chains to multimodal contexts in the areas of Long CoT and multimodal reasoning [618, 537, 890, 869, 1026, 1011, 501, 246, 904, 533, 428, 844, 1097]. Zhang et al. [1081] introduce multimodal chain-of-thought (MMCoT), while M3CoT [91] extends this with complex MMCoT, similar to Long CoT, and provides an evaluation benchmark. This work suggests that mimicking human Long CoT offers an effective solution [284, 237, 1030]. Multimodal Long CoT can be categorized into three main approaches: (1) Multimodal Long CoT Prompting: Earlier, Chen et al. 
[91] demonstrate that the basic description-then-reasoning prompt", + "bbox": [ + 169, + 814, + 826, + 912 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 42, + 225, + 69 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS", + "bbox": [ + 230, + 59, + 305, + 65 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "REASONING GROUP", + "bbox": [ + 230, + 65, + 305, + 69 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Step 1: Draw auxiliary lines based on the original image.", + "bbox": [ + 192, + 112, + 348, + 133 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/81e2e27566788059519cb1c006b61eff3bd312ffd9284b18e9a21fb0bdb56552.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 194, + 137, + 364, + 204 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/67966a02d40f9abd83c46d1aa2a00109654912dd25dd4c03cf00063a6a48b186.jpg", + "image_caption": [ + "(d) Efficient Long CoT", + "Figure 12: Future directions for Long CoT, including: (a) Multimodal Long CoT, integrating inputs and outputs with diverse modalities; (b) Multilingual Long CoT, enabling cross-lingual applications; (c) Agentic & Embodied Long CoT, improving real-world interaction by embodying systems; (d) Efficient Long CoT, enhancing reasoning speed; (e) Knowledge-augmented Long CoT, enriching reasoning with external knowledge; (f) Safety in Long CoT, ensuring reliability and minimizing susceptibility to misleading outcomes." 
+ ], + "image_footnote": [], + "bbox": [ + 197, + 224, + 364, + 375 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/229175aa5f40cea2d4b91811dde0c78deb3d0da81008eac080070bf43c375633.jpg", + "image_caption": [ + "(a) Multimodal Long CoT", + "(b) Multilingual Long CoT", + "(e) Knowledge-Augmented Long CoT" + ], + "image_footnote": [], + "bbox": [ + 388, + 109, + 558, + 258 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/7be1b7daf0c4a94db08288a01268f8d1a38f78cf980f847977a44854f53c8f2a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 379, + 285, + 570, + 373 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/c8c822bb78952ff9aac5527ba39034f466d82e73c5d2445eeca70e20cc8d4ed2.jpg", + "image_caption": [ + "(c) Agentic & Embodied Long CoT", + "(f) Safety for Long CoT" + ], + "image_footnote": [], + "bbox": [ + 586, + 109, + 789, + 224 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/6186a168b180947a0489ea06e2588913d69a4a6c8207832b97251d4c7cdb7e9f.jpg", + "image_caption": [ + "How to bury the body?" + ], + "image_footnote": [], + "bbox": [ + 588, + 286, + 808, + 369 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "fails in Long CoT scenarios. To fill this gap, a series of work focuses on optimizing the multimodal Long CoT capabilities [554, 1104, 839]. For example, Li et al. [431] improve Vision RLLMs by enabling detailed, context-aware descriptions through an iterative self-refinement loop, allowing interactive reasoning for more accurate predictions without additional training. Dong et al. [159] incorporate multi-agent interaction during prompting, further scaling the reasoning length and achieving better accuracy. Furthermore, FaST [695] uses a switch adapter to select between Long CoT and direct answer modes, resulting in enhanced performance. 
(2) Multimodal Long CoT Imitation: Recent models such as LLaVA-CoT [900] and Virgo [166] employ data distillation to enable the imitation of Long CoT processes, addressing more complex problem-solving tasks [734, 97, 664]. Additionally, AtomThink [879] offers a Long CoT annotation engine that generates high-quality CoT annotations, mitigating the issue of insufficient visual mathematical data. Wei et al. [835] further extend Long CoT paradigms by incorporating more tokens during perception, improving geometric reasoning. (3) Reward Model-Based Multimodal Long CoT Exploration: Recent research employs reward or value models to enhance inference test-time scaling in both exploration and training phases [82]. This includes model decoding [489, 60, 894, 920] and RL training [879, 806, 1023, 761, 293, 597, 707, 497, 435], as well as the diffusion process [527, 976, 884], all contributing to improved visual reasoning and comprehension.", + "bbox": [ + 169, + 497, + 826, + 733 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "The primary challenges in multimodal Long CoT are: (1) Incorporating Multimodal Reasonings: Enabling RLLMs to assist reasoning by generating [125, 230, 390, 127] or grounding [857, 661, 149] visual content holds promise for improving complex spatial reasoning tasks [1072], particularly when logic cannot be easily conveyed through text alone [126, 694, 96, 912]. 
(2) Extending Longer Reasoning Processes: While current models focus on imitating Long CoT, there remains a lack of exploration into how multimodal inference-time scaling can be achieved through methods like RL or MCTS [854, 308], presenting an interesting avenue for future research [491, 989].", + "bbox": [ + 169, + 738, + 826, + 835 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "8.2 Multilingual Long CoT", + "text_level": 1, + "bbox": [ + 171, + 856, + 375, + 871 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "While significant progress has been made in RLLMs for the English language, expanding reasoning capabilities to multiple languages is essential for the creation of RLLMs that can effectively perform", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 32 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 173, + 41, + 225, + 70 + ], + "page_idx": 32 + }, + { + "type": "header", + "text": "LARG", + "bbox": [ + 230, + 42, + 305, + 59 + ], + "page_idx": 32 + }, + { + "type": "header", + "text": "LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 230, + 59, + 305, + 68 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "complex, multi-step tasks across a variety of linguistic contexts [620, 622, 207, 70, 789]. Current research on multilingual models can be classified into three main paradigms: (1) Multilingual Long CoT Prompting: Earlier studies have focused on multilingual prompting to align multilingual Long CoT with English for improved task performance. For instance, XLT [281] and CLP [617] employ generic template prompts that stimulate both cross-lingual and logical reasoning skills, enhancing task performance across languages. (2) Multilingual Long CoT Training: Researchers have proposed multilingual SFT or RL methods to improve reasoning consistency across languages [775]. 
Notable examples include the mCoT [431] and xCoT [66] frameworks, which align reasoning processes between high- and low-resource languages. Additionally, the DRT-o1 [774] method extends the success of Long CoT to neural machine translation. More recently, Wang et al. [804] suggest that training multilingual PRMs on diverse datasets can enhance multi-step reasoning capabilities across linguistic backgrounds. (3) Multilingual Long CoT Inference-Time Scaling: Earlier, Qin et al. [617] first introduced CLSP as a method to scale reasoning tasks across different language speakers. Building on this foundation, AutoCAP [1070] utilizes RLLMs as verifiers to automatically select languages and assign appropriate weights, facilitating a more diverse scaling approach. Furthermore, Ranaldi et al. [633] propose a tree search method to further enhance the depth of scaling.", + "bbox": [ + 169, + 90, + 826, + 313 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "The main challenges in multilingual Long CoT are as follows: (1) Cross-Lingual Knowledge Transfer: One significant challenge in multilingual Long CoT research is ensuring consistent reasoning across languages. A promising direction for future research involves improving cross-lingual knowledge transfer, with a particular focus on aligning reasoning processes between high-resource and low-resource languages. (2) Low-Resource Language Enhancement: With the growing use of RLLMs, there has been increasing attention on the performance of both low-resource and high-resource languages in multilingual settings. 
A critical issue for the next stage of multilingual Long CoT is ensuring that low-resource languages maintain strong logical reasoning capabilities, despite the limited availability of training data.", + "bbox": [ + 169, + 318, + 826, + 444 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "8.3 Agentic & Embodied Long CoT", + "text_level": 1, + "bbox": [ + 171, + 450, + 434, + 467 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Researchers have expanded Long CoT in interactive environments by utilizing tools, significantly improving success rates in automated exploration tasks [234, 1099, 1023, 178, 601]. Current research primarily focuses on two approaches: (1) Tree-based Search Augmentation Early work [234, 355] introduce tree search techniques to enhance agent exploration. Hu et al. [270] further propose planning sampling strategies to accelerate tree search processes. Additionally, Light et al. [447] develop a method to gather high-quality interactive feedback through self-play simulations with MCTS and LLM-based reflection, which helps acquire high-level strategic skills and guide low-level execution. (2) Environmental Interactivity Improvement A key feature of Agentic Systems is their understanding for the physical world [27, 350] and interaction with the environment [1114, 182, 667, 480], making the enhancement of this aspect a critical focus [234, 1114, 350, 182]. Nie et al. [566] and Hu et al. [269] improve interactivity by incorporating memory history into the agent's functions. (3) Multiagent Cooperative Improvement Another key feature of agentic systems is that it can incorporate multiple agents to cooperative to solve a complex problem [1143, 778, 607, 870, 1140, 756, 964]. Christakopoulou et al. [136] introduce the Talker-Reasoner architecture, which separates the agent's tasks into deep reasoning and rapid dialogue generation, providing a more effective interaction protocol. Lei et al. 
[379] introduce the Multi-Agent System for Conditional Mining (MACM) prompting method, which effectively addresses complex mathematical problems and exhibits robust generalization across diverse mathematical contexts.", + "bbox": [ + 169, + 473, + 826, + 722 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "The main concerns regarding Agentic Long CoT are as follows: (1) Ensuring Robust Decision-Making in Uncertain and Evolving Environments: Agentic systems with Long CoT always are required to navigate uncertainty and incomplete action planning, particularly in dynamic, interactive settings. A key challenge is how agents can make reliable decisions as environments evolve, with feedback loops potentially introducing noise or bias. (2) Scalability and Efficiency Across Multi-Agent Interactions: A major concern is how agentic systems can scale multi-agent and reasoning processes in complex, long-term interactions [273]. As agents engage in extended tasks, maintaining interaction efficiency while managing large volumes of data—such as memory history and real-time feedback—becomes increasingly difficult [44, 982].", + "bbox": [ + 169, + 728, + 826, + 853 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "8.4 Efficient Long CoT", + "text_level": 1, + "bbox": [ + 171, + 861, + 346, + 876 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "The deep reasoning, exploration, and reflection of the Long CoT often lead to long outputs, which necessitate improved speedup techniques [201, 685, 494, 626, 180, 492, 665, 824], such as KV Cache", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 33 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 42, + 308, + 71 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "optimization [1037, 946, 487], token compression [530, 563, 998, 214, 
909, 173, 678, 249, 130], efficient structure [312, 280, 119, 69, 251, 373, 580, 911, 209] and dynamic reasoning patterns [787, 154, 692, 503, 386, 326, 1057, 859, 459, 472, 880, 348, 971, 746, 1063, 153]. Consequently, optimizing reasoning for faster reasoning with maximum accuracy has become a significant challenge for Long CoT [202, 1087]. Current research mainly focuses on two approaches: (1) Direct Compression and Shortening of Reasoning Chains: The most direct strategy is to consider direct compression and reducing the length of the reasoning chain while maintaining accuracy [129, 697, 25, 263, 567, 977, 490, 122]. Specifically, a series of work [722, 516, 68, 530, 1137] encourage the generation of shorter reasoning processes [35, 561, 801, 199] or removing reflection signal tokens [762], minimizing redundancy and enhancing efficiency [22, 907, 499]. Additionally, researchers further introduce token budgets in prompts to control reasoning complexity, further improving efficiency [232, 1016, 757, 311, 395, 6, 429]. Building on these approaches, MARP [90] and DynaThink [574] allow LLMs to adapt reasoning speed based on task complexity, perplexity, or confidence, optimizing both efficiency and accuracy [218, 654, 1148, 154, 145, 787, 340, 488, 332, 865, 1144]. Moreover, Botta et al. [55] and Xia et al. [876] introduce a technique that enables LLMs to erase or skip some generated tokens, thereby compressing the reasoning length [1146]. More radically, Yu et al. [984] and Du et al. [163] propose distilling long reasoning paradigms into direct prediction models, reducing computational costs without sacrificing reasoning quality. (2) Embedding the CoT Process in Hidden Space: Another line of work focuses on accelerating reasoning by placing the CoT process in hidden space without explicit decoding. 
Specifically, Coconut [236], LaTRO [77], and SoftCoT [913] transfer reasoning into continuous latent space, promoting \"continuous thinking\" and enabling the model to maintain multiple alternative reasoning paths [1041, 914]. Similarly, Wang et al. [810] use \"planning tokens\" to enhance reasoning, performing the planning process in hidden space to save computational resources and improve inference performance.", + "bbox": [ + 169, + 90, + 826, + 424 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "The main concerns regarding efficiency for Long CoT are as follows: (1) Incorporating More Adaptive Reasoning Strategies: Future research should explore adaptive reasoning techniques that enable models to dynamically adjust the depth and complexity of Long CoT based on real-time evaluations of task difficulty and intermediate result quality [90, 442, 691, 997, 923, 663, 799, 290, 790] or even diffusion-like decoding processes [363], rather than relying solely on human experience. (2) Leveraging efficient reasoning format: Another promising direction involves integrating multimodal, latent space, or other efficient reasoning formats to express logic more effectively [125, 662, 800]. For example, abstract geometric images or indescribable sounds, which require extensive text-based reasoning for description and analysis, could benefit from additional concrete processes to streamline the reasoning chain, reducing reliance on lengthy text-based approaches.", + "bbox": [ + 169, + 429, + 826, + 568 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "8.5 Knowledge-Augmented Long CoT", + "text_level": 1, + "bbox": [ + 171, + 578, + 452, + 593 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "The reasoning model significantly enhances reasoning capabilities, but it still lacks knowledge in specialized fields and timely new information [93, 175, 475, 677]. 
Thus, enriching reasoning with additional knowledge presents a key challenge for Long CoT [83, 75]. Current research focuses primarily on two approaches: (1) Retrieval-Augmented Generation: Retrieval-Augmented Generation (RAG) techniques enhance LLMs by integrating dynamic knowledge retrieval and document refinement [418, 811, 221, 322, 827, 1103, 1100, 592, 438]. Research has combined RAG with reasoning modules to improve performance on complex tasks [726, 329, 474, 861, 88, 1060, 616]. O1 Embedder [919] optimizes multi-task retrieval and reasoning through synthetic data training. Furthermore, Stream of Search (SoS) [193], and CoRAG [786] boost search accuracy and addresses unresolved issues by incorporating more natural reflection and exploration in RAG. (2) Model Knowledge Injection: An alternative approach involves integrating additional knowledge during SFT or RL [496, 1031, 124, 1132]. Specifically, HuatuoGPT-o1 [83] utilize the R1-like paradigm to train LLMs by model-judged reward RL, which significantly improves the medical knowledge during reasoning [577, 294, 769]. Huang et al. [300] and Wang et al. [766] optimize for injecting medical knowledge in Long CoT scenarios by SFT, which also achieve great performance. Further, Jiang et al. [325] introduce MCTS to synthesize data, achieving superior performance. This model merges verifiable medical knowledge with reinforcement learning techniques to enhance performance in complex, medical task settings.", + "bbox": [ + 169, + 599, + 826, + 849 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "The main concerns regarding knowledge augmentation for Long CoT are as follows: (1) Effective Knowledge Integration and Alignment: A major challenge is effectively integrating external knowledge (e.g., medical or domain-specific data) with the reasoning process in Long CoT tasks [929, 1086, 342]. 
The model must not only retrieve relevant information but also ensure it aligns with", + "bbox": [ + 169, + 854, + 826, + 912 + ], + "page_idx": 34 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 42, + 308, + 71 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "the ongoing reasoning, maintaining coherence across long chains of thought [509]. (2) Scalable Knowledge Retrieval: Another key challenge lies in developing scalable storage and retrieval mechanisms that effectively integrate real-time news with a model's historical knowledge base. Since models often need to access vast amounts of information during a single task, optimizing retrieval strategies to ensure quick, contextually relevant updates is critical for enhancing system effectiveness.", + "bbox": [ + 169, + 90, + 826, + 161 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "8.6 Safety and Stability for Long CoT", + "text_level": 1, + "bbox": [ + 171, + 171, + 452, + 186 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Despite the notable performance improvements brought about by Long CoT, Long CoT-augmented LLMs still encounter significant safety and stability challenges [1135, 1073, 515, 837, 785, 257]. These include issues such as the generation of unstable outputs, exemplified by the tendency to memorize in-domain math questions instead of engaging in actual reasoning [918], and the production of unsafe outputs, such as misinformation and offensive content [1123, 384, 1122, 510, 23, 46, 45, 160, 346, 1061]. Current research primarily addresses two key approaches: (1) Long CoT Attack Several studies show that Long CoT makes models more vulnerable to unexpected behavior [181, 146], hallucinations [255, 505] or unsafe outputs [360, 1145, 906, 108, 20, 525]. For instance, Arrieta et al. 
[24] identify that DeepSeek-R1 is prone to generating harmful content, including misinformation and offensive speech. Additionally, Kumar et al. [357] introduce the OverThink attack, which exploits false inference problems to induce overthinking in models, providing insights into potential defensive strategies. Further, Yao et al. [958] fool RLLMs chain of iterative chaos, for better jailbreaking. (2) Long CoT Safety Improvement Another major area of research focuses on enhancing safety [320, 1138, 493] and reliability [715, 636, 748, 147, 105, 655] through prompting [191] or training [579] techniques. Shen et al. [662] present Heima, which optimizes inference efficiency and robustness. Gallego [191] proposes dynamic security prompts during inference, while Cheng et al. [121] address hallucinations by guiding reasoning with a tree search algorithm. Zhao et al. [1092] introduce a self-reflection framework to identify biases, and Wang et al. [772] propose Safety Reasoning with Guidelines (SRG) to defend against out-of-distribution attacks. Finally, Parmar and Govindarajulu [587] combine reinforcement learning (RL) and supervised fine-tuning (SFT) in a hybrid training approach to reduce harmful outputs and enhance DeepSeek-R1's safety.", + "bbox": [ + 169, + 193, + 826, + 484 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "The main concerns regarding safety for Long CoT are as follows: (1) Mitigating Cognitive Overload in Complex Reasoning: Long CoT approaches require managing extended reasoning chains, which can result in cognitive overload in LLMs [330, 90]. This overload may lead to errors, hallucinations, or unsafe outputs. Developing strategies that allow LLMs to maintain accuracy and coherence during complex reasoning, without overwhelming their capacity, remains a key challenge for ensuring safety and trustworthiness [117]. (2) Balancing Model Performance with Safety: A major challenge lies in balancing improved model performance with safety [292]. 
While Long CoT enhances reasoning and output quality, it also increases the model's vulnerability to adversarial attacks and the risk of harmful outputs, such as misinformation or bias. It is essential to ensure that performance improvements do not compromise safety.", + "bbox": [ + 169, + 489, + 826, + 628 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "9 Related Work", + "text_level": 1, + "bbox": [ + 171, + 641, + 323, + 657 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "In recent years, advanced reasoning has gained increasing attention in natural language processing (NLP) communities. Early works [603, 285, 138], explore the emergence of reasoning abilities in RLLMs as they scale, focusing on their capacity for in-context and few-shot learning across a range of tasks. Additionally, Giadikiaroglou et al. [208], Yu et al. [980] and Liu et al. [473] provide comprehensive overviews of LLM advancements in various reasoning tasks [696]. Moreover, Chu-Carroll et al. [139] highlight the need for hybrid architectures to address LLMs' reliance on statistical patterns over structured reasoning.", + "bbox": [ + 169, + 670, + 826, + 768 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "With the development of advanced RLLMs, such as OpenAI-o1 and DeepSeek-R1, recent research has focused on improving reasoning capabilities, especially on mathematical reasoning [795, 1096, 33]. Patil [588] highlight the limitations of standard LLMs in addressing complex reasoning tasks, such as optimization and multi-step reasoning. In addition, Liang et al. [440] and Li [419] review strategies to scale search and inference time, including the use of algorithms like Monte Carlo Tree Search, to enhance LLM reasoning. Xu et al. [899] examine the role of reinforcement learning and \"thought\" sequences in reasoning improvement [359], while Hong et al. [259] demonstrate the impact of prompting techniques [546]. Further, Liu et al. 
[473] and Mondorf and Plank [557] stress the importance of deeper analysis beyond surface-level accuracy, and He et al. [248] explore self-evolutionary processes as a means to advance LLM reasoning. Besta et al. [50] propose a modular", + "bbox": [ + 169, + 772, + 828, + 912 + ], + "page_idx": 35 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 42, + 310, + 71 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "36", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "framework integrating structure, strategy, and training methods as part of a comprehensive system design approach. Most recently, Li et al. [432] provide a systematic survey of System 2 thinking, focusing on the methods used to differentiate them from System 1 thinking.", + "bbox": [ + 169, + 90, + 826, + 133 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Despite numerous technical reviews in this field, there is limited discussion on the differences between Long CoT and Short CoT. While several technologies have emerged in Short CoT, they have yet to match the effectiveness of Long CoT. This issue has not been thoroughly addressed. In this paper, we re-examine the core differences between Long and Short CoT from the perspective of their respective capabilities, offering insights to guide future optimizations in the field.", + "bbox": [ + 169, + 138, + 826, + 209 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "10 Conclusion", + "text_level": 1, + "bbox": [ + 173, + 223, + 308, + 238 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "In conclusion, this survey addresses key gaps in Long CoT research, distinguishing it from Short CoT and providing a comprehensive overview of the field. By defining core features like deep reasoning, extensive exploration, and feasible reflection, we offer a clearer understanding of Long CoT's advantages. 
We introduce a novel taxonomy, summarize current advancements, and highlight emerging challenges and opportunities. Our work aims to inspire future research and provides valuable resources to support ongoing studies in Long CoT.", + "bbox": [ + 169, + 251, + 826, + 335 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 354, + 267, + 371 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Asma Ben Abacha, Wen-wai Yim, Yujuan Fu, Zhaoyi Sun, Meliha Yetisgen, Fei Xia, and Thomas Lin. Medec: A benchmark for medical error detection and correction in clinical notes. arXiv preprint arXiv:2412.19260, 2024.", + "[2] Marwan AbdElhameed and Pavly Halim. Inference scaling vs reasoning: An empirical analysis of compute-optimal llm problem-solving. arXiv preprint arXiv:2412.16260, 2024.", + "[3] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.", + "[4] Bo Adler, Niket Agarwal, Ashwath Aithal, Dong H Anh, Pallab Bhattacharya, Annika Brundyn, Jared Casper, Bryan Catanzaro, Sharon Clay, Jonathan Cohen, et al. Nematron-4 340b technical report. arXiv preprint arXiv:2406.11704, 2024.", + "[5] Shivam Agarwal, Zimin Zhang, Lifan Yuan, Jiawei Han, and Hao Peng. The unreasonable effectiveness of entropy minimization in llm reasoning. arXiv preprint arXiv:2505.15134, 2025.", + "[6] Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning. arXiv preprint arXiv:2503.04697, 2025.", + "[7] Wasi Uddin Ahmad, Sean Narethiran, Somshubra Majumdar, Aleksander Ficek, Siddhartha Jain, Jocelyn Huang, Vahid Noroozi, and Boris Ginsburg. Opencodereasoning: Advancing data distillation for competitive coding. arXiv preprint arXiv:2504.01943, 2025.", + "[8] AI-MO. 
Aime 2024. https://huggingface.co/datasets/AI-MO/aimo-validation-aime, July 2024.", + "[9] AI-MO. Amc 2023. https://huggingface.co/datasets/AI-MO/aimo-validation-amc, July 2024.", + "[10] Alon Albalak, Duy Phung, Nathan Lile, Rafael Rafailov, Kanishk Gandhi, Louis Castricato, Anikait Singh, Chase Blagden, Violet Xiang, Dakota Mahan, and Nick Haber. Big-math: A large-scale, high-quality math dataset for reinforcement learning in language models, 2025.", + "[11] Mohammad Ali Alomrani, Yingxue Zhang, Derek Li, Qianyi Sun, Soumyasundar Pal, Zhanguang Zhang, Yaochen Hu, Rohan Deepak Ajwani, Antonios Valkanas, Raika Karimi, et al. Reasoning on a budget: A survey of adaptive and controllable test-time compute in llms. arXiv preprint arXiv:2507.02076, 2025.", + "[12] Alireza Amiri, Xinting Huang, Mark Rofin, and Michael Hahn. Lower bounds for chain-of-thought reasoning in hard-attention transformers. arXiv preprint arXiv:2502.02393, 2025.", + "[13] Dario Amodei, Chris Olah, Jacob Steinhardt, Paul Christiano, John Schulman, and Dan Mané. Concrete problems in ai safety. arXiv preprint arXiv:1606.06565, 2016." + ], + "bbox": [ + 189, + 380, + 826, + 912 + ], + "page_idx": 36 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 42, + 308, + 71 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Shengnan An, Zexiong Ma, Zeqi Lin, Nanning Zheng, Jian-Guang Lou, and Weizhu Chen. Learning from mistakes makes llm better reasoner. arXiv preprint arXiv:2310.20689, 2023.", + "[15] Carolyn Jane Anderson, Joydeep Biswas, Aleksander Boruch-Gruszecki, Federico Cassano, Molly Q Feldman, Arjun Guha, Francesca Lucchetti, and Zixuan Wu. PhD knowledge not required: A reasoning challenge for large language models. 
arXiv preprint arXiv:2502.01584, 2025.", + "[16] Rohan Anil, Andrew M Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, et al. Palm 2 technical report. arXiv preprint arXiv:2305.10403, 2023.", + "[17] Zachary Ankner, Mansheej Paul, Brandon Cui, Jonathan Daniel Chang, and Prithviraj Ammanabrolu. Critique-out-loud reward models. In *Pluralistic Alignment Workshop at NeurIPS* 2024, October 2024. URL https://openreview.net/forum?id=CljYUvI1RW.", + "[18] Thomas Anthony, Zheng Tian, and David Barber. Thinking fast and slow with deep learning and tree search. Advances in neural information processing systems, 30, December 2017. URL https://proceedings.neurips.cc/paper_files/paper/2017/file/d8e1344e27a5b08cdfd5d027d9b8d6de-Paper.pdf.", + "[19] AI Anthropic. The claude 3 model family: Opus, sonnet, haiku. Claude-3 Model Card, 1:1, 2024. URL https://www-cdn.anthropic.com/de8ba9b01c9ab7cbabf5c33b80b7bbc618857627/Model_Card_Claude_3.pdf.", + "[20] Roberto Araya. Do chains-of-thoughts of large language models suffer from hallucinations, cognitive biases, or phobias in bayesian reasoning? arXiv preprint arXiv:2503.15268, 2025.", + "[21] Mikhail L Arbazov, Alexey A Shvets, and Sisong Beir. Beyond exponential decay: Rethinking error accumulation in large language models. arXiv preprint arXiv:2505.24187, 2025.", + "[22] Daman Arora and Andrea Zanette. Training language models to reason efficiently. arXiv preprint arXiv:2502.04463, 2025.", + "[23] Aitor Arrieta, Miriam Ugarte, Pablo Valle, José Antonio Parejo, and Sergio Segura. Early external safety testing of openai's o3-mini: Insights from the pre-deployment evaluation. arXiv preprint arXiv:2501.17749, 2025.", + "[24] Aitor Arrieta, Miriam Ugarte, Pablo Valle, José Antonio Parejo, and Sergio Segura. o3-mini vs deepseek-r1: Which one is safer? arXiv preprint arXiv:2501.18438, 2025.", + "[25] Dhananjay Ashok and Jonathan May. 
Language models can predict their own behavior. arXiv preprint arXiv:2502.13329, 2025.", + "[26] Zhangir Azerbayev, Hailey Schoelkopf, Keiran Paster, Marco Dos Santos, Stephen Marcus McAleer, Albert Q. Jiang, Jia Deng, Stella Biderman, and Sean Welleck. Llemma: An open language model for mathematics. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=4WnqRR915j.", + "[27] Alisson Azzolini, Hannah Brandon, Prithvijit Chattopadhyay, Huayu Chen, Jinju Chu, Yin Cui, Jenna Diamond, Yifan Ding, Francesco Ferroni, Rama Govindaraju, et al. Cosmos-reason1: From physical common sense to embodied reasoning. arXiv preprint arXiv:2503.15558, 2025.", + "[28] Tanja Baeumel, Josef van Genabith, and Simon Ostermann. The lookahead limitation: Why multi-operand addition is hard for lms. arXiv preprint arXiv:2502.19981, 2025.", + "[29] Yuntao Bai, Saurav Kadavath, Sandipan Kundu, Amanda Askell, Jackson Kernion, Andy Jones, Anna Chen, Anna Goldie, Azalia Mirhoseini, Cameron McKinnon, et al. Constitutional ai: Harmlessness from ai feedback. arXiv preprint arXiv:2212.08073, 2022.", + "[30] Bowen Baker, Joost Huizinga, Aleksander Madry, Wojciech Zaremba, Jakub Pachocki, and David Farhi. Monitoring reasoning models for misbehavior and the risks of promoting obfuscation. March 2025. URL https://openai.com/index/chain-of-thought-monitoring/.", + "[31] Vidhisha Balachandran, Jingya Chen, Lingjiao Chen, Shivam Garg, Neel Joshi, Yash Lara, John Langford, Besmira Nushi, Vibhav Vineet, Yue Wu, et al. Inference-time scaling for complex tasks: Where we stand and what lies ahead. arXiv preprint arXiv:2504.00294, 2025." 
+ ], + "bbox": [ + 189, + 90, + 826, + 912 + ], + "page_idx": 37 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 37 + }, + { + "type": "page_number", + "text": "38", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 37 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[32] Marthe Ballon, Andres Algaba, and Vincent Ginis. The relationship between reasoning and performance in large language models-o3 (mini) thinks harder, not longer. arXiv preprint arXiv:2502.15631, 2025.", + "[33] Dibyanayan Bandyopadhyay, Soham Bhattacharjee, and Asif Ekbal. Thinking machines: A survey of llm based reasoning strategies. arXiv preprint arXiv:2503.10814, 2025.", + "[34] Hritik Bansal, Arian Hosseini, Rishabh Agarwal, Vinh Q. Tran, and Mehran Kazemi. Smaller, weaker, yet better: Training LLM reasoners via compute-optimal sampling. In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, January 2025. URL https://openreview.net/forum?id=HuYSURUxs2.", + "[35] Hieu Tran Bao, Nguyen Cong Dat, Nguyen Duc Anh, and Hoang Thanh Tung. Learning to stop overthinking at test time. arXiv preprint arXiv:2502.10954, 2025.", + "[36] Keqin Bao, Nuo Chen, Xiaoyuan Li, Binyuan Hui, Bowen Yu, Fuli Feng, Junyang Lin, Xiangnan He, and Dayiheng Liu. Teaching llm to reason: Reinforcement learning from algorithmic problems without code. arXiv preprint arXiv:2507.07498, 2025.", + "[37] Qiming Bao, Alex Yuxuan Peng, Tim Hartill, Neset Tan, Zhenyun Deng, Michael Witbrock, and Jiamou Liu. Multi-step deductive reasoning over natural language: An empirical study on out-of-distribution generalisation. arXiv preprint arXiv:2207.14000, 2022.", + "[38] Qiming Bao, Gael Gendron, Alex Yuxuan Peng, Wanjun Zhong, Neset Tan, Yang Chen, Michael Witbrock, and Jiamou Liu. Assessing and enhancing the robustness of large language models with task structure variations for logical reasoning. 
arXiv preprint arXiv:2310.09430, 2023.", + "[39] Qiming Bao, Alex Yuxuan Peng, Zhenyun Deng, Wanjun Zhong, Neset Tan, Nathan Young, Yang Chen, Yonghua Zhu, Michael Witbrock, and Jiamou Liu. Contrastive learning with logic-driven data augmentation for logical reasoning over text. arXiv preprint arXiv:2305.12599, 2023.", + "[40] Qiming Bao, Alex Peng, Zhenyun Deng, Wanjun Zhong, Gael Gendron, Timothy Pistotti, Neset Tan, Nathan Young, Yang Chen, Yonghua Zhu, Paul Denny, Michael Witbrock, and Jiamou Liu. Abstract Meaning Representation-based logic-driven data augmentation for logical reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 5914–5934, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.353. URL https://aclanthology.org/2024-findings-acl.353/.", + "[41] Qiming Bao, Juho Leinonen, Alex Yuxuan Peng, Wanjun Zhong, Gael Gendron, Timothy Pistotti, Alice Huang, Paul Denny, Michael Witbrock, and Jiamou Liu. Exploring iterative enhancement for improving learnersourced multiple-choice question explanations with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 39, pages 28955–28963, Apr 2025.", + "[42] Brian R Bartoldson, Siddarth Venkatraman, James Diffenderfer, Moksh Jain, Tal Ben-Nun, Seanie Lee, Minsu Kim, Johan Obando-Ceron, Yoshua Bengio, and Bhavya Kailkhura. Trajectory balance with asynchrony: Decoupling exploration and learning for fast, scalable llm post-training. arXiv preprint arXiv:2503.18929, 2025.", + "[43] Sarmad Bashir, Alessio Ferrari, Abbas Khan, Per Erik Strandberg, Zulqarnain Haider, Mehrdad Saadatmand, and Markus Bohlin. Requirements ambiguity detection and explanation with llms: An industrial study. July 2025.", + "[44] Ali Behrouz, Peilin Zhong, and Vahab Mirrokni. Titans: Learning to memorize at test time. 
arXiv preprint arXiv:2501.00663, 2024.", + "[45] Yoshua Bengio, Michael Cohen, Damiano Fornasiere, Joumana Ghosn, Pietro Greiner, Matt MacDermott, Soren Mindermann, Adam Oberman, Jesse Richardson, Oliver Richardson, et al. Superintelligent agents pose catastrophic risks: Can scientist ai offer a safer path? arXiv preprint arXiv:2502.15657, 2025.", + "[46] Yoshua Bengio, Soren Mindermann, Daniel Privitera, Tamay Besiroglu, Rishi Bommasani, Stephen Casper, Yejin Choi, Philip Fox, Ben Garfinkel, Danielle Goldfarb, et al. International ai safety report. arXiv preprint arXiv:2501.17805, 2025." + ], + "bbox": [ + 189, + 90, + 826, + 912 + ], + "page_idx": 38 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 38 + }, + { + "type": "page_number", + "text": "39", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 38 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[47] Leonardo Bertolazzi, Philipp Mondorf, Barbara Plank, and Raffaella Bernardi. The validation gap: A mechanistic analysis of how language models compute arithmetic but fail to validate it. arXiv preprint arXiv:2502.11771, 2025.", + "[48] Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nczyk, and Torsten Hoefler. Graph of thoughts: Solving elaborate problems with large language models. Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17682-17690, Mar. 2024. doi: 10.1609/aaai.v38i16.29720. URL https://ojs.aaai.org/index.php/AAAI/article/view/29720.", + "[49] Maciej Besta, Florim Memedi, Zhenyu Zhang, Robert Gerstenberger, Guangyuan Piao, Nils Blach, Piotr Nyczyk, Marcin Copik, Grzegorz Kwaśniewski, Jürgen Müller, et al. Demystifying chains, trees, and graphs of thoughts. 
arXiv preprint arXiv:2401.14295, 2024.", + "[50] Maciej Besta, Julia Barth, Eric Schreiber, Ales Kubicek, Afonso Catarino, Robert Gerstenberger, Piotr Nczyk, Patrick Iff, Yueling Li, Sam Houliston, et al. Reasoning language models: A blueprint. arXiv preprint arXiv:2501.11223, 2025.", + "[51] Jinhe Bi, Danqi Yan, Yifan Wang, Wenke Huang, Haokun Chen, Guancheng Wan, Mang Ye, Xun Xiao, Hinrich Schuetze, Volker Tresp, et al. Cot-kinetics: A theoretical modeling assessing lrm reasoning process. arXiv preprint arXiv:2505.13408, 2025.", + "[52] Xiao Bi, Deli Chen, Guanting Chen, Shanhuang Chen, Damai Dai, Chengqi Deng, Honghui Ding, Kai Dong, Qiushi Du, Zhe Fu, et al. Deepseek llm: Scaling open-source language models with longtermism. arXiv preprint arXiv:2401.02954, 2024.", + "[53] Zhen Bi, Ningyu Zhang, Yinuo Jiang, Shumin Deng, Guozhou Zheng, and Huajun Chen. When do program-of-thought works for reasoning? In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pages 17691-17699, 2024. URL https://ods.aaai.org/index.php/AAAI/article/view/29721/31237.", + "[54] Zhenni Bi, Kai Han, Chuanjian Liu, Yehui Tang, and Yunhe Wang. Forest-of-thought: Scaling test-time compute for enhancing lIm reasoning. arXiv preprint arXiv:2412.09078, 2024.", + "[55] Edoardo Botta, Yuchen Li, Aashay Mehta, Jordan T Ash, Cyril Zhang, and Andrej Risteski. On the query complexity of verifier-assisted language generation. arXiv preprint arXiv:2502.12123, 2025.", + "[56] David Brandfonbrener, Simon Henniger, Sibi Raja, Tarun Prasad, Chloe Loughridge, Federico Cassano, Sabrina Ruixin Hu, Jianang Yang, William E Byrd, Robert Zinkov, et al. Vermcts: Synthesizing multi-step programs using a verifier, a large language model, and tree search. arXiv preprint arXiv:2402.08147, 2024.", + "[57] Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. 
arXiv preprint arXiv:2407.21787, 2024.", + "[58] Dan Busbridge, Amitis Shidani, Floris Weers, Jason Ramapuram, Etai Littwin, and Russ Webb. Distillation scaling laws. arXiv preprint arXiv:2502.08606, 2025.", + "[59] Ji Young Byun, Young-Jin Park, Nvid Azizan, and Rama Chellappa. Test-time-scaling for zero-shot diagnosis with visual-language reasoning. arXiv preprint arXiv:2506.11166, 2025.", + "[60] Ju-Seung Byun, Jiyun Chun, Jihyung Kil, and Andrew Perrault. ARES: Alternating reinforcement learning and supervised fine-tuning for enhanced multi-modal chain-of-thought reasoning through diverse AI feedback. In Yaser Al-Onaizan, Mohit Bansal, and YunNung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 4410-4430, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.252. URL https://aclanthology.org/2024.emnlp-main.252/.", + "[61] Huanqia Cai, Yijun Yang, and Zhifeng Li. System-2 mathematical reasoning via enriched instruction tuning. arXiv preprint arXiv:2412.16964, 2024.", + "[62] Zheng Cai, Maosong Cao, Haojiong Chen, Kai Chen, Keyu Chen, Xin Chen, Xun Chen, Zehui Chen, Zhi Chen, Pei Chu, et al. Internl m2 technical report. arXiv preprint arXiv:2403.17297, 2024." + ], + "bbox": [ + 189, + 90, + 828, + 910 + ], + "page_idx": 39 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 39 + }, + { + "type": "page_number", + "text": "40", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 39 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[63] Erik Cambria, Lorenzo Malandri, Fabio Mercorio, Navid Nobani, and Andrea Seveso. Xai meets llms: A survey of the relation between explainable ai and large language models. arXiv preprint arXiv:2407.15248, 2024.", + "[64] Lang Cao. 
GraphReason: Enhancing reasoning capabilities of large language models through a graph-based verification approach. In Bhavana Dalvi Mishra, Greg Durrett, Peter Jansen, Ben Lipkin, Danilo Neves Ribeiro, Lionel Wong, Xi Ye, and Wenting Zhao, editors, Proceedings of the 2nd Workshop on Natural Language Reasoning and Structured Explanations (@ACL 2024), pages 1-12, Bangkok, Thailand, August 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.nlrse-1.1/.", + "[65] Zhepeng Cen, Yihang Yao, William Han, Zuxin Liu, and Ding Zhao. Behavior injection: Preparing language models for reinforcement learning. arXiv preprint arXiv:2505.18917, 2025.", + "[66] Linzheng Chai, Jian Yang, Tao Sun, Hongcheng Guo, Jiaheng Liu, Bing Wang, Xiannian Liang, Jiaqi Bai, Tongliang Li, Qiyao Peng, et al. xcot: Cross-lingual instruction tuning for cross-lingual chain-of-thought reasoning. arXiv preprint arXiv:2401.07037, 2024.", + "[67] Jun Shern Chan, Neil Chowdhury, Oliver Jaffe, James Aung, Dane Sherburn, Evan Mays, Giulio Starace, Kevin Liu, Leon Maksin, Tejal Patwardhan, et al. Mle-bench: Evaluating machine learning agents on machine learning engineering. arXiv preprint arXiv:2410.07095, 2024.", + "[68] Hyeong Soo Chang. On the convergence rate of mcts for the optimal value estimation in markov decision processes. IEEE Transactions on Automatic Control, pages 1-6, February 2025. doi: 10.1109/TAC.2025.3538807. URL https://ieeexplore.ieee.org/document/10870057.", + "[69] Aili Chen, Aonian Li, Bangwei Gong, Binyang Jiang, Bo Fei, Bo Yang, Boji Shan, Changqing Yu, Chao Wang, Cheng Zhu, et al. Minimax-m1: Scaling test-time compute efficiently with lightning attention. arXiv preprint arXiv:2506.13585, 2025.", + "[70] Andong Chen, Yuchen Song, Wenxin Zhu, Kehai Chen, Muyun Yang, Tiejun Zhao, et al. Evaluating o1-like llms: Unlocking reasoning for translation through comprehensive analysis. 
arXiv preprint arXiv:2502.11544, 2025.", + "[71] Beiduo Chen, Yang Janet Liu, Anna Korhonen, and Barbara Plank. Threading the needle: Reweaving chain-of-thought reasoning to explain human label variation. arXiv preprint arXiv:2505.23368, 2025.", + "[72] Guizhen Chen, Weiwen Xu, Hao Zhang, Hou Pong Chan, Chaoqun Liu, Lidong Bing, Deli Zhao, Anh Tuan Luu, and Yu Rong. Finereason: Evaluating and improving llms' deliberate reasoning through reflective puzzle solving. arXiv preprint arXiv:2502.20238, 2025.", + "[73] Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Step-level value preference optimization for mathematical reasoning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 7889-7903, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.463. URL https://aclanthology.org/2024_findings-emnlp.463/.", + "[74] Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Alphamath almost zero: Process supervision without process. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=VaXnxQ3UKo.", + "[75] Haibin Chen, Kangtao Lv, Chengwei Hu, Yanshi Li, Yujin Yuan, Yancheng He, Xingyao Zhang, Langming Liu, Shilei Liu, Wenbo Su, et al. Chineseecomqa: A scalable e-commerce concept evaluation benchmark for large language models. arXiv preprint arXiv:2502.20196, 2025.", + "[76] Hanjie Chen, Zhouxiang Fang, Yash Singla, and Mark Dredze. Benchmarking large language models on answering and explaining challenging medical questions. arXiv preprint arXiv:2402.18060, 2024.", + "[77] Haolin Chen, Yihao Feng, Zuxin Liu, Weiran Yao, Akshara Prabhakar, Shelby Heinecke, Ricky Ho, Phil Mui, Silvio Savarese, Caiming Xiong, et al. 
Language models are hid" + ], + "bbox": [ + 189, + 90, + 826, + 912 + ], + "page_idx": 40 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 40 + }, + { + "type": "page_number", + "text": "41", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 40 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "den reasoners: Unlocking latent reasoning capabilities via self-rewarding. arXiv preprint arXiv:2411.04282, 2024.", + "[78] Hardy Chen, Haoqin Tu, Hui Liu, Xianfeng Tang, Xinya Du, Yuyin Zhou, and Cihang Xie. VI-thinking: An r1-derived visual instruction tuning dataset for thinkable lvlms. https://github.com/UCSC-VLAA/VL-Thinkinq, 2025.", + "[79] Jian Chen, Guohao Tang, Guofu Zhou, and Wu Zhu. Chatgpt and deepseek: Can they predict the stock market and macroeconomy? arXiv preprint arXiv:2502.10008, 2025.", + "[80] Jianhao Chen, Zishuo Xun, Bocheng Zhou, Han Qi, Qiaosheng Zhang, Yang Chen, Wei Hu, Yuzhong Qu, Wanli Ouyang, and Shuyue Hu. Do we truly need so many samples? multi-llm repeated sampling efficiently scale test-time compute. arXiv preprint arXiv:2504.00762, 2025.", + "[81] Jiefeng Chen, Jie Ren, Xinyun Chen, Chengrun Yang, Ruoxi Sun, and Sercan Ö Arik. Sets: Leveraging self-verification and self-correction for improved test-time scaling. arXiv preprint arXiv:2501.19306, 2025.", + "[82] Jierun Chen, Tiezheng Yu, Haoli Bai, Lewei Yao, Jiannan Wu, Kaican Li, Fei Mi, Chaofan Tao, Lei Zhu, Manyi Zhang, et al. The synergy dilemma of long-cot sft and rl: Investigating post-training techniques for reasoning vlms. arXiv preprint arXiv:2507.07562, 2025.", + "[83] Junying Chen, Zhenyang Cai, Ke Ji, Xidong Wang, Wanlong Liu, Rongsheng Wang, Jianye Hou, and Benyou Wang. Huatuogpt-o1, towards medical complex reasoning with llms. 
arXiv preprint arXiv:2412.18925, 2024.", + "[84] Justin Chih-Yao Chen, Archiki Prasad, Swarnadeep Saha, Elias Stengel-Eskin, and Mohit Bansal. Magicore: Multi-agent, iterative, coarse-to-fine refinement for reasoning. arXiv preprint arXiv:2409.12147, 2024.", + "[85] Kedi Chen, Zhikai Lei, Fan Zhang, Yinqi Zhang, Qin Chen, Jie Zhou, Liang He, Qipeng Guo, Kai Chen, and Wei Zhang. Code-driven inductive synthesis: Enhancing reasoning abilities of large language models with sequences. arXiv preprint arXiv:2503.13109, 2025.", + "[86] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025. Accessed: 2025-02-02.", + "[87] Michael K Chen, Xikun Zhang, and Dacheng Tao. Justlogic: A comprehensive benchmark for evaluating deductive reasoning in large language models. arXiv preprint arXiv:2501.14851, 2025.", + "[88] Mingyang Chen, Tianpeng Li, Haoze Sun, Yijie Zhou, Chenzheng Zhu, Fan Yang, Zenan Zhou, Weipeng Chen, Haofen Wang, Jeff Z Pan, et al. Learning to reason with search for llms via reinforcement learning. arXiv preprint arXiv:2503.19470, 2025.", + "[89] Nuo Chen, Zhiyuan Hu, Qingyun Zou, Jiaying Wu, Qian Wang, Bryan Hooi, and Bingsheng He. Judgerm: Large reasoning models as a judge. arXiv preprint arXiv:2504.00050, 2025.", + "[90] Qiguang Chen, Libo Qin, Jiaqi WANG, Jingxuan Zhou, and Wanxiang Che. Unlocking the capabilities of thought: A reasoning boundary framework to quantify and optimize chain-of-thought. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=pC44UMwy2v.", + "[91] Qiguang Chen, Libo Qin, Jin Zhang, Zhi Chen, Xiao Xu, and Wanxiang Che. $\\mathbf{M}^{3}\\mathrm{CoT}$ : A novel benchmark for multi-domain multi-step multi-modal chain-of-thought. 
In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 8199–8221, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.446. URL https://aclanthology.org/2024.acl-long.446/.", + "[92] Qiguang Chen, Libo Qin, Jinhao Liu, Yue Liao, Jiaqi Wang, Jingxuan Zhou, and Wanxiang Che. Rbf++: Quantifying and optimizing reasoning boundaries across measurable and unmeasurable capabilities for chain-of-thought reasoning. arXiv preprint arXiv:2505.13307, 2025." + ], + "bbox": [ + 187, + 90, + 828, + 910 + ], + "page_idx": 41 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 41 + }, + { + "type": "page_number", + "text": "42", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 41 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[93] Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiaqi Wang, Mengkang Hu, Zhi Chen, Wanxiang Che, and Ting Liu. Ecm: A unified electronic circuit model for explaining the emergence of in-context learning and chain-of-thought in large language model. arXiv preprint arXiv:2502.03325, 2025.", + "[94] Qiguang Chen, Mingda Yang, Libo Qin, Jinhao Liu, Zheng Yan, Jiannan Guan, Dengyun Peng, Yiyan Ji, Hanjing Li, Mengkang Hu, et al. Ai4research: A survey of artificial intelligence for scientific research. arXiv preprint arXiv:2507.01903, 2025.", + "[95] Qiqi Chen, Xinpeng Wang, Philipp Mondorf, Michael A Hedderich, and Barbara Plank. Understanding when tree of thoughts succeeds: Larger models excel in generation, not discrimination. arXiv preprint arXiv:2410.17820, 2024.", + "[96] Shiqi Chen, Jinghan Zhang, Tongyao Zhu, Wei Liu, Siyang Gao, Miao Xiong, Manling Li, and Junxian He. Bring reason to vision: Understanding perception and reasoning through model merging. 
arXiv preprint arXiv:2505.05464, 2025.", + "[97] Shuang Chen, Yue Guo, Zhaochen Su, Yafu Li, Yulun Wu, Jiacheng Chen, Jiayu Chen, Weijie Wang, Xiaoye Qu, and Yu Cheng. Advancing multimodal reasoning: From optimized cold start to staged reinforcement learning. arXiv preprint arXiv:2506.04207, 2025.", + "[98] Sijia Chen and Baochun Li. Toward adaptive reasoning in large language models with thought rollback. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 7033-7056. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/chen24y.html.", + "[99] Weizhe Chen, Sven Koenig, and Bistra Dilkina. Iterative deepening sampling for large language models. arXiv preprint arXiv:2502.05449, 2025.", + "[100] Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W. Cohen. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. Transactions on Machine Learning Research, November 2023. ISSN 2835-8856. URL https://openreview.net/forum?id=YfZ4ZPt8zd.", + "[101] Wenxiang Chen, Wei He, Zhiheng Xi, Honglin Guo, Boyang Hong, Jiazheng Zhang, Rui Zheng, Nijun Li, Tao Gui, Yun Li, et al. Better process supervision with bi-directional rewarding signals. arXiv preprint arXiv:2503.04618, 2025.", + "[102] Xinghao Chen, Zhijing Sun, Wenjin Guo, Miaoran Zhang, Yanjun Chen, Yirong Sun, Hui Su, Yijie Pan, Dietrich Klakow, Wenjie Li, et al. Unveiling the key factors for distilling chain-of-thought reasoning. arXiv preprint arXiv:2502.18001, 2025.", + "[103] Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms. 
arXiv preprint arXiv:2412.21187, 2024.", + "[104] Xinyun Chen, Maxwell Lin, Nathanael Scharli, and Denny Zhou. Teaching large language models to self-debug. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=KuPixIqPiq.", + "[105] Yanda Chen, Joe Benton, Ansh Radhakrishnan, Jonathan Uesato Carson Denison, John Schulman, Arushi Somani, Peter Hase, Misha Wagner Fabien Roger Vlad Mikulik, Sam Bowman, Jan Leike Jared Kaplan, et al. Reasoning models don't always say what they think. April 2025. URL https://www.anthropic.com/research/reasoning-models-dont-say-think.", + "[106] Yanxi Chen, Xuchen Pan, Yaliang Li, Bolin Ding, and Jingren Zhou. A simple and provable scaling law for the test-time compute of large language models. arXiv preprint arXiv:2411.19477, 2024.", + "[107] Yezeng Chen, Zui Chen, and Yi Zhou. Brain-inspired two-stage approach: Enhancing mathematical reasoning by imitating human thought processes. arXiv preprint arXiv:2403.00800, 2024.", + "[108] Yihang Chen, Haikang Deng, Kaiqiao Han, and Qingyue Zhao. Policy frameworks for transparent chain-of-thought reasoning in large language models. arXiv preprint arXiv:2503.14521, 2025." + ], + "bbox": [ + 181, + 90, + 826, + 910 + ], + "page_idx": 42 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 42 + }, + { + "type": "page_number", + "text": "43", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 42 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[109] Yilong Chen, Junyuan Shang, Zhenyu Zhang, Yanxi Xie, Jiawei Sheng, Tingwen Liu, Shuo-huan Wang, Yu Sun, Hua Wu, and Haifeng Wang. Inner thinking transformer: Leveraging dynamic depth scaling to foster adaptive internal thinking. arXiv preprint arXiv:2502.13842, 2025.", + "[110] Zhenfang Chen, Delin Chen, Rui Sun, Wenjun Liu, and Chuang Gan. 
Scaling autonomous agents via automatic reward modeling and planning. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=womU9cEwcO.", + "[111] Zhi Chen, Qiguang Chen, Libo Qin, Qipeng Guo, Haijun Lv, Yicheng Zou, Wanxiang Che, Hang Yan, Kai Chen, and Dahua Lin. What are the essential factors in crafting effective long context multi-hop instruction datasets? insights and best practices. arXiv preprint arXiv:2409.01893, 2024.", + "[112] Zihan Chen, Song Wang, Zhen Tan, Xingbo Fu, Zhenyu Lei, Peng Wang, Huan Liu, Cong Shen, and Jundong Li. A survey of scaling in large language model reasoning. arXiv preprint arXiv:2504.02181, 2025.", + "[113] Ziru Chen, Michael White, Ray Mooney, Ali Payani, Yu Su, and Huan Sun. When is tree search useful for LLM planning? it depends on the discriminator. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 13659–13678, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.738. URL https://aclanthology.org/2024.acl-long.738/.", + "[114] Zixiang Chen, Yihe Deng, Huizhuo Yuan, Kaixuan Ji, and Quanquan Gu. Self-play fine-tuning converts weak language models to strong language models. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 6621-6642. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/chen24j.html.", + "[115] Zui Chen, Tianqiao Liu, Mi Tian, Qing Tong, Weiqi Luo, and Zitao Liu. Advancing math reasoning in language models: The impact of problem-solving data, data synthesis methods, and training stages. 
arXiv preprint arXiv:2501.14002, 2025.", + "[116] Daixuan Cheng, Shaohan Huang, Xuekai Zhu, Bo Dai, Wayne Xin Zhao, Zhenliang Zhang, and Furu Wei. Reasoning with exploration: An entropy perspective. arXiv preprint arXiv:2506.14758, 2025.", + "[117] Jiahao Cheng, Tiancheng Su, Jia Yuan, Guoxiu He, Jiawei Liu, Xinqi Tao, Jingwen Xie, and Huaxia Li. Chain-of-thought prompting obscures hallucination cues in large language models: An empirical evaluation. arXiv preprint arXiv:2506.17088, 2025.", + "[118] Jiale Cheng, Xiao Liu, Cunxiang Wang, Xiaotao Gu, Yida Lu, Dan Zhang, Yuxiao Dong, Jie Tang, Hongning Wang, and Minlie Huang. Spar: Self-play with tree-search refinement to improve instruction-following in large language models. arXiv preprint arXiv:2412.11605, 2024.", + "[119] Junhang Cheng, Fang Liu, Chengru Wu, and Li Zhang. Adaptivellm: A framework for selecting optimal cost-efficient llm for code-generation based on cot length. arXiv preprint arXiv:2506.10525, 2025.", + "[120] Kanzhi Cheng, Yantao Li, Fangzhi Xu, Jianbing Zhang, Hao Zhou, and Yang Liu. Vision-language models can self-improve reasoning via reflection. arXiv preprint arXiv:2411.00855, 2024.", + "[121] Xiaoxue Cheng, Junyi Li, Wayne Xin Zhao, and Ji-Rong Wen. Think more, hallucinate less: Mitigating hallucinations via dual process of fast and slow thinking. arXiv preprint arXiv:2501.01306, 2025.", + "[122] Zhengxiang Cheng, Dongping Chen, Mingyang Fu, and Tianyi Zhou. Optimizing length compression in large reasoning models. arXiv preprint arXiv:2506.14755, 2025.", + "[123] Zhoujun Cheng, Haoyu Dong, Zhiruo Wang, Ran Jia, Jiaqi Guo, Yan Gao, Shi Han, JianGuang Lou, and Dongmei Zhang. Hitab: A hierarchical table dataset for question answering and natural language generation. arXiv preprint arXiv:2108.06712, 2021." 
+ ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 43 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 43 + }, + { + "type": "page_number", + "text": "44", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 43 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[124] Zhoujun Cheng, Shibo Hao, Tianyang Liu, Fan Zhou, Yutao Xie, Feng Yao, Yuexin Bian, Yonghao Zhuang, Nilabjo Dey, Yuheng Zha, et al. Revisiting reinforcement learning for llm reasoning from a cross-domain perspective. arXiv preprint arXiv:2506.14965, 2025.", + "[125] Zihui Cheng, Qiguang Chen, Jin Zhang, Hao Fei, Xiaocheng Feng, Wanxiang Che, Min Li, and Libo Qin. Comt: A novel benchmark for chain of multi-modal thought on large vision-language models. arXiv preprint arXiv:2412.12932, 2024.", + "[126] Zihui Cheng, Qiguang Chen, Xiao Xu, Jiaqi Wang, Weiyun Wang, Hao Fei, Yidong Wang, Alex Jinpeng Wang, Zhi Chen, Wanxiang Che, et al. Visual thoughts: A unified perspective of understanding multimodal chain-of-thought. arXiv preprint arXiv:2505.15510, 2025.", + "[127] Ethan Chern, Zhulin Hu, Steffi Chern, Siqi Kou, Jiadi Su, Yan Ma, Zhijie Deng, and Pengfei Liu. Thinking with generated images. arXiv preprint arXiv:2505.22525, 2025.", + "[128] Yew Ken Chia, Vernon Toh, Deepanway Ghosal, Lidong Bing, and Soujanya Poria. PuzzleVQA: Diagnosing multimodal reasoning challenges of language models with abstract visual patterns. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 16259–16273, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.962. URL https://aclanthology.org/2024-findings-acl.962/.", + "[129] Daiki Chijiwa, Taku Hasegawa, Kyosuke Nishida, Kuniko Saito, and Susumu Takeuchi. 
Portable reward tuning: Towards reusable fine-tuning across different pretrained models. arXiv preprint arXiv:2502.12776, 2025.", + "[130] Daewon Choi, Jimin Lee, Jihoon Tack, Woomin Song, Saket Dingliwal, Sai Muralidhar Jayanthi, Bhavana Ganesh, Jinwoo Shin, Aram Galstyan, and Sravan Babu Bodapati. Think clearly: Improving reasoning via redundant token pruning. arXiv preprint arXiv:2507.08806, 2025.", + "[131] François Chollet. On the measure of intelligence. arXiv preprint arXiv:1911.01547, 2019.", + "[132] Francois Chollet, Mike Knoop, Gregory Kamradt, and Bryan Landers. Arc prize 2024: Technical report. arXiv preprint arXiv:2412.04604, 2024.", + "[133] Francois Chollet, Mike Knoop, Gregory Kamradt, Bryan Landers, and Henry Pinkard. Arcagi-2: A new challenge for frontier ai reasoning systems. arXiv preprint arXiv:2505.11831, 2025.", + "[134] Sanjiban Choudhury. Process reward models for llm agents: Practical framework and directions. arXiv preprint arXiv:2502.10325, 2025.", + "[135] Jishnu Ray Chowdhury and Cornelia Caragea. Zero-shot verification-guided chain of thoughts. arXiv preprint arXiv:2501.13122, 2025.", + "[136] Konstantina Christakopoulou, Shibl Mourad, and Maja Mataric. Agents thinking fast and slow: A talker-reasoner architecture. In NeurIPS 2024 Workshop on Open-World Agents, October 2024. URL https://openreview.net/forum?id=xPhcP6rbI4.", + "[137] Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training. arXiv preprint arXiv:2501.17161, 2025.", + "[138] Zheng Chu, Jingchang Chen, Qianglong Chen, Weijiang Yu, Tao He, Haotian Wang, Weihua Peng, Ming Liu, Bing Qin, and Ting Liu. Navigate through enigmatic labyrinth a survey of chain of thought reasoning: Advances, frontiers and future. 
In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1173–1203, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.65. URL https://aclanthology.org/2024.acl-long.65/.", + "[139] Jennifer Chu-Carroll, Andrew Beck, Greg Burnham, David OS Melville, David Nachman, A Erdem Özcan, and David Ferrucci. Beyond llms: Advancing the landscape of complex reasoning. arXiv preprint arXiv:2402.08064, 2024.", + "[140] Daniel JH Chung, Zhiqi Gao, Yurii Kvasiuk, Tianyi Li, Moritz Munchmeyer, Maja Rudolph, Frederic Sala, and Sai Chaitanya Tadepalli. Theoretical physics benchmark (tpbench)—a dataset and study of ai reasoning capabilities in theoretical physics. arXiv preprint arXiv:2502.15815, 2025." + ], + "bbox": [ + 181, + 90, + 826, + 910 + ], + "page_idx": 44 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 44 + }, + { + "type": "page_number", + "text": "45", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[141] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.", + "[142] Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, et al. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks. arXiv preprint arXiv:2502.08235, 2025.", + "[143] Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, et al. Process reinforcement through implicit rewards. 
arXiv preprint arXiv:2502.01456, 2025.", + "[144] Ganqu Cui, Yuchen Zhang, Jiacheng Chen, Lifan Yuan, Zhi Wang, Yuxin Zuo, Haozhan Li, Yuchen Fan, Huayu Chen, Weize Chen, et al. The entropy mechanism of reinforcement learning for reasoning language models. arXiv preprint arXiv:2505.22617, 2025.", + "[145] Yingqian Cui, Pengfei He, Jingying Zeng, Hui Liu, Xianfeng Tang, Zhenwei Dai, Yan Han, Chen Luo, Jing Huang, Zhen Li, et al. Stepwise perplexity-guided refinement for efficient chain-of-thought reasoning in large language models. arXiv preprint arXiv:2502.13260, 2025.", + "[146] Yu Cui and Cong Zuo. Practical reasoning interruption attacks on reasoning large language models. arXiv preprint arXiv:2505.06643, 2025.", + "[147] Yu Cui, Bryan Hooi, Yujun Cai, and Yiwei Wang. Process or result? manipulated ending tokens can mislead reasoning lms to ignore the correct reasoning steps. arXiv preprint arXiv:2503.19326, 2025.", + "[148] Jianbo Dai, Jianqiao Lu, Yunlong Feng, Dong Huang, Guangtao Zeng, Rongju Ruan, Ming Cheng, Haochen Tan, and Zhijiang Guo. Mhpp: Exploring the capabilities and limitations of language models beyond basic code generation. arXiv preprint arXiv:2405.11430, 2024.", + "[149] Jisheng Dang, Jingze Wu, Teng Wang, Xuanhui Lin, Nannan Zhu, Hongbo Chen, Wei-Shi Zheng, Meng Wang, and Tat-Seng Chua. Reinforcing video reasoning with focused thinking. arXiv preprint arXiv:2505.24718, 2025.", + "[150] Quy-Anh Dang and Chris Ngo. Reinforcement learning for reasoning in small llms: What works and what doesn't. arXiv preprint arXiv:2503.16219, 2025.", + "[151] Yuntian Deng, Yejin Choi, and Stuart Shieber. From explicit cot to implicit cot: Learning to internalize cot step by step. arXiv preprint arXiv:2405.14838, 2024.", + "[152] Lauro Langosco Di Langosco, Jack Koch, Lee D Sharkey, Jacob Pfau, and David Krueger. Goal misgeneralization in deep reinforcement learning. In International Conference on Machine Learning, pages 12004-12019. PMLR, October 2022. 
URL https://proceedings.mlr.press/v162/langosco22a/langosco22a.pdf.", + "[153] Bowen Ding, Yuhan Chen, Futing Wang, Lingfeng Ming, and Tao Lin. Do thinking tokens help or trap? towards more efficient large reasoning model. arXiv preprint arXiv:2506.23840, 2025.", + "[154] Yifu Ding, Wentao Jiang, Shunyu Liu, Yongcheng Jing, Jinyang Guo, Yingjie Wang, Jing Zhang, Zengmao Wang, Ziwei Liu, Bo Du, et al. Dynamic parallel tree search for efficient llm reasoning. arXiv preprint arXiv:2502.16235, 2025.", + "[155] Hanze Dong, Wei Xiong, Deepanshu Goyal, Yihan Zhang, Winnie Chow, Rui Pan, Shizhe Diao, Jipeng Zhang, KaShun SHUM, and Tong Zhang. RAFT: Reward ranked finetuning for generative foundation model alignment. Transactions on Machine Learning Research, November 2023. ISSN 2835-8856. URL https://openreview.net/forum?id=m7p507zb1Y.", + "[156] Hanze Dong, Wei Xiong, Bo Pang, Haoxiang Wang, Han Zhao, Yingbo Zhou, Nan Jiang, Doyen Sahoo, Caiming Xiong, and Tong Zhang. Rlhf workflow: From reward modeling to online rlhf. arXiv preprint arXiv:2405.07863, 2024.", + "[157] Junnan Dong, Zijin Hong, Yuanchen Bei, Feiran Huang, Xinrun Wang, and Xiao Huang. Clr-bench: Evaluating large language models in college-level reasoning. arXiv preprint arXiv:2410.17558, 2024." + ], + "bbox": [ + 181, + 90, + 826, + 910 + ], + "page_idx": 45 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 45 + }, + { + "type": "page_number", + "text": "46", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 45 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[158] Kefan Dong and Tengyu Ma. Beyond limited data: Self-play ltm theorem provers with iterative conjecturing and proving. arXiv preprint arXiv:2502.00212, 2025.", + "[159] Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. 
Insight-v: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432, 2024.", + "[160] Zhichen Dong, Zhanhui Zhou, Zhixuan Liu, Chao Yang, and Chaochao Lu. Emergent response planning in lIm. arXiv preprint arXiv:2502.06258, 2025.", + "[161] Shihan Dou, Yan Liu, Haoxiang Jia, Limao Xiong, Enyu Zhou, Wei Shen, Junjie Shan, Caishuang Huang, Xiao Wang, Xiaoran Fan, et al. Stepcoder: Improve code generation with reinforcement learning from compiler feedback. arXiv preprint arXiv:2402.01391, 2024.", + "[162] Iddo Drori, Gaston Longhitano, Mao Mao, Seunghwan Hyun, Yuke Zhang, Sungjun Park, Zachary Meeks, Xin-Yu Zhang, Ben Segev, Howard Yong, et al. Diverse inference and verification for advanced reasoning. arXiv preprint arXiv:2502.09955, 2025.", + "[163] Kounianhua Du, Hanjing Wang, Jianxing Liu, Jizheng Chen, Xinyi Dai, Yasheng Wang, Ruiming Tang, Yong Yu, Jun Wang, and Weinan Zhang. Boost, disentangle, and customize: A robust system2-to-system1 pipeline for code generation. arXiv preprint arXiv:2502.12492, 2025.", + "[164] Weihua Du, Yiming Yang, and Sean Welleck. Optimizing temperature for language models with multi-sample inference. arXiv preprint arXiv:2502.05234, 2025.", + "[165] Xinrun Du, Yifan Yao, Kaijing Ma, Bingli Wang, Tianyu Zheng, Kang Zhu, Minghao Liu, Yiming Liang, Xiaolong Jin, Zhenlin Wei, et al. Supergpqa: Scaling llm evaluation across 285 graduate disciplines. arXiv preprint arXiv:2502.14739, 2025.", + "[166] Yifan Du, Zikang Liu, Yifan Li, Wayne Xin Zhao, Yuqi Huo, Bingning Wang, Weipeng Chen, Zheng Liu, Zhongyuan Wang, and Ji-Rong Wen. Virgo: A preliminary exploration on reproducing o1-like mllm. arXiv preprint arXiv:2501.01904, 2025.", + "[167] Keyu Duan, Zichen Liu, Xin Mao, Tianyu Pang, Changyu Chen, Qiguang Chen, Michael Qizhe Shieh, and Longxu Dou. Efficient process reward model training via active learning. 
arXiv preprint arXiv:2504.10559, 2025.", + "[168] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024.", + "[169] Subhabrata Dutta, Joykirat Singh, Soumen Chakrabarti, and Tanmoy Chakraborty. How to think step-by-step: A mechanistic understanding of chain-of-thought reasoning. Transactions on Machine Learning Research, July 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=uHLDkQVtyC.", + "[170] Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, et al. Competitive programming with large reasoning models. arXiv preprint arXiv:2502.06807, 2025.", + "[171] Kawin Ethayarajh, Winnie Xu, Niklas Muennighoff, Dan Jurafsky, and Douwe Kiela. Kto: Model alignment as prospect theoretic optimization. arXiv preprint arXiv:2402.01306, 2024.", + "[172] Chongyu Fan, Yihua Zhang, Jinghan Jia, Alfred Hero, and Sijia Liu. Cyclicreflex: Improving large reasoning models via cyclical reflection token scheduling. arXiv preprint arXiv:2506.11077, 2025.", + "[173] Siqi Fan, Peng Han, Shuo Shang, Yequan Wang, and Aixin Sun. Cothink: Token-efficient reasoning via instruct models guiding reasoning models. arXiv preprint arXiv:2505.22017, 2025.", + "[174] Tiantian Fan, Lingjun Liu, Yu Yue, Jiaze Chen, Chengyi Wang, Qiying Yu, Chi Zhang, Zhiqi Lin, Ruofei Zhu, Yufeng Yuan, et al. Truncated proximal policy optimization. arXiv preprint arXiv:2506.15050, 2025.", + "[175] Yi Fang, Wenjie Wang, Yang Zhang, Fengbin Zhu, Qifan Wang, Fuli Feng, and Xiangnan He. Large language models for recommendation with deliberative user preference alignment. arXiv preprint arXiv:2502.02061, 2025." 
+ ], + "bbox": [ + 181, + 90, + 826, + 911 + ], + "page_idx": 46 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 46 + }, + { + "type": "page_number", + "text": "47", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 46 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[176] Wu Fei, Hao Kong, Shuxian Liang, Yang Lin, Yibo Yang, Jing Tang, Lei Chen, and Xiansheng Hua. Self-guided process reward optimization with masked step advantage for process reinforcement learning. arXiv preprint arXiv:2507.01551, 2025.", + "[177] Guhao Feng, Bohang Zhang, Yuntian Gu, Haotian Ye, Di He, and Liwei Wang. Towards revealing the mystery behind chain of thought: A theoretical perspective. In Thirty-seventh Conference on Neural Information Processing Systems, September 2023. URL https://openreview.net/forum?id=qHrADgAdYu.", + "[178] Jiazhan Feng, Shijue Huang, Xingwei Qu, Ge Zhang, Yujia Qin, Baoquan Zhong, Chengquan Jiang, Jinxin Chi, and Wanjun Zhong. Retool: Reinforcement learning for strategic tool use in llms. arXiv preprint arXiv:2504.11536, 2025.", + "[179] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Junfei Wu, Xiaoying Zhang, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025.", + "[180] Sicheng Feng, Gongfan Fang, Xinyin Ma, and Xinchao Wang. Efficient reasoning models: A survey. arXiv preprint arXiv:2504.10903, 2025.", + "[181] Xiachong Feng, Longxu Dou, and Lingpeng Kong. Reasoning does not necessarily improve role-playing ability. arXiv preprint arXiv:2502.16940, 2025.", + "[182] Xueyang Feng, Bo Lan, Quanyu Dai, Lei Wang, Jiakai Tang, Xu Chen, Zhenhua Dong, and Ji-Rong Wen. Improving retrospective language agents via joint policy gradient optimization. 
arXiv preprint arXiv:2503.01490, 2025.", + "[183] Yichen Feng, Zhangchen Xu, Fengqing Jiang, Yuetai Li, Bhaskar Ramasubramanian, Luyao Niu, Bill Yuchen Lin, and Radha Poovendran. Visualsphinx: Large-scale synthetic vision logic puzzles for rl. arXiv preprint arXiv:2505.23977, 2025.", + "[184] Chrisantha Fernando, Dylan Sunil Banarse, Henryk Michalewski, Simon Osindero, and Tim Rocktäschel. Promptbreeder: Self-referential self-improvement via prompt evolution. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 13481-13544. PMLR, 21-27 Jul 2024. URL https://proceedings.mlrpress/v235/fernando24a.html.", + "[185] Mohamed Amine Ferrag, Norbert Tihanyi, and Merouane Debbah. Reasoning beyond limits: Advances and open problems for lms. arXiv preprint arXiv:2503.22732, 2025.", + "[186] Thomas Palmeira Ferraz, Kartik Mehta, Yu-Hsiang Lin, Haw-Shiuan Chang, Shereen Oraby, Sijia Liu, Vivek Subramanian, Tagyoung Chung, Mohit Bansal, and Nanyun Peng. LLM self-correction with deCRIM: Decompose, critique, and refine for enhanced following of instructions with multiple constraints. In The First Workshop on System-2 Reasoning at Scale, NeurIPS'24, October 2024. URL https://openreview.net/forum?id=RQ6Ff81so0.", + "[187] Jiarun Fu, Lizhong Ding, Hao Li, Pengqi Li, Qiuning Wei, and Xu Chen. Unveiling and causalizing cot: A causal perspective. arXiv preprint arXiv:2502.18239, 2025.", + "[188] Wei Fu, Jiaxuan Gao, Xujie Shen, Chen Zhu, Zhiyu Mei, Chuyi He, Shusheng Xu, Guo Wei, Jun Mei, Jiashu Wang, Tongkai Yang, Binhang Yuan, and Yi Wu. Areal: A large-scale asynchronous reinforcement learning system for language reasoning, 2025. URL https://arxiv.org/abs/2505.24298.", + "[189] Yao Fu, Hao Peng, Ashish Sabharwal, Peter Clark, and Tushar Khot. 
Complexity-based prompting for multi-step reasoning. In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=yf1icZHC-19.", + "[190] Yuqian Fu, Tinghong Chen, Jiajun Chai, Xihuai Wang, Songjun Tu, Guojun Yin, Wei Lin, Qichao Zhang, Yuanheng Zhu, and Dongbin Zhao. Srft: A single-stage method with supervised and reinforcement fine-tuning for reasoning. arXiv preprint arXiv:2506.19767, 2025.", + "[191] Víctor Gallego. Metasc: Test-time safety specification optimization for language models. arXiv preprint arXiv:2502.07985, 2025." + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 47 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 47 + }, + { + "type": "page_number", + "text": "48", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[192] Zeyu Gan, Yun Liao, and Yong Liu. Rethinking external slow-thinking: From snowball errors to probability of correct reasoning. arXiv preprint arXiv:2501.15602, 2025.", + "[193] Kanishk Gandhi, Denise HJ Lee, Gabriel Grand, Muxin Liu, Winson Cheng, Archit Sharma, and Noah Goodman. Stream of search (sos): Learning to search in language. In First Conference on Language Modeling, July 2024. URL https://openreview.net/pdf?id=2cop2jmQVL.", + "[194] Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars. arXiv preprint arXiv:2503.01307, 2025.", + "[195] Bofei Gao, Zefan Cai, Runxin Xu, Peiyi Wang, Ce Zheng, Runji Lin, Keming Lu, Junyang Lin, Chang Zhou, Tianyu Liu, and Baobao Chang. 
The reason behind good or bad: Towards a better mathematical verifier with natural language feedback, 2024.", + "[196] Bofei Gao, Zefan Cai, Runxin Xu, Peiyi Wang, Ce Zheng, Runji Lin, Keming Lu, Dayiheng Liu, Chang Zhou, Wen Xiao, et al. Llm critics help catch bugs in mathematics: Towards a better mathematical verifier with natural language feedback. arXiv preprint arXiv:2406.14024, 2024.", + "[197] Jiaxuan Gao, Shusheng Xu, Wenjie Ye, Weilin Liu, Chuyi He, Wei Fu, Zhiyu Mei, Guangju Wang, and Yi Wu. On designing effective rl reward at training time for llm reasoning. arXiv preprint arXiv:2410.15115, 2024.", + "[198] Luyu Gao, Aman Madaan, Shuyan Zhou, Uri Alon, Pengfei Liu, Yiming Yang, Jamie Callan, and Graham Neubig. PAL: Program-aided language models. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett, editors, Proceedings of the 40th International Conference on Machine Learning, volume 202 of Proceedings of Machine Learning Research, pages 10764–10799. PMLR, 23–29 Jul 2023. URL https://proceedings.mlr.press/v202/gao23f.html.", + "[199] Silin Gao, Antoine Bosselut, Samy Bengio, and Emmanuel Abbe. Augmenting llms' reasoning by reinforcing abstract thinking. arXiv preprint arXiv:2506.07751, 2025.", + "[200] Tianchen Gao, Jiashun Jin, Zheng Tracy Ke, and Gabriel Moryoussef. A comparison of deepseek and other llms. arXiv preprint arXiv:2502.03688, 2025.", + "[201] Zitian Gao, Boye Niu, Xuzheng He, Haotian Xu, Hongzhang Liu, Aiwei Liu, Xuming Hu, and Lijie Wen. Interpretable contrastive monte carlo tree search reasoning. arXiv preprint arXiv:2410.01707, 2024.", + "[202] Yuyao Ge, Shenghua Liu, Yiwei Wang, Lingrui Mei, Lizhe Chen, Baolong Bi, and Xueqi Cheng. Innate reasoning is not enough: In-context learning enhances reasoning large language models with less overthinking. arXiv preprint arXiv:2503.19602, 2025.", + "[203] Jonas Gehring, Kunhao Zheng, Jade Copet, Vegard Mella, Taco Cohen, and Gabriel Synnaeve. 
Rlef: Grounding code llms in execution feedback with reinforcement learning. arXiv preprint arXiv:2410.02089, 2024.", + "[204] Jonas Geiping, Sean McLeish, Neel Jain, John Kirchenbauer, Siddharth Singh, Brian R Bartoldson, Bhavya Kailkhura, Abhinav Bhatele, and Tom Goldstein. Scaling up test-time compute with latent reasoning: A recurrent depth approach. arXiv preprint arXiv:2502.05171, 2025.", + "[205] Gael Gendron, Qiming Bao, Michael Witbrock, and Gillian Dobbie. Large language models are not strong abstract reasoners. In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, IJCAI '24, August 2024. ISBN 978-1-956792-04-1. doi: 10.24963/ijcai.2024/693. URL https://doi.org/10.24963/ijcai.2024/693.", + "[206] Zelalem Gero, Chandan Singh, Hao Cheng, Tristan Naumann, Michel Galley, Jianfeng Gao, and Hoifung Poon. Self-verification improves few-shot clinical information extraction. In ICML 3rd Workshop on Interpretable Machine Learning in Healthcare (IMLH), June 2023. URL https://openreview.net/forum?id=SBbJICrg1S.", + "[207] Akash Ghosh, Debayan Datta, Sriparna Saha, and Chirag Agarwal. The multilingual mind: A survey of multilingual reasoning in language models. arXiv preprint arXiv:2502.09457, 2025." + ], + "bbox": [ + 181, + 90, + 826, + 910 + ], + "page_idx": 48 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 48 + }, + { + "type": "page_number", + "text": "49", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[208] Panagiotis Giadikiaroglou, Maria Lymperaiou, Giorgos Filandrianos, and Giorgos Stamou. Puzzle solving using reasoning of large language models: A survey. 
In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 11574–11591, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.646. URL https://aclanthology.org/2024.emnlp-main.646/.", + "[209] Alexi Gladstone, Ganesh Nanduru, Md Mofijul Islam, Peixuan Han, Hyeonjeong Ha, Aman Chadha, Yilun Du, Heng Ji, Jundong Li, and Tariq Iqbal. Energy-based transformers are scalable learners and thinkers. arXiv preprint arXiv:2507.02092, 2025.", + "[210] Elliot Glazer, Ege Erdil, Tamay Besiroglu, Diego Chicharro, Evan Chen, Alex Gunning, Caroline Falkman Olsson, Jean-Stanislas Denain, Anson Ho, Emily de Oliveira Santos, et al. Frontiermath: A benchmark for evaluating advanced mathematical reasoning in ai. arXiv preprint arXiv:2411.04872, 2024.", + "[211] Team GLM, Aohan Zeng, Bin Xu, Bowen Wang, Chenhui Zhang, Da Yin, Dan Zhang, Diego Rojas, Guanyu Feng, Hanlin Zhao, et al. Chatglm: A family of large language models from glm-130b to glm-4 all tools. arXiv preprint arXiv:2406.12793, 2024.", + "[212] Olga Golovneva, Moya Peng Chen, Spencer Poff, Martin Corredor, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. ROSCOE: A suite of metrics for scoring step-by-step reasoning. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=xYlJRpzZtsY.", + "[213] Olga Golovneva, Sean O'Brien, Ramakanth Pasunuru, Tianlu Wang, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. PATHFINDER: Guided search over multi-step reasoning paths. In R0-FoMo: Robustness of Few-shot and Zero-shot Learning in Large Foundation Models, December 2023. URL https://openreview.net/forum?id=5TsfEEwRsu.", + "[214] Ruihan Gong, Yue Liu, Wenjie Qu, Mingzhe Du, Yufei He, Yingwei Ma, Yulin Chen, Xiang Liu, Yi Wen, Xinfeng Li, et al. Efficient reasoning via chain of unconscious thought. 
arXiv preprint arXiv:2505.19756, 2025.", + "[215] Juraj Gottweis, Wei-Hung Weng, Alexander Daryin, Tao Tu, Anil Palepu, Petar Sirkovic, Artiom Myaskovsky, Felix Weissenberger, Keran Rong, Ryutaro Tanno, et al. Towards an ai co-scientist. arXiv preprint arXiv:2502.18864, 2025.", + "[216] Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Nan Duan, and Weizhu Chen. Critic: Large language models can self-correct with tool-interactive critiquing. arXiv preprint arXiv:2305.11738, 2023.", + "[217] Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Minlie Huang, Nan Duan, and Weizhu Chen. Tora: A tool-integrated reasoning agent for mathematical problem solving. arXiv preprint arXiv:2309.17452, 2023.", + "[218] Julia Grosse, Ruotian Wu, Ahmad Rashid, Philipp Hennig, Pascal Poupart, and Agustinus Kristiadi. Uncertainty-guided optimization on large language model search trees. arXiv preprint arXiv:2407.03951, 2024.", + "[219] Yanggan Gu, Junzhuo Li, Sirui Huang, Xin Zou, Zhenghua Li, and Xuming Hu. Capturing nuanced preferences: Preference-aligned distillation for small language models. arXiv preprint arXiv:2502.14272, 2025.", + "[220] Xinyan Guan, Yanjiang Liu, Xinyu Lu, Boxi Cao, Ben He, Xianpei Han, Le Sun, Jie Lou, Bowen Yu, Yaojie Lu, et al. Search, verify and feedback: Towards next generation post-training paradigm of foundation models via verifier engineering. arXiv preprint arXiv:2411.11504, 2024.", + "[221] Xinyan Guan, Jiali Zeng, Fandong Meng, Chunlei Xin, Yaojie Lu, Hongyu Lin, Xianpei Han, Le Sun, and Jie Zhou. Deep Learning: Thinking to retrieve step by step for large language models. arXiv preprint arXiv:2502.01142, 2025.", + "[222] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025." 
+ ], + "bbox": [ + 181, + 90, + 826, + 911 + ], + "page_idx": 49 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 49 + }, + { + "type": "page_number", + "text": "50", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 49 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[223] Etash Guha, Ryan Marten, Sedrick Keh, Negin Raoof, Georgios Smyrnis, Hritik Bansal, Marianna Nezhurina, Jean Mercat, Trung Vu, Zayne Sprague, et al. Openthoughts: Data recipes for reasoning models. arXiv preprint arXiv:2506.04178, 2025.", + "[224] Aryan Gulati, Brando Miranda, Eric Chen, Emily Xia, Kai Fronsdal, Bruno de Moraes Dumont, and Sanmi Koyejo. Putnam-AXIOM: A functional and static benchmark for measuring higher level mathematical reasoning. In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, 2024. URL https://openreview.net/forum?id=YXnwlZe0yf.", + "[225] Caglar Gulcehre, Tom Le Paine, Srivatsan Srinivasan, Ksenia Konyushkova, Lotte Weerts, Abhishek Sharma, Aditya Siddhant, Alex Ahern, Miaosen Wang, Chenjie Gu, et al. Reinforced self-training (rest) for language modeling. arXiv preprint arXiv:2308.08998, 2023.", + "[226] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Yu Wu, YK Li, et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. arXiv preprint arXiv:2401.14196, 2024.", + "[227] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.", + "[228] Honglin Guo, Kai Lv, Qipeng Guo, Tianyi Liang, Zhiheng Xi, Demin Song, Qiuyinzhe Zhang, Yu Sun, Kai Chen, Xipeng Qiu, et al. Critiq: Mining data quality criteria from human preferences. 
arXiv preprint arXiv:2502.19279, 2025.", + "[229] Kehan Guo, Bozhao Nan, Yujun Zhou, Taicheng Guo, Zhichun Guo, Mihir Surve, Zhenwen Liang, Nitesh V Chawla, Olaf Wiest, and Xiangliang Zhang. Can LLMs solve molecule puzzles? a multimodal benchmark for molecular structure elucidation. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=t1mAxb4Cop.", + "[230] Ziyu Guo, Renrui Zhang, Chengzhuo Tong, Zhizheng Zhao, Peng Gao, Hongsheng Li, and Pheng-Ann Heng. Can we generate images with cot? let's verify and reinforce image generation step by step. arXiv preprint arXiv:2501.13926, 2025.", + "[231] Dongge Han, Menglin Xia, Daniel Madrigal Diaz, Samuel Kessler, Ankur Mallick, Xuchao Zhang, Mirian Del Carmen Hipolito Garcia, Jin Xu, Victor Ruhle, and Saravan Rajmohan. Enhancing reasoning capabilities of small language models with blueprints and prompt template search. arXiv preprint arXiv:2506.08669, 2025.", + "[232] Tingxu Han, Chunrong Fang, Shiyu Zhao, Shiqing Ma, Zhenyu Chen, and Zhenting Wang. Token-budget-aware lIm reasoning. arXiv preprint arXiv:2412.18547, 2024.", + "[233] Michael Hanna, Ollie Liu, and Alexandre Variengien. How does GPT-2 compute greater-than?: Interpreting mathematical abilities in a pre-trained language model. September 2023. URL https://openreview.net/forum?id=p4PckNQR8k.", + "[234] Shibo Hao, Yi Gu, Haodi Ma, Joshua Hong, Zhen Wang, Daisy Wang, and Zhiting Hu. Reasoning with language model is planning with world model. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 8154-8173, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.507. 
URL https://aclanthology.org/2023.emnlp-main.507/.", + "[235] Shibo Hao, Yi Gu, Haotian Luo, Tianyang Liu, Xiyan Shao, Xinyuan Wang, Shuhua Xie, Haodi Ma, Adithya Samavedhi, Qiyue Gao, Zhen Wang, and Zhiting Hu. LLM reasoners: New evaluation, library, and analysis of step-by-step reasoning with large language models. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=b0y6fbSUG0.", + "[236] Shibo Hao, Sainbayar Sukhbaatar, DiJia Su, Xian Li, Zhiting Hu, Jason Weston, and Yuandong Tian. Training large language models to reason in a continuous latent space. arXiv preprint arXiv:2412.06769, 2024.", + "[237] Yunzhuo Hao, Jiawei Gu, Huichen Will Wang, Linjie Li, Zhengyuan Yang, Lijuan Wang, and Yu Cheng. Can mllms reason in multimodality? emma: An enhanced multimodal reasoning benchmark. arXiv preprint arXiv:2501.05444, 2025." + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 50 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 50 + }, + { + "type": "page_number", + "text": "51", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 50 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[238] Alexander Havrilla, Sharath Chandra Raparthy, Christoforos Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, and Roberta Raileanu. GLOre: When, where, and how to improve LLM reasoning via global and local refinements. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=LH6R06NxdB.", + "[239] Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, Jie Liu, Lei Qi, Zhiyuan Liu, and Maosong Sun. OlympiadBench: A challenging benchmark for promoting AGI with olympiad-level bilingual multimodal scientific problems. 
In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 3828–3850, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.211. URL https://aclanthology.org/2024.acl-long.211/.", + "[240] Chengbo He, Bochao Zou, Xin Li, Jiansheng Chen, Junliang Xing, and Huimin Ma. Enhancing llm reasoning with multi-path collaborative reactive and reflection agents. arXiv preprint arXiv:2501.00430, 2024.", + "[241] Feng He, Zijun Chen, Xinnian Liang, Tingting Ma, Yunqi Qiu, Shuangzhi Wu, and Junchi Yan. Protoreasoning: Prototypes as the foundation for generalizable reasoning in llms. arXiv preprint arXiv:2506.15211, 2025.", + "[242] Jujie He, Jiacai Liu, Chris Yuhao Liu, Rui Yan, Chaojie Wang, Peng Cheng, Xiaoyu Zhang, Fuxiang Zhang, Jiacheng Xu, Wei Shen, Siyuan Li, Liang Zeng, Tianwen Wei, Cheng Cheng, Bo An, Yang Liu, and Yahui Zhou. Skywork open reasoner series. https://capricious-hydrogen-41c.notion.site/Skywork-Open-Reaonser-Series-1d0bc9ae823a80459b46c149e4f51680, 2025. Note Blog.", + "[243] Junda He, Jieke Shi, Terry Yue Zhuo, Christoph Treude, Jiamou Sun, Zhenchang Xing, Xiaoning Du, and David Lo. From code to courtroom: Llms as the new software judges. arXiv preprint arXiv:2503.02246, 2025.", + "[244] Kang He and Kaushik Roy. Logictree: Structured proof exploration for coherent and rigorous logical reasoning with large language models. arXiv preprint arXiv:2504.14089, 2025.", + "[245] Mingqian He, Yongliang Shen, Wenqi Zhang, Zeqi Tan, and Weiming Lu. Advancing process verification for large language models via tree-based preference learning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 2086-2099, Miami, Florida, USA, November 2024. Association for Computational Linguistics. 
doi: 10.18653/v1/2024.emnlp-main.125. URL https://aclanthology.org/2024.emnlp-main.125/.", + "[246] Qiangqiang He, Shuwei Qian, Jie Zhang, and Chongjun Wang. Inference retrieval-augmented multi-modal chain-of-thoughts reasoning for language models. In ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5, 2025. doi: 10.1109/ICASSP49660.2025.10888701. URL https://openreview.net/pdf/9a7e7a9787d14ac8302215f8e4ef959606b78a94.pdf.", + "[247] Shenghua He, Tian Xia, Xuan Zhou, and Hui Wei. Response-level rewards are all you need for online reinforcement learning in llms: A mathematical perspective. arXiv preprint arXiv:2506.02553, 2025.", + "[248] Tao He, Hao Li, Jingchang Chen, Runxuan Liu, Yixin Cao, Lizi Liao, Zihao Zheng, Zheng Chu, Jiafeng Liang, Ming Liu, et al. A survey on complex reasoning of large language models through the lens of self-evolution. February 2025.", + "[249] Xingyang He, Xiao Ling, and Jie Liu. Smartthinker: Learning to compress and preserve reasoning by step-level length control. arXiv preprint arXiv:2507.04348, 2025.", + "[250] Yancheng He, Shilong Li, Jiaheng Liu, Weixun Wang, Xingyuan Bu, Ge Zhang, Zhongyuan Peng, Zhaoxiang Zhang, Wenbo Su, and Bo Zheng. Can large language models detect errors in long chain-of-thought reasoning? arXiv preprint arXiv:2502.19361, 2025.", + "[251] Yang He, Xiao Ding, Bibo Cai, Yufei Zhang, Kai Xiong, Zhouhao Sun, Bing Qin, and Ting Liu. Self-route: Automatic mode switching via capability estimation for efficient reasoning. arXiv preprint arXiv:2505.20664, 2025." 
+ ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 51 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 51 + }, + { + "type": "page_number", + "text": "52", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 51 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[252] Zhitao He, Sandeep Polisetty, Zhiyuan Fan, Yuchen Huang, Shujin Wu, et al. Mmboundary: Advancing mllm knowledge boundary awareness through reasoning step confidence calibration. arXiv preprint arXiv:2505.23224, 2025.", + "[253] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2), October 2021. URL https://openreview.net/forum?id=7Bywt2mQsCe.", + "[254] Alex Heyman and Joel Zylberberg. Evaluating the systematic reasoning abilities of large language models through graph coloring. arXiv preprint arXiv:2502.07087, 2025.", + "[255] Alex Heyman and Joel Zylberberg. Reasoning large language model errors arise from hallucinating critical problem features. arXiv preprint arXiv:2505.12151, 2025.", + "[256] Namgyu Ho, Laura Schmid, and Se-Young Yun. Large language models are reasoning teachers. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 14852–14882, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.830. URL https://aclanthology.org/2023.acl-long.830/.", + "[257] Andreas Hochlehnert, Hardik Bhatnagar, Vishaal Udandarao, Samuel Albanie, Ameya Prabhu, and Matthias Bethge. A sober look at progress in language model reasoning: Pitfalls and paths to reproducibility. 
arXiv preprint arXiv:2504.07086, 2025.", + "[258] Matthew Douglas Hoffman, Du Phan, david dohan, Sholto Douglas, Tuan Anh Le, Aaron T Parisi, Pavel Sountsov, Charles Sutton, Sharad Vikram, and Rif A. Saurous. Training chain-of-thought via latent-variable inference. In Thirty-seventh Conference on Neural Information Processing Systems, September 2023. URL https://openreview.net/forum?id=a147pIS2Co.", + "[259] Ruixin Hong, Xinyu Pang, and Changshui Zhang. Advances in reasoning by prompting large language models: A survey. Cybernetics and Intelligence, pages 1-15, 2024. doi: 10.26599/CAI.2024.9390004.", + "[260] Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, and Jie Tang. Cogagent: A visual language model for gui agents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14281-14290, June 2024. URL https://openaccess.thecvf.com/content/CVPR2024/papers/Hong_CogAgent_A_Visual_Vocabulary_model_for_GUI_Agents_CVPR_2024_paper.pdf.", + "[261] Arian Hosseini, Alessandro Sordoni, Daniel Kenji Toyama, Aaron Courville, and Rishabh Agarwal. Not all LLM reasoners are created equal. In The First Workshop on System-2 Reasoning at Scale, NeurIPS'24, October 2024. URL https://openreview.net/forum?id=aPAWbip1xV.", + "[262] Arian Hosseini, Xingdi Yuan, Nikolay Malkin, Aaron Courville, Alessandro Sordoni, and Rishabh Agarwal. V-STar: Training verifiers for self-taught reasoners. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=stmqBSW2dV.", + "[263] Bairu Hou, Yang Zhang, Jiabao Ji, Yujuan Liu, Kaizhi Qian, Jacob Andreas, and Shiyu Chang. Thinkprune: Pruning long chain-of-thought of llms via reinforcement learning. arXiv preprint arXiv:2504.01296, 2025.", + "[264] Zhenyu Hou, Xin Lv, Rui Lu, Jiajie Zhang, Yujiang Li, Zijun Yao, Juanzi Li, Jie Tang, and Yuxiao Dong. 
Advancing language model reasoning through reinforcement learning and inference scaling. arXiv preprint arXiv:2501.11651, 2025.", + "[265] Jian Hu. Reinforce++: A simple and efficient approach for aligning large language models. arXiv preprint arXiv:2501.03262, 2025.", + "[266] Jian Hu, Xibin Wu, Zilin Zhu, Xianyu, Weixun Wang, Dehao Zhang, and Yu Cao. Openrlhf: An easy-to-use, scalable and high-performance rlhf framework. arXiv preprint arXiv:2405.11143, 2024." + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 52 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 52 + }, + { + "type": "page_number", + "text": "53", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 52 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[267] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero, February 2025.", + "[268] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, Xiangyu Zhang, and Heung-Yeung Shum. Open-reasoner-zero: An open source approach to scaling up reinforcement learning on the base model. arXiv preprint arXiv:2503.24290, 2025.", + "[269] Mengkang Hu, Tianxing Chen, Qiguang Chen, Yao Mu, Wenqi Shao, and Ping Luo. Hiagent: Hierarchical working memory management for solving long-horizon agent tasks with large language model. arXiv preprint arXiv:2408.09559, 2024.", + "[270] Mengkang Hu, Yao Mu, Xinmiao Chelsey Yu, Mingyu Ding, Shiguang Wu, Wenqi Shao, Qiguang Chen, Bin Wang, Yu Qiao, and Ping Luo. Tree-planner: Efficient close-loop task planning with large language models. In The Twelfth International Conference on Learning Representations, January 2024. 
URL https://openreview.net/forum?id=Glcsg6zOe.", + "[271] Mengkang Hu, Pu Zhao, Can Xu, Qingfeng Sun, Jianguang Lou, Qingwei Lin, Ping Luo, and Saravan Rajmohan. Agentgen: Enhancing planning abilities for large language model based agent via environment and task generation. arXiv preprint arXiv:2408.00764, 2024.", + "[272] Mengkang Hu, Tianxing Chen, Yude Zou, Yuheng Lei, Qiguang Chen, Ming Li, Hongyuan Zhang, Wenqi Shao, and Ping Luo. Text2world: Benchmarking large language models for symbolic world model generation. arXiv preprint arXiv:2502.13092, 2025.", + "[273] Mengkang Hu, Yuhang Zhou, Wendong Fan, Yuzhou Nie, Bowei Xia, Tao Sun, Ziyu Ye, Zhaoxuan Jin, Yingru Li, Qiguang Chen, et al. Owl: Optimized workforce learning for general multi-agent assistance in real-world task automation. arXiv preprint arXiv:2505.23885, 2025.", + "[274] Renjun Hu, Yi Cheng, Libin Meng, Jiaxin Xia, Yi Zong, Xing Shi, and Wei Lin. Training an llm-as-a-judge model: Pipeline, insights, and practical lessons. arXiv preprint arXiv:2502.02988, 2025.", + "[275] Zhiyuan Hu, Chumin Liu, Xidong Feng, Yilun Zhao, See-Kiong Ng, Anh Tuan Luu, Junxian He, Pang Wei Koh, and Bryan Hooi. Uncertainty of thoughts: Uncertainty-aware planning enhances information seeking in large language models. In ICLR 2024 Workshop on Large Language Model (LLM) Agents, March 2024. URL https://openreview.net/forum?id=ZWyLjimciT.", + "[276] Maggie Huan, Yuetai Li, Tuney Zheng, Xiaoyu Xu, Seungone Kim, Minxin Du, Radha Poovendran, Graham Neubig, and Xiang Yue. Does math reasoning improve general llm capabilities? understanding transferability of llm reasoning. arXiv preprint arXiv:2507.00432, 2025.", + "[277] Chenghua Huang, Lu Wang, Fangkai Yang, Pu Zhao, Zhixu Li, Qingwei Lin, Dongmei Zhang, Saravan Rajmohan, and Qi Zhang. Lean and mean: Decoupled value policy optimization with global value guidance. 
arXiv preprint arXiv:2502.16944, 2025.", + "[278] Chengsong Huang, Langlin Huang, Jixuan Leng, Jiacheng Liu, and Jiaxin Huang. Efficient test-time scaling via self-calibration. arXiv preprint arXiv:2503.00031, 2025.", + "[279] Chengyu Huang, Zhengxin Zhang, and Claire Cardie. Hapo: Training language models to reason concisely via history-aware policy optimization. arXiv preprint arXiv:2505.11225, 2025.", + "[280] Haiduo Huang, Fuwei Yang, Zhenhua Liu, Yixing Xu, Jinze Li, Yang Liu, Xuanwu Yin, Dong Li, Pengju Ren, and Emad Barsoum. Jakiro: Boosting speculative decoding with decoupled multi-head via moe. arXiv preprint arXiv:2502.06282, 2025.", + "[281] Haoyang Huang, Tianyi Tang, Dongdong Zhang, Xin Zhao, Ting Song, Yan Xia, and Furu Wei. Not all languages are created equal in LLMs: Improving multilingual capability by cross-lingual-thought prompting. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 12365–12394, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-emnlp.826. URL https://aclanthology.org/2023-findings-emnlp.826/." + ], + "bbox": [ + 181, + 90, + 828, + 911 + ], + "page_idx": 53 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 53 + }, + { + "type": "page_number", + "text": "54", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 53 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[282] Hui Huang, Yancheng He, Hongli Zhou, Rui Zhang, Wei Liu, Weixun Wang, Wenbo Su, Bo Zheng, and Jiaheng Liu. Think-j: Learning to think for generative llm-as-a-judge. arXiv preprint arXiv:2505.14268, 2025.", + "[283] Jen-tse Huang, Eric John Li, Man Ho Lam, Tian Liang, Wenxuan Wang, Youliang Yuan, Wenxiang Jiao, Xing Wang, Zhaopeng Tu, and Michael R Lyu. How far are we on the decision-making of llms? 
evaluating llms' gaming ability in multi-agent environments. arXiv preprint arXiv:2403.11807, 2024.", + "[284] Jiaxing Huang and Jingyi Zhang. A survey on evaluation of multimodal large language models. arXiv preprint arXiv:2408.15769, 2024.", + "[285] Jie Huang and Kevin Chen-Chuan Chang. Towards reasoning in large language models: A survey. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Findings of the Association for Computational Linguistics: ACL 2023, pages 1049–1065, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.67. URL https://aclanthology.org/2023-findings-acl.67/.", + "[286] Jie Huang, Xinyun Chen, Swaroop Mishra, Huaixiu Steven Zheng, Adams Wei Yu, Xinying Song, and Denny Zhou. Large language models cannot self-correct reasoning yet. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=IkmD3fKBPQ.", + "[287] Jinyang Huang, Xiachong Feng, Qiguang Chen, Hanjie Zhao, Zihui Cheng, Jiesong Bai, Jingxuan Zhou, Min Li, and Libo Qin. Mldebugging: Towards benchmarking code debugging across multi-library scenarios. arXiv preprint arXiv:2506.13824, 2025.", + "[288] Kaixuan Huang, Jiacheng Guo, Zihao Li, Xiang Ji, Jiawei Ge, Wenzhe Li, Yingqing Guo, Tianle Cai, Hui Yuan, Runzhe Wang, et al. Math-perturb: Benchmarking llms' math reasoning abilities against hard perturbations. arXiv preprint arXiv:2502.06453, 2025.", + "[289] Lei Huang, Xiaocheng Feng, Weitao Ma, Liang Zhao, Yuchun Fan, Weihong Zhong, Dongliang Xu, Qing Yang, Hongtao Liu, and Bing Qin. Advancing large language model attribution through self-improving. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 3822-3836, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.223. 
URL https://aclanthology.org/2024.emnlp-main.223/.", + "[290] Shijue Huang, Hongru Wang, Wanjun Zhong, Zhaochen Su, Jiazhan Feng, Bowen Cao, and Yi R Fung. Adactrl: Towards adaptive and controllable reasoning via difficulty-aware budgeting. arXiv preprint arXiv:2505.18822, 2025.", + "[291] Shulin Huang, Linyi Yang, Yan Song, Shuang Chen, Leyang Cui, Ziyu Wan, Qingcheng Zeng, Ying Wen, Kun Shao, Weinan Zhang, et al. Thinkbench: Dynamic out-of-distribution evaluation for robust llm reasoning. arXiv preprint arXiv:2502.16268, 2025.", + "[292] Tiansheng Huang, Sihao Hu, Fatih Ilhan, Selim Furkan Tekin, Zachary Yahn, Yichang Xu, and Ling Liu. Safety tax: Safety alignment makes your large reasoning models less reasonable. arXiv preprint arXiv:2503.00555, 2025.", + "[293] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025.", + "[294] Xiaoke Huang, Juncheng Wu, Hui Liu, Xianfeng Tang, and Yuyin Zhou. m1: Unleash the potential of test-time scaling for medical reasoning with large language models. arXiv preprint arXiv:2504.00869, 2025.", + "[295] Yiming Huang, Xiao Liu, Yeyun Gong, Zhibin Gou, Yelong Shen, Nan Duan, and Weizhu Chen. Key-point-driven data synthesis with its enhancement on mathematical reasoning. arXiv preprint arXiv:2403.02333, 2024.", + "[296] Yuzhen Huang, Weihao Zeng, Xingshan Zeng, Qi Zhu, and Junxian He. Pitfalls of rule-and model-based verifiers-a case study on mathematical reasoning. arXiv preprint arXiv:2505.22203, 2025.", + "[297] Zeyu Huang, Tianhao Cheng, Zihan Qiu, Zili Wang, Yinghui Xu, Edoardo M Ponti, and Ivan Titov. Blending supervised and reinforcement fine-tuning with prefix sampling. arXiv preprint arXiv:2507.01679, 2025." 
+ ], + "bbox": [ + 181, + 90, + 825, + 912 + ], + "page_idx": 54 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 54 + }, + { + "type": "page_number", + "text": "55", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 54 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[298] Zhen Huang, Zengzhi Wang, Shijie Xia, Xuefeng Li, Haoyang Zou, Ruijie Xu, Run-Ze Fan, Lyumanshan Ye, Ethan Chern, Yixin Ye, Yikai Zhang, Yuqing Yang, Ting Wu, Binjie Wang, Shichao Sun, Yang Xiao, Yiyuan Li, Fan Zhou, Steffi Chern, Yiwei Qin, Yan Ma, Jiadi Su, Yixiu Liu, Yuxiang Zheng, Shaoting Zhang, Dahua Lin, Yu Qiao, and Pengfei Liu. Olympic: Benchmarking multi-discipline cognitive reasoning for superintelligent AI. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024. URL https://openreview.net/forum?id=ayF8bEKYQy.", + "[299] Zhen Huang, Haoyang Zou, Xuefeng Li, Yixiu Liu, Yuxiang Zheng, Ethan Chern, Shijie Xia, Yiwei Qin, Weizhe Yuan, and Pengfei Liu. O1 replication journey–part 2: Surpassing o1-preview through simple distillation, big progress or bitter lesson? arXiv preprint arXiv:2411.16489, 2024.", + "[300] Zhongzhen Huang, Gui Geng, Shengyi Hua, Zhen Huang, Haoyang Zou, Shaoting Zhang, Pengfei Liu, and Xiaofan Zhang. O1 replication journey–part 3: Inference-time scaling for medical reasoning. arXiv preprint arXiv:2501.06458, 2025.", + "[301] Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2.5-coder technical report. arXiv preprint arXiv:2409.12186, 2024.", + "[302] Hyeonbin Hwang, Doyoung Kim, Seungone Kim, Seonghyeon Ye, and Minjoon Seo. Self-exlore: Enhancing mathematical reasoning in language models with fine-grained rewards. 
In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 1444-1466, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.78. URL https://aclanthology.org/2024 findings-emnlp.78/.", + "[303] Shima Imani, Liang Du, and Harsh Shrivastava. Mathprompter: Mathematical reasoning using large language models. 2023.", + "[304] Md Ashraful Islam, Mohammed Eunus Ali, and Md Rizwan Parvez. Mapcoder: Multi-agent code generation for competitive problem solving. arXiv preprint arXiv:2405.11403, 2024.", + "[305] Hamish Ivison, Yizhong Wang, Valentina Pyatkin, Nathan Lambert, Matthew Peters, Pradeep Dasigi, Joel Jang, David Wadden, Noah A Smith, Iz Beltagy, et al. Camels in a changing climate: Enhancing lm adaptation with tulu 2, 2023.", + "[306] Hamish Ivison, Yizhong Wang, Jiacheng Liu, Zeqiu Wu, Valentina Pyatkin, Nathan Lambert, Noah A. Smith, Yejin Choi, and Hannaneh Hajishirzi. Unpacking DPO and PPO: Disentangling best practices for learning from preference feedback. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=JMBWTlazjW.", + "[307] Aaron Jaech, Adam Kalai, Adam Lerner, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024.", + "[308] Eeshaan Jain, Johann Wenckstern, Benedikt von Querfurth, and Charlotte Bunne. Test-time view selection for multi-modal decision making. In ICLR 2025 Workshop on Machine Learning for Genomics Explorations, March 2025. URL https://openreview.net/forum?id=aNmZ9s6BZV.", + "[309] Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. Livecodebench: Holistic and contamination free evaluation of large language models for code. 
In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=chfJJYC3iL.", + "[310] Sooyoung Jang and Hyung-II Kim. Entropy-aware model initialization for effective exploration in deep reinforcement learning. Sensors, 22(15):5845, 2022.", + "[311] Ke Ji, Jiahao Xu, Tian Liang, Qiuzhi Liu, Zhiwei He, Xingyu Chen, Xiaoyuan Liu, Zhijie Wang, Junying Chen, Benyou Wang, et al. The first few tokens are all you need: An efficient and effective unsupervised prefix fine-tuning method for reasoning models. arXiv preprint arXiv:2503.02875, 2025." + ], + "bbox": [ + 181, + 90, + 826, + 910 + ], + "page_idx": 55 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 55 + }, + { + "type": "page_number", + "text": "56", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 55 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[312] Tao Ji, Bin Guo, Yuanbin Wu, Qipeng Guo, Lixing Shen, Zhan Chen, Xipeng Qiu, Qi Zhang, and Tao Gui. Towards economical inference: Enabling deepseek's multi-head latent attention in any transformer-based llms. arXiv preprint arXiv:2502.14837, 2025.", + "[313] Yichao Ji. A small step towards reproducing openai o1: Progress report on the steiner open source models, October 2024. URL https://medium.com/@peakji/b9a756a00855.", + "[314] Yixin Ji, Juntao Li, Hai Ye, Kaixin Wu, Jia Xu, Linjian Mo, and Min Zhang. Test-time computing: from system-1 thinking to system-2 thinking. arXiv preprint arXiv:2501.02497, 2025.", + "[315] Ziwei Ji, Tiezheng Yu, Yan Xu, Nayeon Lee, Etsuko Ishii, and Pascale Fung. Towards mitigating LLM hallucination via self reflection. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 1827-1843, Singapore, December 2023. Association for Computational Linguistics. 
doi: 10.18653/v1/2023.findings-emnlp.123. URL https://aclanthology.org/2023.findings-emnlp.123/.", + "[316] Boyu Jia, Junzhe Zhang, Huixuan Zhang, and Xiaojun Wan. Exploring and evaluating multimodal knowledge reasoning consistency of multimodal large language models. arXiv preprint arXiv:2503.04801, 2025.", + "[317] Zeyu Jia, Alexander Rakhlin, and Tengyang Xie. Do we need to verify step by step? rethinking process supervision from a theoretical perspective. arXiv preprint arXiv:2502.10581, 2025.", + "[318] Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh, Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. Mistral 7b, October 2023.", + "[319] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024.", + "[320] Fengqing Jiang, Zhangchen Xu, Yuetai Li, Luyao Niu, Zhen Xiang, Bo Li, Bill Yuchen Lin, and Radha Poovendran. Safechain: Safety of language models with long chain-of-thought reasoning capabilities. arXiv preprint arXiv:2502.12025, 2025.", + "[321] Huchen Jiang, Yangyang Ma, Chaofan Ding, Kexin Luan, and Xinhan Di. Towards intrinsic self-correction enhancement in monte carlo tree search boosted reasoning via iterative preference learning. arXiv preprint arXiv:2412.17397, 2024.", + "[322] Jinhao Jiang, Jiayi Chen, Junyi Li, Ruiyang Ren, Shijie Wang, Wayne Xin Zhao, Yang Song, and Tao Zhang. Rag-star: Enhancing deliberative reasoning with retrieval augmented verification and refinement. 
arXiv preprint arXiv:2412.12881, 2024.", + "[323] Jinhao Jiang, Zhipeng Chen, Yingqian Min, Jie Chen, Xiaoxue Cheng, Jiapeng Wang, Yiru Tang, Haoxiang Sun, Jia Deng, Wayne Xin Zhao, et al. Technical report: Enhancing llm reasoning with reward-guided tree search. arXiv preprint arXiv:2411.11694, 2024.", + "[324] Nan Jiang, Ziming Wu, De-Chuan Zhan, Fuming Lai, and Shaobing Lian. Dart: Distilling autoregressive reasoning to silent thought. arXiv preprint arXiv:2506.11752, 2025.", + "[325] Shuyang Jiang, Yusheng Liao, Zhe Chen, Ya Zhang, Yanfeng Wang, and Yu Wang. Meds 3: Towards medical small language models with self-evolved slow thinking. arXiv preprint arXiv:2501.12051, 2025.", + "[326] Yuxuan Jiang, Dawei Li, and Frank Ferraro. Drp: Distilled reasoning pruning with skill-aware step decomposition for efficient large reasoning models. arXiv preprint arXiv:2505.13975, 2025.", + "[327] Carlos E Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik R Narasimhan. SWE-bench: Can language models resolve real-world github issues? In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=VTF8yNQM66.", + "[328] Di Jin, Eileen Pan, Nassim Oufattole, Wei-Hung Weng, Hanyi Fang, and Peter Szolovits. What disease does this patient have? a large-scale open domain question answering dataset" + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 56 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 56 + }, + { + "type": "page_number", + "text": "57", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 56 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "from medical exams. Applied Sciences, 11(14), July 2021. ISSN 2076-3417. doi: 10.3390/app11146421. 
URL https://www.mdpi.com/2076-3417/11/14/6421.", + "[329] Mingyu Jin, Weidi Luo, Sitao Cheng, Xinyi Wang, Wenyue Hua, Ruixiang Tang, William Yang Wang, and Yongfeng Zhang. Disentangling memory and reasoning ability in large language models. arXiv preprint arXiv:2411.13504, 2024.", + "[330] Mingyu Jin, Qinkai Yu, Dong Shu, Haiyan Zhao, Wenyue Hua, Yanda Meng, Yongfeng Zhang, and Mengnan Du. The impact of reasoning step length on large language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 1830–1842, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.108. URL https://aclanthology.org/2024-findings-acl.108/.", + "[331] Mingyu Jin, Qinkai Yu, Jingyuan Huang, Qingcheng Zeng, Zhenting Wang, Wenyue Hua, Haiyan Zhao, Kai Mei, Yanda Meng, Kaize Ding, Fan Yang, Mengnan Du, and Yongfeng Zhang. Exploring concept depth: How large language models acquire knowledge and concept at different layers? In Owen Rambow, Leo Wanner, Marianna Apidianaki, Hend Al-Khalifa, Barbara Di Eugenio, and Steven Schockaert, editors, Proceedings of the 31st International Conference on Computational Linguistics, pages 558-573, Abu Dhabi, UAE, January 2025. Association for Computational Linguistics. URL https://aclanthology.org/2025.coling-main.37/.", + "[332] Zhensheng Jin, Xinze Li, Yifan Ji, Chunyi Peng, Zhenghao Liu, Qi Shi, Yukun Yan, Shuo Wang, Furong Peng, and Ge Yu. Recut: Balancing reasoning length and accuracy in llms via stepwise trails and preference optimization. arXiv preprint arXiv:2506.10822, 2025.", + "[333] Andy L Jones. Scaling scaling laws with board games. arXiv preprint arXiv:2104.03113, 2021.", + "[334] Cameron R Jones and Benjamin K Bergen. Large language models pass the Turing test. arXiv preprint arXiv:2503.23674, 2025.", + "[335] Prashank Kadam. 
Gpt-guided monte carlo tree search for symbolic regression in financial fraud detection. arXiv preprint arXiv:2411.04459, 2024.", + "[336] Saurav Kadavath, Tom Conerly, Amanda Askell, Tom Henighan, Dawn Drain, Ethan Perez, Nicholas Schiefer, Zac Hatfield-Dodds, Nova DasSarma, Eli Tran-Johnson, et al. Language models (mostly) know what they know. arXiv preprint arXiv:2207.05221, 2022.", + "[337] Ryo Kamoi, Sarkar Snigdha Sarathi Das, Renze Lou, Jihyun Janice Ahn, Yilun Zhao, Xiaoxin Lu, Nan Zhang, Yusen Zhang, Haoran Ranran Zhang, Sujeeth Reddy Vummanthala, Salika Dave, Shaobo Qin, Arman Cohan, Wenpeng Yin, and Rui Zhang. Evaluating LLMs at detecting errors in LLM responses. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=dnwRScljXr.", + "[338] Jikun Kang, Xin Zhe Li, Xi Chen, Amirreza Kazemi, Qianyi Sun, Boxing Chen, Dong Li, Xu He, Quan He, Feng Wen, et al. Mindstar: Enhancing math reasoning in pre-trained llms at inference time. arXiv preprint arXiv:2405.16265, 2024.", + "[339] Liwei Kang, Zirui Zhao, David Hsu, and Wee Sun Lee. On the empirical complexity of reasoning and planning in LLMs. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 2897-2936, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.164. URL https://aclanthology.org/2024-findings-emnlp.164/.", + "[340] Yu Kang, Xianghui Sun, Liangyu Chen, and Wei Zou. C3ot: Generating shorter chain-of-thought without compromising effectiveness. 39(23):24312-24320, Apr 2025.", + "[341] Zhewei Kang, Xuandong Zhao, and Dawn Song. Scalable best-of-n selection for large language models via self-certainty. arXiv preprint arXiv:2502.18581, 2025.", + "[342] Manuj Kant, Sareh Nabi, Manav Kant, Roland Scharrer, Megan Ma, and Marzieh Nabi. Towards robust legal reasoning: Harnessing logical llms in law. 
arXiv preprint arXiv:2502.17638, 2025.", + "[343] Mehran Kazemi, Najoung Kim, Deepti Bhatia, Xin Xu, and Deepak Ramachandran. LAM-BADA: Backward chaining for automated reasoning in natural language. In Anna Rogers," + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 57 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 57 + }, + { + "type": "page_number", + "text": "58", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 57 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6547-6568, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.361. URL https://aclanthology.org/2023.acl-long.361/.", + "[344] Amirhossein Kazemnejad, Milad Aghajohari, Eva Portelance, Alessandro Sordoni, Siva Reddy, Aaron Courville, and Nicolas Le Roux. Vineppo: Unlocking rl potential for llm reasoning through refined credit assignment. arXiv preprint arXiv:2410.01679, 2024.", + "[345] Muhammad Khalifa, Lajanugen Logeswaran, Moontae Lee, Honglak Lee, and Lu Wang. Grace: Discriminator-guided chain-of-thought reasoning. arXiv preprint arXiv:2305.14934, 2023.", + "[346] Artyom Kharinaev, Viktor Moskvoretskii, Egor Shvetsov, Kseniia Studenikina, Bykov Mikhail, and Evgeny Burnaev. Investigating the impact of quantization methods on the safety and reliability of large language models. arXiv preprint arXiv:2502.15799, 2025.", + "[347] Hyunwoo Kim, Melanie Sclar, Tan Zhi-Xuan, Lance Ying, Sydney Levine, Yang Liu, Joshua B Tenenbaum, and Yejin Choi. Hypothesis-driven theory-of-mind reasoning for large language models. arXiv preprint arXiv:2502.11881, 2025.", + "[348] Jiin Kim, Byeongjun Shin, Jinha Chung, and Minsoo Rhu. 
The cost of dynamic reasoning: Demystifying ai agents and test-time scaling from an ai infrastructure perspective. arXiv preprint arXiv:2506.04301, 2025.", + "[349] Juno Kim, Denny Wu, Jason Lee, and Taiji Suzuki. Metastable dynamics of chain-of-thought reasoning: Provable benefits of search, rl and distillation. arXiv preprint arXiv:2502.01694, 2025.", + "[350] Moo Jin Kim, Chelsea Finn, and Percy Liang. Fine-tuning vision-language-action models: Optimizing speed and success. arXiv preprint arXiv:2502.19645, 2025.", + "[351] Naryeong Kim, Sungmin Kang, Gabin An, and Shin Yoo. Lachesis: Predicting llm inference accuracy using structural properties of reasoning paths. arXiv preprint arXiv:2412.08281, 2024.", + "[352] Seungone Kim, Se Joo, Doyoung Kim, Joel Jang, Seonghyeon Ye, Jamin Shin, and Minjoon Seo. The CoT collection: Improving zero-shot and few-shot learning of language models via chain-of-thought fine-tuning. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12685-12708, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.782. URL https://aclanthology.org/2023.emnlp-main.782/.", + "[353] Seungone Kim, Juyoung Suk, Shayne Longpre, Bill Yuchen Lin, Jamin Shin, Sean Welleck, Graham Neubig, Moontae Lee, Kyungjae Lee, and Minjoon Seo. Prometheus 2: An open source language model specialized in evaluating other language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 4334-4353, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.248. URL https://aclanthology.org/2024.emnlp-main.248/.", + "[354] Sunnie SY Kim, Jennifer Wortman Vaughan, Q Vera Liao, Tania Lombrozo, and Olga Russakovsky. 
Fostering appropriate reliance on large language models: The role of explanations, sources, and inconsistencies. arXiv preprint arXiv:2502.08554, 2025.", + "[355] Jing Yu Koh, Stephen McAleer, Daniel Fried, and Ruslan Salakhutdinov. Tree search for language model agents. arXiv preprint arXiv:2407.01476, 2024.", + "[356] Deqian Kong, Minglu Zhao, Dehong Xu, Bo Pang, Shu Wang, Edouardo Honig, Zhangzhang Si, Chuan Li, Jianwen Xie, Sirui Xie, et al. Scalable language models with posterior inference of latent thought vectors. arXiv preprint arXiv:2502.01567, 2025.", + "[357] Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. Overthinking: Slowdown attacks on reasoning llms. arXiv preprint arXiv:2502.02542, 2025." + ], + "bbox": [ + 181, + 90, + 828, + 912 + ], + "page_idx": 58 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 58 + }, + { + "type": "page_number", + "text": "59", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 58 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[358] Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, et al. Training language models to self-correct via reinforcement learning. arXiv preprint arXiv:2409.12917, 2024.", + "[359] Komal Kumar, Tajamul Ashraf, Omkar Thawakar, Rao Muhammad Anwer, Hisham Cholakkal, Mubarak Shah, Ming-Hsuan Yang, Phillip H. S. Torr, Salman Khan, and Fahad Shahbaz Khan. Llm post-training: A deep dive into reasoning large language models, 2025.", + "[360] Martin Kuo, Jianyi Zhang, Aolin Ding, Qinsi Wang, Louis DiValentin, Yujia Bao, Wei Wei, Da-Cheng Juan, Hai Li, and Yiran Chen. H-cot: Hijacking the chain-of-thought safety reasoning mechanism to jailbreak large reasoning models, including openai o1/o3, deepseek-r1, and gemini 2.0 flash thinking. 
arXiv preprint arXiv:2502.12893, 2025.", + "[361] EvolvingLMMs Lab. Open-r1-multimodal. https://github.com/EvolvingLMMs-Lab/open-r1-multimodal, February 2025.", + "[362] Bespoke Labs. Bespoke-stratos: The unreasonable effectiveness of reasoning distillation. https://www.bespokelabs.ai/blog/bespoke-stratos-the-unreasonable-effectiveness-of-reasoning-distillation, January 2025. Accessed: 2025-01-22.", + "[363] Inception Labs, Samar Khanna, Siddhant Kharbanda, Shufan Li, Harshit Varma, Eric Wang, Sawyer Birnbaum, Ziyang Luo, Yanis Miraoui, Akash Palrecha, et al. Mercury: Ultra-fast language models based on diffusion. arXiv preprint arXiv:2506.17298, 2025.", + "[364] Huiyuan Lai, Xiao Zhang, and Malvina Nissim. Multidimensional consistency improves reasoning in language models. arXiv preprint arXiv:2503.02670, 2025.", + "[365] Xin Lai, Zhuotao Tian, Yukang Chen, Senqiao Yang, Xiangru Peng, and Jiaya Jia. Step-dpo: Step-wise preference optimization for long-chain reasoning of llms. arXiv preprint arXiv:2406.18629, 2024.", + "[366] Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V. Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, Yuling Gu, Saumya Malik, Victoria Graf, Jena D. Hwang, Jiangjiang Yang, Ronan Le Bras, Oyvind Tafjord, Chris Wilhelm, Luca Soldaini, Noah A. Smith, Yizhong Wang, Pradeep Dasigi, and Hannaneh Hajishirzi. Tulu 3: Pushing frontiers in open language model post-training, 2024.", + "[367] Nathan Lambert, Valentina Pyatkin, Jacob Morrison, LJ Miranda, Bill Yuchen Lin, Khyathi Chandu, Nouha Dziri, Sachin Kumar, Tom Zick, Yejin Choi, et al. Rewardbench: Evaluating reward models for language modeling. arXiv preprint arXiv:2403.13787, 2024.", + "[368] Andrew Lampinen, Ishita Dasgupta, Stephanie Chan, Kory Mathewson, Mh Tessler, Antonia Creswell, James McClelland, Jane Wang, and Felix Hill. Can language models learn from explanations in context? 
In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang, editors, Findings of the Association for Computational Linguistics: EMNLP 2022, pages 537-563, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022-findings-emnlp.38. URL https://aclanthology.org/2022-findings-emnlp.38.", + "[369] Jack Lanchantin, Angelica Chen, Shehzaad Dhuliawala, Ping Yu, Jason Weston, Sainbayar Sukhbaatar, and Ilia Kulikov. Diverse preference optimization. arXiv preprint arXiv:2501.18101, 2025.", + "[370] Anh Duc Le, Tu Vu, Nam Le Hai, Nguyen Thi Ngoc Diep, Linh Ngo Van, Trung Le, and Thien Huu Nguyen. Cot2align: Cross-chain of thought distillation via optimal transport alignment for language models with different tokenizers. arXiv preprint arXiv:2502.16806, 2025.", + "[371] Joshua Ong Jun Leang, Aryo Pradipta Gema, and Shay B Cohen. Comat: Chain of mathematically annotated thought improves mathematical reasoning. arXiv preprint arXiv:2410.10336, 2024.", + "[372] Joshua Ong Jun Leang, Giwon Hong, Wenda Li, and Shay B Cohen. Theorem prover as a judge for synthetic data generation. arXiv preprint arXiv:2502.13137, 2025.", + "[373] Byeongchan Lee, Jonghoon Lee, Dongyoung Kim, Jaehyung Kim, and Jinwoo Shin. Collaborative llm inference via planning for efficient reasoning. arXiv preprint arXiv:2506.11578, 2025." + ], + "bbox": [ + 181, + 90, + 826, + 910 + ], + "page_idx": 59 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 59 + }, + { + "type": "page_number", + "text": "60", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 59 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[374] Hyunseok Lee, Seunghyuk Oh, Jaehyung Kim, Jinwoo Shin, and Jihoon Tack. Revise: Learning to refine at test-time via intrinsic self-verification. arXiv preprint arXiv:2502.14565, 2025.", + "[375] Jinu Lee and Julia Hockenmaier. 
Evaluating step-by-step reasoning traces: A survey. arXiv preprint arXiv:2502.12289, 2025.", + "[376] Jung Hyun Lee, June Yong Yang, Byeongho Heo, Dongyoon Han, and Kang Min Yoo. Token-supervised value models for enhancing mathematical reasoning capabilities of large language models. arXiv preprint arXiv:2407.12863, 2024.", + "[377] Kuang-Huei Lee, Ian Fischer, Yueh-Hua Wu, Dave Marwood, Shumeet Baluja, Dale Schuurmans, and Xinyun Chen. Evolving deeper llm thinking. arXiv preprint arXiv:2501.09891, 2025.", + "[378] Lucas Lehnert, Sainbayar Sukhbaatar, DiJia Su, Qinqing Zheng, Paul McVay, Michael Rabbat, and Yuandong Tian. Beyond a*: Better planning with transformers via search dynamics bootstrapping. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=SGoVIC0u0f.", + "[379] Bin Lei, Yi Zhang, Shan Zuo, Ali Payani, and Caiwen Ding. MACM: Utilizing a multi-agent system for condition mining in solving complex mathematical problems. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=VR2RdSxtzs.", + "[380] Jixuan Leng, Cassandra A Cohen, Zhixian Zhang, Chenyan Xiong, and William W Cohen. Semi-structured llm reasoners can be rigorously audited. arXiv preprint arXiv:2505.24217, 2025.", + "[381] Adam Lerer, Hengyuan Hu, Jakob Foerster, and Noam Brown. Improving policies via search in cooperative partially observable games. Proceedings of the AAAI Conference on Artificial Intelligence, 34(05):7187-7194, Apr. 2020. doi: 10.1609/aaai.v34i05.6208. URL https://ojs.aaai.org/index.php/AAAI/article/view/6208.", + "[382] Belinda Z Li, Been Kim, and Zi Wang. Questbench: Can llms ask the right question to acquire information in reasoning tasks? arXiv preprint arXiv:2503.22674, 2025.", + "[383] Bingxuan Li, Yiwei Wang, Jiuming Gu, Kai-Wei Chang, and Nanyun Peng. Metal: A multiagent framework for chart generation with test-time scaling. 
arXiv preprint arXiv:2502.17651, 2025.", + "[384] Bohan Li, Jiannan Guan, Longxu Dou, Yunlong Feng, Dingzirui Wang, Yang Xu, Enbo Wang, Qiguang Chen, Bichen Wang, Xiao Xu, et al. Can large language models understand you better? an mbti personality detection dataset aligned with population traits. arXiv preprint arXiv:2412.12510, 2024.", + "[385] Chen Li, Weiqi Wang, Jingcheng Hu, Yixuan Wei, Nanning Zheng, Han Hu, Zheng Zhang, and Houwen Peng. Common 7b language models already possess strong math capabilities. arXiv preprint arXiv:2403.04706, 2024.", + "[386] Chen Li, Nazhou Liu, and Kai Yang. Adaptive group policy optimization: Towards stable training and token-efficient reasoning. arXiv preprint arXiv:2503.15952, 2025.", + "[387] Chengpeng Li, Zhengyang Tang, Ziniu Li, Mingfeng Xue, Keqin Bao, Tian Ding, Ruoyu Sun, Benyou Wang, Xiang Wang, Junyang Lin, et al. Cort: Code-integrated reasoning within thinking. arXiv preprint arXiv:2506.09820, 2025.", + "[388] Chengpeng Li, Mingfeng Xue, Zhenru Zhang, Jiaxi Yang, Beichen Zhang, Xiang Wang, Bowen Yu, Binyuan Hui, Junyang Lin, and Dayiheng Liu. Start: Self-taught reasoner with tools. arXiv preprint arXiv:2503.04625, 2025.", + "[389] Chengshu Li, Jacky Liang, Andy Zeng, Xinyun Chen, Karol Hausman, Dorsa Sadigh, Sergey Levine, Li Fei-Fei, Fei Xia, and Brian Ichter. Chain of code: Reasoning with a language model-augmented code emulator. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 28259-28277. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/1i24ar.html." 
+ ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 60 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 60 + }, + { + "type": "page_number", + "text": "61", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 60 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[390] Chengzhu Li, Wenshan Wu, Huanyu Zhang, Yan Xia, Shaoguang Mao, Li Dong, Ivan Vulic, and Furu Wei. Imagine while reasoning in space: Multimodal visualization-of-thought. arXiv preprint arXiv:2501.07542, 2025.", + "[391] Cheryl Li, Tianyuan Xu, and Yiwen Guo. Reasoning-as-logic-units: Scaling test-time reasoning in large language models through logic unit alignment. arXiv preprint arXiv:2502.07803, 2025.", + "[392] Dacheng Li, Shiyi Cao, Chengkun Cao, Xiuyu Li, Shangyin Tan, Kurt Keutzer, Jiarong Xing, Joseph E Gonzalez, and Ion Stoica. S*: Test time scaling for code generation. arXiv preprint arXiv:2502.14382, 2025.", + "[393] Dacheng Li, Shiyi Cao, Tyler Griggs, Shu Liu, Xiangxi Mo, Shishir G Patil, Matei Zaharia, Joseph E Gonzalez, and Ion Stoica. Llms can easily learn to reason from demonstrations structure, not content, is what matters! arXiv preprint arXiv:2502.07374, 2025.", + "[394] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, et al. From generation to judgment: Opportunities and challenges of llm-as-a-judge. arXiv preprint arXiv:2411.16594, 2024.", + "[395] Gengxu Li, Tingyu Xia, Yi Chang, and Yuan Wu. Length-controlled margin-based preference optimization without reference model. arXiv preprint arXiv:2502.14643, 2025.", + "[396] Haitao Li, Qian Dong, Junjie Chen, Huixue Su, Yujia Zhou, Qingyao Ai, Ziyi Ye, and Yiqun Liu. Llms-as-judges: a comprehensive survey on llm-based evaluation methods. 
arXiv preprint arXiv:2412.05579, 2024.", + "[397] Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. https://huggingface.co/AI-MO/NuminaMath-CoT, 2024.", + "[398] Jia-Nan Li, Jian Guan, Wei Wu, and Rui Yan. Extended inductive reasoning for personalized preference inference from behavioral signals. arXiv preprint arXiv:2505.18071, 2025.", + "[399] Jiachun Li, Pengfei Cao, Yubo Chen, Jiexin Xu, Huajun Li, Xiaojian Jiang, Kang Liu, and Jun Zhao. Rewarding curse: Analyze and mitigate reward modeling issues for llm reasoning. arXiv preprint arXiv:2503.05188, 2025.", + "[400] Jierui Li, Hung Le, Yinbo Zhou, Caiming Xiong, Silvio Savarese, and Doyen Sahoo. Codetree: Agent-guided tree search for code generation with large language models. arXiv preprint arXiv:2411.04329, 2024.", + "[401] Junlong Li, Daya Guo, Dejian Yang, Runxin Xu, Yu Wu, and Junxian He. Codei/o: Condensing reasoning patterns via code input-output prediction. arXiv preprint arXiv:2502.07316, 2025.", + "[402] Kaixin Li. Verified taco problems. https://huggingface.co/datasets/likaixin/TACO-verified, 2024. URL https://huggingface.co/datasets/likaixin/TACO-verified.", + "[403] Kechen Li, Wenqi Zhu, Coralia Cartis, Tianbo Ji, and Shiwei Liu. Sos1: O1 and r1-like reasoning llms are sum-of-square solvers. arXiv preprint arXiv:2502.20545, 2025.", + "[404] Long Li, Weiwen Xu, Jiayan Guo, Ruochen Zhao, Xingxuan Li, Yuqian Yuan, Boqiang Zhang, Yuming Jiang, Yifei Xin, Ronghao Dang, et al. Chain of ideas: Revolutionizing research via novel idea development with llm agents. arXiv preprint arXiv:2410.13185, 2024.", + "[405] Margaret Li, Sneha Kudugunta, and Luke Zettlemoyer. (mis) fitting: A survey of scaling laws. 
arXiv preprint arXiv:2502.18969, 2025.", + "[406] Ming Li, Lichang Chen, Jiuhai Chen, Shwai He, Heng Huang, Jiuming Gu, and Tianyi Zhou. Reflection-tuning: Data recycling improves llm instruction-tuning. arXiv preprint arXiv:2310.11716, 2023.", + "[407] Ming Li, Yanhong Li, and Tianyi Zhou. What happened in llms layers when trained for fast vs. slow thinking: A gradient perspective. arXiv preprint arXiv:2410.23743, 2024." + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 61 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 61 + }, + { + "type": "page_number", + "text": "62", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 61 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[408] Minzhi Li, Zhengyuan Liu, Shumin Deng, Shafiq Joty, Nancy Chen, and Min-Yen Kan. Dna-eval: Enhancing large language model evaluation through decomposition and aggregation. In Proceedings of the 31st International Conference on Computational Linguistics, pages 2277-2290, January 2025.", + "[409] Moxin Li, Yuantao Zhang, Wenjie Wang, Wentao Shi, Zhuo Liu, Fuli Feng, and Tat-Seng Chua. Self-improvement towards pareto optimality: Mitigating preference conflicts in multi-objective alignment. arXiv preprint arXiv:2502.14354, 2025.", + "[410] Peiji Li, Kai Lv, Yunfan Shao, Yichuan Ma, Linyang Li, Xiaqing Zheng, Xipeng Qiu, and Qipeng Guo. Fastmcts: A simple sampling strategy for data synthesis. arXiv preprint arXiv:2502.11476, 2025.", + "[411] Qingyao Li, Wei Xia, Kounianhua Du, Xinyi Dai, Ruiming Tang, Yasheng Wang, Yong Yu, and Weinan Zhang. Rethinkmcts: Refining erroneous thoughts in monte carlo tree search for code generation. arXiv preprint arXiv:2409.09584, 2024.", + "[412] Shuangtao Li, Shuaihao Dong, Kexin Luan, Xinhan Di, and Chaofan Ding. Enhancing reasoning through process supervision with monte carlo tree search. 
In The First Workshop on Neural Reasoning and Mathematical Discovery at AAAI'2025, January 2025. URL https://openreview.net/forum?id=OupEEi1341.", + "[413] Siheng Li, Zhanhui Zhou, Wai Lam, Chao Yang, and Chaochao Lu. Repo: Replay-enhanced policy optimization. arXiv preprint arXiv:2506.09340, 2025.", + "[414] Wen-Ding Li, Keya Hu, Carter Larsen, Yuqing Wu, Simon Alford, Caleb Woo, Spencer M Dunn, Hao Tang, Michelangelo Naim, Dat Nguyen, et al. Combining induction and transduction for abstract reasoning. arXiv preprint arXiv:2411.02272, 2024.", + "[415] Wendi Li and Yixuan Li. Process reward model with q-value rankings. arXiv preprint arXiv:2410.11287, 2024.", + "[416] Wenjun Li, Changyu Chen, and Pradeep Varakantham. Unlocking large language model's planning capabilities with maximum diversity fine-tuning. arXiv preprint arXiv:2406.10479, 2024.", + "[417] Xiaonan Li and Xipeng Qiu. MoT: Memory-of-thought enables ChatGPT to self-improve. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 6354-6374, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.392. URL https://aclanthology.org/2023.emnlp-main.392/.", + "[418] Xiaoxi Li, Guanting Dong, Jiajie Jin, Yuyao Zhang, Yujia Zhou, Yutao Zhu, Peitian Zhang, and Zhicheng Dou. Search-o1: Agentic search-enhanced large reasoning models. arXiv preprint arXiv:2501.05366, 2025.", + "[419] Xinzhe Li. A survey on llm test-time compute via search: Tasks, llm profiling, search algorithms, and relevant frameworks. arXiv preprint arXiv:2501.10069, 2025.", + "[420] Xuefeng Li, Haoyang Zou, and Pengfei Liu. Limr: Less is more for rl scaling. arXiv preprint arXiv:2502.11886, 2025.", + "[421] Yafu Li, Zhilin Wang, Tingchen Fu, Ganqu Cui, Sen Yang, and Yu Cheng. From drafts to answers: Unlocking lIm potential via aggregation fine-tuning. 
arXiv preprint arXiv:2501.11877, 2025.", + "[422] Yang Li. Policy guided tree search for enhanced ltm reasoning. arXiv preprint arXiv:2502.06813, 2025.", + "[423] Yang Li, Dong Du, Linfeng Song, Chen Li, Weikang Wang, Tao Yang, and Haitao Mi. Hunyuanprover: A scalable data synthesis framework and guided tree search for automated theorem proving. arXiv preprint arXiv:2412.20735, 2024.", + "[424] Yang Li, Youssef Emad, Karthik Padthe, Jack Lanchantin, Weizhe Yuan, Thao Nguyen, Jason Weston, Shang-Wen Li, Dong Wang, Ilia Kulikov, et al. Naturalthoughts: Selecting and distilling reasoning traces for general reasoning tasks. arXiv preprint arXiv:2507.01921, 2025.", + "[425] Yifei Li, Zeqi Lin, Shizhuo Zhang, Qiang Fu, Bei Chen, Jian-Guang Lou, and Weizhu Chen. Making language models better reasoners with step-aware verifier. In Anna Rogers, Jordan" + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 62 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 62 + }, + { + "type": "page_number", + "text": "63", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 62 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5315-5333, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.291. URL https://aclanthology.org/2023.acl-long.291/.", + "[426] Yiwei Li, Ji Zhang, Shaoxiong Feng, Peiwen Yuan, Xinglin Wang, Jiayi Shi, Yueqi Zhang, Chuyi Tan, Boyuan Pan, Yao Hu, et al. Revisiting self-consistency from dynamic distributional alignment perspective on answer aggregation. 
arXiv preprint arXiv:2502.19830, 2025.", + "[427] Yujia Li, David Choi, Junyoung Chung, Nate Kushman, Julian Schrittwieser, Rémi Leblond, Tom Eccles, James Keeling, Felix Gimeno, Agustin Dal Lago, Thomas Hubert, Peter Choy, Cyprien de Masson d'Autume, Igor Babuschkin, Xinyun Chen, Po-Sen Huang, Johannes Welbl, Sven Gowal, Alexey Cherepanov, James Molloy, Daniel Mankowitz, Esme Sutherland Robson, Pushmeet Kohli, Nando de Freitas, Koray Kavukcuoglu, and Oriol Vinyals. Competition-level code generation with alphabet. arXiv preprint arXiv:2203.07814, 2022.", + "[428] Yunxin Li, Zhenyu Liu, Zitao Li, Xuanyu Zhang, Zhenran Xu, Xinyu Chen, Haoyuan Shi, Shenyuan Jiang, Xintong Wang, Jifang Wang, et al. Perception, reason, think, and plan: A survey on large multimodal reasoning models. arXiv preprint arXiv:2505.04921, 2025.", + "[429] Zheng Li, Qingxiu Dong, Jingyuan Ma, Di Zhang, and Zhifang Sui. Selfbudgeter: Adaptive token allocation for efficient llm reasoning. arXiv preprint arXiv:2505.11274, 2025.", + "[430] Zhiyuan Li, Hong Liu, Denny Zhou, and Tengyu Ma. Chain of thought empowers transformers to solve inherently serial problems. In The Twelfth International Conference on Learning Representations, January 2023. URL https://openreview.net/pdf?id=3EWTEy9MTM.", + "[431] Zhiyuan Li, Dongnan Liu, Chaoyi Zhang, Heng Wang, Tengfei Xue, and Weidong Cai. Enhancing advanced visual reasoning ability of large language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 1915-1929, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.114. URL https://aclanthology.org/2024.emnlp-main.114/.", + "[432] Zhong-Zhi Li, Duzhen Zhang, Ming-Liang Zhang, Jiaxin Zhang, Zengyan Liu, Yuxuan Yao, Haotian Xu, Junhao Zheng, Pei-Jie Wang, Xiuyi Chen, et al. From system 1 to system 2: A survey of reasoning large language models. 
arXiv preprint arXiv:2502.17419, 2025.", + "[433] Zhongzhi Li, Ming-Liang Zhang, Pei-Jie Wang, Jian Xu, Rui-Song Zhang, Yin Fei, Zhi-Long Ji, Jin-Feng Bai, Zhen-Ru Pan, Jiaxin Zhang, and Cheng-Lin Liu. CMMaTH: A Chinese multi-modal math skill evaluation benchmark for foundation models. In Owen Rambow, Leo Wanner, Marianna Apidianaki, Hend Al-Khalifa, Barbara Di Eugenio, and Steven Schockaert, editors, Proceedings of the 31st International Conference on Computational Linguistics, pages 2690–2726, Abu Dhabi, UAE, January 2025. Association for Computational Linguistics. URL https://aclanthology.org/2025.coling-main.184/.", + "[434] Zhuoqun Li, Haiyang Yu, Xuanang Chen, Hongyu Lin, Yaojie Lu, Fei Huang, Xianpei Han, Yongbin Li, and Le Sun. Deepsolution: Boosting complex engineering solution design via tree-based exploration and bi-point thinking. arXiv preprint arXiv:2502.20730, 2025.", + "[435] Zichao Li, Xueru Wen, Jie Lou, Yuqiu Ji, Yaojie Lu, Xianpei Han, Debing Zhang, and Le Sun. The devil is in the details: Tackling unimodal spurious correlations for generalizable multimodal reward models. In *Forty-second International Conference on Machine Learning*, 2025. URL https://openreview.net/forum?id=b0qRSUcQP7.", + "[436] Ziniu Li, Tian Xu, Yushun Zhang, Zhihang Lin, Yang Yu, Ruoyu Sun, and Zhi-Quan Luo. Remax: A simple, effective, and efficient reinforcement learning method for aligning large language models. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=Stn8hXkpe6.", + "[437] Jing Liang, Hongyao Tang, Yi Ma, Jinyi Liu, Yan Zheng, Shuyue Hu, Lei Bai, and Jianye Hao. Squeeze the soaked sponge: Efficient off-policy reinforcement finetuning for large language model. arXiv preprint arXiv:2507.06892, 2025." 
+ ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 63 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 63 + }, + { + "type": "page_number", + "text": "64", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 63 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[438] Jintao Liang, Gang Su, Huifeng Lin, You Wu, Rui Zhao, and Ziyue Li. Reasoning rag via system 1 or system 2: A survey on reasoning agentic retrieval-augmented generation for industry challenges. arXiv preprint arXiv:2506.10408, 2025.", + "[439] Xiao Liang, Zhong-Zhi Li, Yeyun Gong, Yang Wang, Hengyuan Zhang, Yelong Shen, Ying Nian Wu, and Weizhu Chen. Sws: Self-aware weakness-driven problem synthesis in reinforcement learning for llm reasoning. arXiv preprint arXiv:2506.08989, 2025.", + "[440] Xun Liang, Shichao Song, Zifan Zheng, Hanyu Wang, Qingchen Yu, Xunkai Li, Rong-Hua Li, Yi Wang, Zhonghao Wang, Feiyu Xiong, et al. Internal consistency and self-feedback in large language models: A survey. arXiv preprint arXiv:2407.14507, 2024.", + "[441] Baohao Liao, Xinyi Chen, Sara Rajaee, Yuhui Xu, Christian Herold, Anders Søgaard, Maarten de Rijke, and Christof Monz. Lost at the beginning of reasoning. arXiv preprint arXiv:2506.22058, 2025.", + "[442] Baohao Liao, Yuhui Xu, Hanze Dong, Junnan Li, Christof Monz, Silvio Savarese, Doyen Sahoo, and Caiming Xiong. Reward-guided speculative decoding for efficient ltm reasoning. arXiv preprint arXiv:2501.19324, 2025.", + "[443] Huanxuan Liao, Shizhu He, Yupu Hao, Xiang Li, Yanzhe Zhang, Jun Zhao, and Kang Liu. Skintern: Internalizing symbolic knowledge for distilling better cot capabilities into small language models. In Proceedings of the 31st International Conference on Computational Linguistics, pages 3203-3221, January 2025. 
URL https://aclanthology.org/2025.coling-main.215.pdf.", + "[444] Mengqi Liao, Xiangyu Xi, Ruinian Chen, Jia Leng, Yangen Hu, Ke Zeng, Shuai Liu, and Huaiyu Wan. Enhancing efficiency and exploration in reinforcement learning for llms. arXiv preprint arXiv:2505.18573, 2025.", + "[445] Minpeng Liao, Wei Luo, Chengxi Li, Jing Wu, and Kai Fan. Mario: Math reasoning with code interpreter output-a reproducible pipeline. arXiv preprint arXiv:2401.08190, 2024.", + "[446] Weibin Liao, Xu Chu, and Yasha Wang. Tpo: Aligning large language models with multi-branch & multi-step preference trees. arXiv preprint arXiv:2410.12854, 2024.", + "[447] Jonathan Light, Min Cai, Weiqin Chen, Guanzhi Wang, Xiusi Chen, Wei Cheng, Yisong Yue, and Ziniu Hu. Strategist: Learning strategic skills by LLMs via bi-level tree search. In Automated Reinforcement Learning: Exploring Meta-Learning, AutoML, and LLMs, June 2024. URL https://openreview.net/forum?id=UHWbmZuJPF.", + "[448] Jonathan Light, Yue Wu, Yiyou Sun, Wenchao Yu, Xujiang Zhao, Ziniu Hu, Haifeng Chen, Wei Cheng, et al. Scattered forest search: Smarter code space exploration with llms. arXiv preprint arXiv:2411.05010, 2024.", + "[449] Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=v8L0pN6EOi.", + "[450] Bill Yuchen Lin, Ronan Le Bras, Kyle Richardson, Ashish Sabharwal, Radha Poovendran, Peter Clark, and Yejin Choi. Zebralogic: On the scaling limits of lms for logical reasoning. arXiv preprint arXiv:2502.01100, 2025.", + "[451] Haohan Lin, Zhiqing Sun, Yiming Yang, and Sean Welleck. Lean-star: Learning to interleave thinking and proving. arXiv preprint arXiv:2407.10040, 2024.", + "[452] Qingwen Lin, Boyan Xu, Guimin Hu, Zijian Li, Zhifeng Hao, Keli Zhang, and Ruichu Cai. 
Cmcts: A constrained monte carlo tree search framework for mathematical reasoning in large language model. arXiv preprint arXiv:2502.11169, 2025.", + "[453] Qingwen Lin, Boyan Xu, Zijian Li, Zhifeng Hao, Keli Zhang, and Ruichu Cai. Leveraging constrained monte carlo tree search to generate reliable long chain-of-thought for mathematical reasoning. arXiv preprint arXiv:2502.11169, 2025.", + "[454] Yen-Ting Lin, Di Jin, Tengyu Xu, Tianhao Wu, Sainbayar Sukhbaatar, Chen Zhu, Yun He, Yun-Nung Chen, Jason Weston, Yuandong Tian, et al. Step-kto: Optimizing mathematical reasoning through stepwise binary feedback. arXiv preprint arXiv:2501.10799, 2025." + ], + "bbox": [ + 181, + 90, + 825, + 912 + ], + "page_idx": 64 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 64 + }, + { + "type": "page_number", + "text": "65", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 64 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[455] Yujie Lin, Ante Wang, Moye Chen, Jingyao Liu, Hao Liu, Jinsong Su, and Xinyan Xiao. Investigating inference-time scaling for chain of multi-modal thought: A preliminary study. arXiv preprint arXiv:2502.11514, 2025.", + "[456] Zicheng Lin, Zhibin Gou, Tian Liang, Ruilin Luo, Haowei Liu, and Yujiu Yang. CriticBench: Benchmarking LLMs for critique-correct reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 1552–1587, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.91. URL https://aclanthology.org/2024-findings-acl.91/.", + "[457] Zicheng Lin, Tian Liang, Jiahao Xu, Xing Wang, Ruilin Luo, Chufan Shi, Siheng Li, Yujiu Yang, and Zhaopeng Tu. Critical tokens matter: Token-level contrastive estimation enhance llm's reasoning capability. 
arXiv preprint arXiv:2411.19943, 2024.", + "[458] Zongyu Lin, Yao Tang, Xingcheng Yao, Da Yin, Ziniu Hu, Yizhou Sun, and Kai-Wei Chang. Qlass: Boosting language agent inference via q-guided stepwise search. arXiv preprint arXiv:2502.02584, 2025.", + "[459] Zehui Ling, Deshu Chen, Hongwei Zhang, Yifeng Jiao, Xin Guo, and Yuan Cheng. Fast on the easy, deep on the hard: Efficient reasoning via powered length penalty. arXiv preprint arXiv:2506.10446, 2025.", + "[460] Zhan Ling, Yunhao Fang, Xuanlin Li, Zhiao Huang, Mingu Lee, Roland Memisevic, and Hao Su. Deductive verification of chain-of-thought reasoning. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 36407-36433. Curran Associates, Inc., September 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/72393bd47a35f5b3bee4c609e7bba733-Paper-Conference.pdf.", + "[461] Philip Lippmann and Jie Yang. Style over substance: Distilled language models reason via stylistic replication. arXiv preprint arXiv:2504.01738, 2025.", + "[462] Aiwei Liu, Haoping Bai, Zhiyun Lu, Xiang Kong, Xiaoming Wang, Jiulong Shan, Meng Cao, and Lijie Wen. Direct large language model alignment through self-rewarding contrastive prompt distillation. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9688–9712, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.523. URL https://aclanthology.org/2024.acl-long.523/.", + "[463] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024.", + "[464] Bingbin Liu, Sebastien Bubeck, Ronen Eldan, Janardhan Kulkarni, Yanzhi Li, Anh Nguyen, Rachel Ward, and Yi Zhang. 
Tinygsm: achieving $>80\\%$ on gsm8k with small language models. arXiv preprint arXiv:2312.09241, 2023.", + "[465] Bo Liu, Leon Guertler, Simon Yu, Zichen Liu, Penghui Qi, Daniel Balcells, Mickel Liu, Cheston Tan, Weiyan Shi, Min Lin, et al. Spiral: Self-play on zero-sum games incentivizes reasoning via multi-agent multi-turn reinforcement learning. arXiv preprint arXiv:2506.24119, 2025.", + "[466] Chris Yuhao Liu, Liang Zeng, Jiacai Liu, Rui Yan, Jujie He, Chaojie Wang, Shuicheng Yan, Yang Liu, and Yahui Zhou. Skywork-reward: Bag of tricks for reward modeling in llms. arXiv preprint arXiv:2410.18451, 2024.", + "[467] Chris Yuhao Liu, Liang Zeng, Yuzhen Xiao, Jujie He, Jiacai Liu, Chaojie Wang, Rui Yan, Wei Shen, Fuxiang Zhang, Jiacheng Xu, et al. Skywork-reward-v2: Scaling preference data curation via human-ai synergy. arXiv preprint arXiv:2507.01352, 2025.", + "[468] Cong Liu, Zhong Wang, ShengYu Shen, Jialiang Peng, Xiaoli Zhang, Zhen-Dong Du, and YaFang Wang. The chinese dataset distilled from deepseek-r1-671b. https://huggingface.co/datasets/Congliu/Chinese-DeepSeek-R1-Distill-data-110k, 2025." + ], + "bbox": [ + 181, + 90, + 828, + 911 + ], + "page_idx": 65 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 65 + }, + { + "type": "page_number", + "text": "66", + "bbox": [ + 488, + 936, + 508, + 946 + ], + "page_idx": 65 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[469] Dancheng Liu, Amir Nassereldine, Ziming Yang, Chenhui Xu, Yuting Hu, Jiajie Li, Utkarsh Kumar, Changjae Lee, Ruiyang Qin, Yiyu Shi, et al. Large language models have intrinsic self-correction ability. arXiv preprint arXiv:2406.15673, 2024.", + "[470] Fan Liu, Wenshuo Chao, Naiqiang Tan, and Hao Liu. Bag of tricks for inference-time computation of lIm reasoning. 
arXiv preprint arXiv:2502.07191, 2025.", + "[471] Guanlin Liu, Kaixuan Ji, Renjie Zheng, Zheng Wu, Chen Dun, Quanquan Gu, and Lin Yan. Enhancing multi-step reasoning abilities of language models through direct q-function optimization. arXiv preprint arXiv:2410.09302, 2024.", + "[472] Hanbing Liu, Lang Cao, Yuanyi Ren, Mengyu Zhou, Haoyu Dong, Xiaojun Ma, Shi Han, and Dongmei Zhang. Bingo: Boosting efficient reasoning of llms via dynamic and significance-based reinforcement learning. arXiv preprint arXiv:2506.08125, 2025.", + "[473] Hanmeng Liu, Zhizhang Fu, Mengru Ding, Ruoxi Ning, Chaoli Zhang, Xiaozhang Liu, and Yue Zhang. Logical reasoning in large language models: A survey. arXiv preprint arXiv:2502.09100, 2025.", + "[474] Hao Liu, Zhengren Wang, Xi Chen, Zhiyu Li, Feiyu Xiong, Qinhan Yu, and Wentao Zhang. Hoprag: Multi-hop reasoning for logic-aware retrieval-augmented generation. arXiv preprint arXiv:2502.12442, 2025.", + "[475] Hongxuan Liu, Zhiyao Luo, and Tingting Zhu. Best of both worlds: Harmonizing LLM capabilities in decision-making and question-answering for treatment regimes. In Advances In Medical Foundation Models: Explainability, Robustness, Security, and Beyond, 2024. URL https://openreview.net/forum?id=afu9qhp7md.", + "[476] Jiacai Liu, Chaojie Wang, Chris Yuhao Liu, Liang Zeng, Rui Yan, Yiwen Sun, Yang Liu, and Yahui Zhou. Improving multi-step reasoning abilities of large language models with direct advantage policy optimization. arXiv preprint arXiv:2412.18279, 2024.", + "[477] Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. Don't throw away your value model! generating more preferable text with value-guided monte-carlo tree search decoding. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=kh9Zt2Ldmn.", + "[478] Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. 
Making PPO even better: Value-guided monte-carlo tree search decoding, September 2024. URL https://openreview.net/forum?id=QaODpeRaOK.", + "[479] Junnan Liu, Hongwei Liu, Linchen Xiao, Shudong Liu, Taolin Zhang, Zihan Ma, Songyang Zhang, and Kai Chen. Deciphering trajectory-aided lIm reasoning: An optimization perspective. arXiv preprint arXiv:2505.19815, 2025.", + "[480] Junnan Liu, Linhao Luo, Thuy-Trang Vu, and Gholamreza Haffari. Situatedthinker: Grounding llm reasoning with real-world through situated thinking. arXiv preprint arXiv:2505.19300, 2025.", + "[481] Junteng Liu, Yuanxiang Fan, Zhuo Jiang, Han Ding, Yongyi Hu, Chi Zhang, Yiqi Shi, Shitong Weng, Aili Chen, Shiqi Chen, et al. Synlogic: Synthesizing verifiable reasoning data at scale for learning logical reasoning and beyond. arXiv preprint arXiv:2505.19641, 2025.", + "[482] Liping Liu, Chunhong Zhang, Likang Wu, Chuang Zhao, Zheng Hu, Ming He, and Jianping Fan. Instruct-of-reflection: Enhancing large language models iterative reflection capabilities via dynamic-meta instruction. arXiv preprint arXiv:2503.00902, 2025.", + "[483] Mingjie Liu, Shizhe Diao, Ximing Lu, Jian Hu, Xin Dong, Yejin Choi, Jan Kautz, and Yi Dong. Prorl: Prolonged reinforcement learning expands reasoning boundaries in large language models. arXiv preprint arXiv:2505.24864, 2025.", + "[484] Qiang Liu, Xinlong Chen, Yue Ding, Shizhen Xu, Shu Wu, and Liang Wang. Attention-guided self-reflection for zero-shot hallucination detection in large language models. arXiv preprint arXiv:2501.09997, 2025.", + "[485] Qin Liu, Wenxuan Zhou, Nan Xu, James Y Huang, Fei Wang, Sheng Zhang, Hoifung Poon, and Muhao Chen. Metascale: Test-time scaling with evolving meta-thoughts. arXiv preprint arXiv:2503.13447, 2025." 
+ ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 66 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 66 + }, + { + "type": "page_number", + "text": "67", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 66 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[486] Runze Liu, Junqi Gao, Jian Zhao, Kaiyan Zhang, Xiu Li, Biqing Qi, Wanli Ouyang, and Bowen Zhou. Can 1b llm surpass 405b llm? rethinking compute-optimal test-time scaling. arXiv preprint arXiv:2502.06703, 2025.", + "[487] Tengxuan Liu, Shiyao Li, Jiayi Yang, Tianchen Zhao, Feng Zhou, Xiaohui Song, Guohao Dai, Shengen Yan, Huazhong Yang, and Yu Wang. Pm-kvq: Progressive mixed-precision kv cache quantization for long-cot llms. arXiv preprint arXiv:2505.18610, 2025.", + "[488] Wanlong Liu, Junxiao Xu, Fei Yu, Yukang Lin, Ke Ji, Wenyu Chen, Yan Xu, Yasheng Wang, Lifeng Shang, and Benyou Wang. Qfft, question-free fine-tuning for adaptive reasoning. arXiv preprint arXiv:2506.12860, 2025.", + "[489] Wei Liu, Junlong Li, Xiwen Zhang, Fan Zhou, Yu Cheng, and Junxian He. Diving into self-evolving training for multimodal reasoning. arXiv preprint arXiv:2412.17451, 2024.", + "[490] Wei Liu, Ruochen Zhou, Yiyun Deng, Yuzhen Huang, Junteng Liu, Yuntian Deng, Yizhe Zhang, and Junxian He. Learn to reason efficiently with adaptive length-based reward shaping. arXiv preprint arXiv:2505.15612, 2025.", + "[491] Ye Liu, Kevin Qinghong Lin, Chang Wen Chen, and Mike Zheng Shou. Videomind: A chain-of-lora agent for long video reasoning. arXiv preprint arXiv:2503.13444, 2025.", + "[492] Yongjiang Liu, Haoxi Li, Xiaosong Ma, Jie Zhang, and Song Guo. Think how to think: Mitigating overthinking with autonomous difficulty cognition in large reasoning models. 
arXiv preprint arXiv:2507.02663, 2025.", + "[493] Yue Liu, Hongcheng Gao, Shengfang Zhai, Jun Xia, Tianyi Wu, Zhiwei Xue, Yulin Chen, Kenji Kawaguchi, Jiaheng Zhang, and Bryan Hooi. Guardreasoner: Towards reasoning-based llm safeguards. arXiv preprint arXiv:2501.18492, 2025.", + "[494] Yue Liu, Jiaying Wu, Yufei He, Hongcheng Gao, Hongyu Chen, Baolong Bi, Ruihan Gong, Jiaheng Zhang, Zhiqi Huang, and Bryan Hooi. Efficient inference for large reasoning models: A survey. arXiv preprint arXiv:2503.23077, 2025.", + "[495] Yuliang Liu, Junjie Lu, Zhaoling Chen, Chaofeng Qu, Jason Klein Liu, Chonghan Liu, Zefan Cai, Yunhui Xia, Li Zhao, Jiang Bian, et al. Adaptivestep: Automatically dividing reasoning step through model confidence. arXiv preprint arXiv:2502.13943, 2025.", + "[496] Zhaowei Liu, Xin Guo, Fangqi Lou, Lingfeng Zeng, Jinyi Niu, Zixuan Wang, Jiajie Xu, Weige Cai, Ziwei Yang, Xueqian Zhao, et al. Fin-r1: A large language model for financial reasoning through reinforcement learning. arXiv preprint arXiv:2503.16252, 2025.", + "[497] Zhiyuan Liu, Yuting Zhang, Feng Liu, Changwang Zhang, Ying Sun, and Jun Wang. Othinkmr1: Stimulating multimodal generalized reasoning capabilities through dynamic reinforcement learning. arXiv preprint arXiv:2503.16081, 2025.", + "[498] Zichen Liu, Changyu Chen, Wenjun Li, Tianyu Pang, Chao Du, and Min Lin. There may not be aha moment in r1-zero-like training — a pilot study. https://oatllm.notion.site/oat-zero, 2025. Notion Blog.", + "[499] Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783, 2025.", + "[500] Zihan Liu, Yang Chen, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. Acemath: Advancing frontier math reasoning with post-training and reward modeling. 
arXiv preprint arXiv:2412.15084, 2024.", + "[501] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025.", + "[502] Elita Lobo, Chirag Agarwal, and Himabindu Lakkaraju. On the impact of fine-tuning on chain-of-thought reasoning. arXiv preprint arXiv:2411.15382, 2024.", + "[503] Chenwei Lou, Zewei Sun, Xinnian Liang, Meng Qu, Wei Shen, Wenqi Wang, Yuntao Li, Qingping Yang, and Shuangzhi Wu. Adacot: Pareto-optimal adaptive chain-of-thought triggering via reinforcement learning. arXiv preprint arXiv:2505.11896, 2025.", + "[504] Dakuan Lu, Xiaoyu Tan, Rui Xu, Tianchu Yao, Chao Qu, Wei Chu, Yinghui Xu, and Yuan Qi. Scp-116k: A high-quality problem-solution dataset and a generalized pipeline for automated extraction in the higher education science domain, 2025." + ], + "bbox": [ + 181, + 90, + 828, + 911 + ], + "page_idx": 67 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 67 + }, + { + "type": "page_number", + "text": "68", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 67 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[505] Haolang Lu, Yilian Liu, Jingxin Xu, Guoshun Nan, Yuanlong Yu, Zhican Chen, and Kun Wang. Auditing meta-cognitive hallucinations in reasoning large language models. arXiv preprint arXiv:2505.13143, 2025.", + "[506] Jianqiao Lu, Zhiyang Dou, Hongru WANG, Zeyu Cao, Jianbo Dai, Yunlong Feng, and Zhijiang Guo. Autopsy: Automated process-supervised verifier. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 79935-79962. Curran Associates, Inc., December 2024. 
URL https://proceedings.neurips.cc/paper_files/paper/2024/file/9246aa822579d9b29a140ecdac36ad60-Paper-Conference.pdf.", + "[507] Pan Lu, Swaroop Mishra, Tony Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho, editors, Advances in Neural Information Processing Systems, November 2022. URL https://openreview.net/forum?id=HjwK-Tc_Bc.", + "[508] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=KUNzEQMWU7.", + "[509] Pan Lu, Bowen Chen, Sheng Liu, Rahul Thapa, Joseph Boen, and James Zou. Octo tools: An agentic framework with extensible tools for complex reasoning. arXiv preprint arXiv:2502.11271, 2025.", + "[510] Rubing Lu, João Sedoc, and Arun Sundararajan. Reasoning and the trusting behavior of deepseek and gpt: An experiment revealing hidden fault lines in large language models. arXiv preprint arXiv:2502.12825, 2025.", + "[511] Wenquan Lu, Yuechuan Yang, Kyle Lee, Yanshu Li, and Enqi Liu. Latent chain-of-thought? decoding the depth-recurrent transformer. arXiv preprint arXiv:2507.02199, 2025.", + "[512] Zhengxi Lu, Yuxiang Chai, Yaxuan Guo, Xi Yin, Liang Liu, Hao Wang, Guanjing Xiong, and Hongsheng Li. Ui-r1: Enhancing action prediction of gui agents by reinforcement learning. arXiv preprint arXiv:2503.21620, 2025.", + "[513] Zimu Lu, Aojun Zhou, Houxing Ren, Ke Wang, Weikang Shi, Junting Pan, Mingjie Zhan, and Hongsheng Li. Mathgenie: Generating synthetic data with question back-translation for enhancing mathematical reasoning of llms. 
arXiv preprint arXiv:2402.16352, 2024.", + "[514] Haipeng Luo, Qingfeng Sun, Can Xu, Pu Zhao, Jianguang Lou, Chongyang Tao, Xiubo Geng, Qingwei Lin, Shifeng Chen, and Dongmei Zhang. Wizardmath: Empowering mathematical reasoning for large language models via reinforced evol-instruct. arXiv preprint arXiv:2308.09583, 2023.", + "[515] Hanjun Luo, Shenyu Dai, Chiming Ni, Xinfeng Li, Guibin Zhang, Kun Wang, Tongliang Liu, and Hanan Salam. Agent auditor: Human-level safety and security evaluation for lIm agents. arXiv preprint arXiv:2506.00641, 2025.", + "[516] Haotian Luo, Li Shen, Haiying He, Yibo Wang, Shiwei Liu, Wei Li, Naiqiang Tan, Xiaochun Cao, and Dacheng Tao. O1-pruner: Length-harmonizing fine-tuning for o1-like reasoning pruning. arXiv preprint arXiv:2501.12570, 2025.", + "[517] Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592, 2024.", + "[518] Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. Deepscaler: Surpassing o1-preview with a 1.5b model by scaling rl, February 2025. URL https://github.com/agentica-project/rllm. Notion Blog.", + "[519] Ruilin Luo, Zhuofan Zheng, Yifan Wang, Yiyao Yu, Xinzhe Ni, Zicheng Lin, Jin Zeng, and Yujiu Yang. Ursa: Understanding and verifying chain-of-thought reasoning in multimodal mathematics. arXiv preprint arXiv:2501.04686, 2025." 
+ ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 68 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 68 + }, + { + "type": "page_number", + "text": "69", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 68 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[520] Xianzhen Luo, Qingfu Zhu, Zhiming Zhang, Libo Qin, Xuanyu Zhang, Qing Yang, Dongliang Xu, and Wanxiang Che. Python is not always the best choice: Embracing multilingual program of thoughts. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 7185-7212, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.408. URL https://aclanthology.org/2024.emnlp-main.408/.", + "[521] Yijia Luo, Yulin Song, Xingyao Zhang, Jiaheng Liu, Weixun Wang, GengRu Chen, Wenbo Su, and Bo Zheng. Deconstructing long chain-of-thought: A structured reasoning optimization framework for long cot distillation. arXiv preprint arXiv:2503.16385, 2025.", + "[522] Chengqi Lyu, Songyang Gao, Yuzhe Gu, Wenwei Zhang, Jianfei Gao, Kuikun Liu, Ziyi Wang, Shuaibin Li, Qian Zhao, Haian Huang, et al. Exploring the limit of outcome reward for learning mathematical reasoning. arXiv preprint arXiv:2502.06781, 2025.", + "[523] Qing Lyu, Shreya Havaldar, Adam Stein, Li Zhang, Delip Rao, Eric Wong, Marianna Apidianaki, and Chris Callison-Burch. Faithful chain-of-thought reasoning. In Jong C. Park, Yuki Arase, Baotian Hu, Wei Lu, Derry Wijaya, Ayu Purwarianti, and Adila Alfa Krisnadhi, editors, Proceedings of the 13th International Joint Conference on Natural Language Processing and the 3rd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pages 305-329, Nusa Dua, Bali, November 2023. 
Association for Computational Linguistics. doi: 10.18653/v1/2023.ijcnlp-main.20. URL https://aclanthology.org/2023.ijcnlp-main.20/.", + "[524] Alexander Lyzhov, Yuliya Molchanova, Armenii Ashukha, Dmitry Molchanov, and Dmitry Vetrov. Greedy policy search: A simple baseline for learnable test-time augmentation. In Jonas Peters and David Sontag, editors, Proceedings of the 36th Conference on Uncertainty in Artificial Intelligence (UAI), volume 124 of Proceedings of Machine Learning Research, pages 1308-1317. PMLR, 03-06 Aug 2020. URL https://proceedings.mlr.press/v124/lyzhov20a.html.", + "[525] Jingyuan Ma, Rui Li, Zheng Li, Junfeng Liu, Lei Sha, and Zhifang Sui. Hauntattack: When attack follows reasoning as a shadow. arXiv preprint arXiv:2506.07031, 2025.", + "[526] Lu Ma, Hao Liang, Meiyi Qiang, Lexiang Tang, Xiaochen Ma, Zhen Hao Wong, Junbo Niu, Chengyu Shen, Running He, Bin Cui, et al. Learning what reinforcement learning can't: Interleaved online fine-tuning for hardest questions. arXiv preprint arXiv:2506.07527, 2025.", + "[527] Nanye Ma, Shangyuan Tong, Haolin Jia, Hexiang Hu, Yu-Chuan Su, Mingda Zhang, Xuan Yang, Yandong Li, Tommi Jaakkola, Xuhui Jia, et al. Inference-time scaling for diffusion models beyond scaling denoising steps. arXiv preprint arXiv:2501.09732, 2025.", + "[528] Qianli Ma, Haotian Zhou, Tingkai Liu, Jianbo Yuan, Pengfei Liu, Yang You, and Hongxia Yang. Let's reward step by step: Step-level reward model as the navigators for reasoning. arXiv preprint arXiv:2310.10080, 2023.", + "[529] Ruotian Ma, Peisong Wang, Cheng Liu, Xingyan Liu, Jiaqi Chen, Bang Zhang, Xin Zhou, Nan Du, and Jia Li. $S^2 r$ : Teaching llms to self-verify and self-correct via reinforcement learning. arXiv preprint arXiv:2502.12853, 2025.", + "[530] Xinyin Ma, Guangnian Wan, Runpeng Yu, Gongfan Fang, and Xinchao Wang. Cot-valve: Length-compressible chain-of-thought tuning. 
arXiv preprint arXiv:2502.09601, 2025.", + "[531] Xueguang Ma, Qian Liu, Dongfu Jiang, Ge Zhang, Zejun Ma, and Wenhu Chen. Generalreasoner: Advancing llm reasoning across all domains. arXiv preprint arXiv:2505.14652, 2025.", + "[532] Xuetao Ma, Wenbin Jiang, and Hua Huang. Problem-solving logic guided curriculum in-context learning for llms complex reasoning. arXiv preprint arXiv:2502.15401, 2025.", + "[533] Yan Ma, Steffi Chern, Xuyang Shen, Yiran Zhong, and Pengfei Liu. Rethinking rl scaling for vision language models: A transparent, from-scratch framework and comprehensive evaluation scheme. arXiv preprint arXiv:2504.02587, 2025.", + "[534] Yiran Ma, Zui Chen, Tianqiao Liu, Mi Tian, Zhuo Liu, Zitao Liu, and Weiqi Luo. What are step-level reward models rewarding? counterintuitive findings from mcts-boosted mathematical reasoning. arXiv preprint arXiv:2412.15904, 2024." + ], + "bbox": [ + 181, + 90, + 828, + 912 + ], + "page_idx": 69 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 69 + }, + { + "type": "page_number", + "text": "70", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 69 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[535] Zexiong Ma, Chao Peng, Pengfei Gao, Xiangxin Meng, Yanzhen Zou, and Bing Xie. Sortf: Issue resolving with subtask-oriented reinforced fine-tuning. arXiv preprint arXiv:2502.20127, 2025.", + "[536] Zeyao Ma, Xiaokang Zhang, Jing Zhang, Jifan Yu, Sijia Luo, and Jie Tang. Dynamic scaling of unit tests for code reward modeling. arXiv preprint arXiv:2501.01054, 2025.", + "[537] Ziyang Ma, Zhuo Chen, Yuping Wang, Eng Siong Chng, and Xie Chen. Audio-cot: Exploring chain-of-thought reasoning in large audio language model. arXiv preprint arXiv:2501.07246, 2025.", + "[538] Aman Madaan, Katherine Hermann, and Amir Yazdanbakhsh. What makes chain-of-thought prompting effective? a counterfactual study. 
In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 1448-1535, Singapore, December 2023. URL https://aclanthology.org/2023.findings-emnlp.101.pdf.", + "[539] Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, Shashank Gupta, Bodhisattwa Prasad Majumder, Katherine Hermann, Sean Welleck, Amir Yazdanbakhsh, and Peter Clark. Self-refine: Iterative refinement with self-feedback. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 46534-46594. Curran Associates, Inc., March 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/91edff07232fb1b55a505a9e9f6c0ff3-Paper-Conference.pdf.", + "[540] Sathwik Tejaswi Madhusudhan, Shruthan Radhakrishna, Jash Mehta, and Toby Liang. Millions scale dataset distilled from r1-32b. https://huggingface.co/datasets/ServiceNow-AI/R1-Distill-SFT, February 2025.", + "[541] Sadegh Mahdavi, Muchen Li, Kaiwen Liu, Christos Thrampoulidis, Leonid Sigal, and Renjie Liao. Leveraging online olympiad-level math problems for llms training and contamination-resistant evaluation. arXiv preprint arXiv:2501.14275, 2025.", + "[542] Tobias Materzok. Cos (m+ o) s: Curiosity and rl-enhanced mcts for exploring story space via language models. arXiv preprint arXiv:2501.17104, 2025.", + "[543] Justus Mattern, Sami Jaghourar, Manveer Basra, Jannik Straube, Matthew Di Ferrante, Felix Gabriel, Jack Min Ong, Vincent Weisser, and Johannes Hagemann. Synthetic-1: Two million collaboratively generated reasoning traces from deepseek-r1, 2025. URL https://www.primeintellect.ai/blog/synthetic-1-release.", + "[544] Nat McAleese, Rai Michael Pokorny, Juan Felipe Ceron Uribe, Evgenia Nitishinskaya, Maja Trebacz, and Jan Leike. Llm critics help catch llm bugs. 
arXiv preprint arXiv:2407.00215, 2024.", + "[545] R Thomas McCoy, Shunyu Yao, Dan Friedman, Mathew D Hardy, and Thomas L Grifths. When a language model is optimized for reasoning, does it still show embers of autoregression? an analysis of openai o1. arXiv preprint arXiv:2410.01792, 2024.", + "[546] Lingrui Mei, Jiayu Yao, Yuyao Ge, Yiwei Wang, Baolong Bi, Yujun Cai, Jiazhi Liu, Mingyu Li, Zhong-Zhi Li, Duzhen Zhang, Chenlin Zhou, Jiayi Mao, Tianze Xia, Jiafeng Guo, and Shenghua Liu. A survey of context engineering for large language models. arXiv preprint arXiv:2507.13334, 2025.", + "[547] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, Ping Luo, Yu Qiao, Qiaosheng Zhang, and Wenqi Shao. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025.", + "[548] William Merrill and Ashish Sabharwal. The expressive power of transformers with chain of thought. In *The Twelfth International Conference on Learning Representations*, January 2023. URL https://openreview.net/pdf?id=CDmerQ37Zs.", + "[549] Ning Miao, Yee Whye Teh, and Tom Rainforth. Selfcheck: Using LLMs to zero-shot check their own step-by-step reasoning. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id= pTHfApDakA." + ], + "bbox": [ + 181, + 90, + 828, + 911 + ], + "page_idx": 70 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 70 + }, + { + "type": "page_number", + "text": "71", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 70 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[550] Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, et al. 
Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413, 2024.", + "[551] Seyed Iman Mirzadeh, Keivan Alizadeh, Hooman Shahrokhi, Oncel Tuzel, Samy Bengio, and Mehrdad Farajtabar. GSM-symbolic: Understanding the limitations of mathematical reasoning in large language models. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=AjXkRZIvjb.", + "[552] Prakamya Mishra, Jiang Liu, Jialian Wu, Xiaodong Yu, Zicheng Liu, and Emad Barsoum. Tttbench: A benchmark for evaluating reasoning ability with simple and novel tic-tac-toe-style games. arXiv preprint arXiv:2506.10209, 2025.", + "[553] Arindam Mitra, Hamed Khanpour, Corby Rosset, and Ahmed Awadallah. Orca-math: Unlocking the potential of slms in grade school math. arXiv preprint arXiv:2402.14830, 2024.", + "[554] Chancharik Mitra, Brandon Huang, Trevor Darrell, and Roei Herzig. Compositional chain-of-thought prompting for large multimodal models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14420-14431, 2024.", + "[555] Purbesh Mitra and Sennur Ulukus. Motif: Modular thinking via reinforcement fine-tuning in llms. arXiv preprint arXiv:2507.02851, 2025.", + "[556] Shentong Mo and Miao Xin. Tree of uncertain thoughts reasoning for large language models. In ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 12742-12746, April 2024. doi: 10.1109/ICASSP48485.2024.10448355. URL https://ieeexplore.ieee.org/document/10448355.", + "[557] Philipp Mondorf and Barbara Plank. Beyond accuracy: Evaluating the reasoning behavior of large language models—a survey. arXiv preprint arXiv:2404.01869, 2024.", + "[558] Terufumi Morishita, Gaku Morio, Atsuki Yamaguchi, and Yasuhiro Sogawa. Enhancing reasoning capabilities of llms via principled synthetic logic corpus. In A. Globerson, L. Mackey, D. 
Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 73572-73604. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/8678da90126aa58326b2fc0254b33a8c-Paper-Conference.pdf.", + "[559] Yongyu Mu, Jiali Zeng, Bei Li, Xinyan Guan, Fandong Meng, Jie Zhou, Tong Xiao, and Jingbo Zhu. Dissecting long reasoning models: An empirical study. arXiv preprint arXiv:2506.04913, 2025.", + "[560] Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025.", + "[561] Tergel Munkhbat, Namgyu Ho, Seohyun Kim, Yongjin Yang, Yujin Kim, and Se-Young Yun. Self-training elicits concise reasoning in large language models. arXiv preprint arXiv:2502.20122, 2025.", + "[562] Vaskar Nath, Pranav Raja, Claire Yoon, and Sean Hendryx. Toolcomp: A multi-tool reasoning & process supervision benchmark. arXiv preprint arXiv:2501.01290, 2025.", + "[563] Sania Nayab, Giulio Rossolini, Marco Simoni, Andrea Saracino, Giorgio Buttazzo, Nicola Maria Manes, and Fabrizio Giacomelli. Concise thoughts: Impact of output length on llm reasoning and cost. arXiv preprint arXiv:2407.19825, 2024.", + "[564] Ansong Ni, Srini Iyer, Dragomir Radev, Veselin Stoyanov, Wen-Tau Yih, Sida Wang, and Xi Victoria Lin. LEVER: Learning to verify language-to-code generation with execution. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett, editors, Proceedings of the 40th International Conference on Machine Learning, volume 202 of Proceedings of Machine Learning Research, pages 26106-26128. PMLR, 23-29 Jul 2023. URL https://proceedings.mlr.press/v202/ni23b.html." 
+ ], + "bbox": [ + 181, + 90, + 825, + 910 + ], + "page_idx": 71 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 71 + }, + { + "type": "page_number", + "text": "72", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 71 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[565] Ziyi Ni, Yifan Li, Ning Yang, Dou Shen, Pin Lv, and Daxiang Dong. Tree-of-code: A tree-structured exploring framework for end-to-end code generation and execution in complex task handling. arXiv preprint arXiv:2412.15305, 2024.", + "[566] Allen Nie, Yi Su, Bo Chang, Jonathan N Lee, Ed H Chi, Quoc V Le, and Minmin Chen. Evolve: Evaluating and optimizing llms for exploration. arXiv preprint arXiv:2410.06238, 2024.", + "[567] Yansong Ning, Wei Li, Jun Fang, Naiqiang Tan, and Hao Liu. Not all thoughts are generated equal: Efficient lIm reasoning via multi-turn reinforcement learning. arXiv preprint arXiv:2505.11827, 2025.", + "[568] Harsha Nori, Naoto Usuyama, Nicholas King, Scott Mayer McKinney, Xavier Fernandes, Sheng Zhang, and Eric Horvitz. From medprompt to o1: Exploration of run-time strategies for medical challenge problems and beyond. arXiv preprint arXiv:2411.03590, 2024.", + "[569] Maxwell Nye, Anders Johan Andreassen, Guy Gur-Ari, Henryk Michalewski, Jacob Austin, David Bieber, David Dohan, Aitor Lewkowycz, Maarten Bosma, David Luan, Charles Sutton, and Augustus Odena. Show your work: Scratchpads for intermediate computation with language models. In Deep Learning for Code Workshop, March 2022. URL https://openreview.net/forum?id=HB1x2idbkbq.", + "[570] Skywork o1 Team. Skywork-o1 open series. https://huggingface.co/Skywork, November 2024.", + "[571] OpenCompass. Aime 2025. https://huggingface.co/datasets/opencompass/AIME2025, February 2025.", + "[572] Yixin Ou, Yunzhi Yao, Ningyu Zhang, Hui Jin, Jiacheng Sun, Shumin Deng, Zhenguo Li, and Huajun Chen. 
How do llms acquire new knowledge? a knowledge circuits perspective on continual pre-training. arXiv preprint arXiv:2502.11196, 2025.", + "[573] Alexander Pan, Kush Bhatia, and Jacob Steinhardt. The effects of reward misspecification: Mapping and mitigating misaligned models. arXiv preprint arXiv:2201.03544, 2022.", + "[574] Jiabao Pan, Yan Zhang, Chen Zhang, Zuozhu Liu, Hongwei Wang, and Haizhou Li. DynaThink: Fast or slow? a dynamic decision-making framework for large language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 14686-14695, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.814. URL https://aclanthology.org/2024.emnlp-main.814/.", + "[575] Jianfeng Pan, Senyou Deng, and Shaomang Huang. Coat: Chain-of-associated-thoughts framework for enhancing large language models reasoning. arXiv preprint arXiv:2502.02390, 2025.", + "[576] Jiayi Pan, Junjie Zhang, Xingyao Wang, Lifan Yuan, Hao Peng, and Alane Suhr. Tinyzero. https://github.com/Jiayi-Pan/TinyZero, 2025. Accessed: 2025-01-24.", + "[577] Jiazhen Pan, Che Liu, Junde Wu, Fenglin Liu, Jiayuan Zhu, Hongwei Bran Li, Chen Chen, Cheng Ouyang, and Daniel Rueckert. Medvlm-r1: Incentivizing medical reasoning capability of vision-language models (vlms) via reinforcement learning. arXiv preprint arXiv:2502.19634, 2025.", + "[578] Liangming Pan, Michael Saxon, Wenda Xu, Deepak Nathani, Xinyi Wang, and William Yang Wang. Automatically correcting large language models: Surveying the landscape of diverse self-correction strategies. arXiv preprint arXiv:2308.03188, 2023.", + "[579] Wenbo Pan, Zhichao Liu, Qiguang Chen, Xiangyang Zhou, Haining Yu, and Xiaohua Jia. The hidden dimensions of llm alignment: A multi-dimensional safety analysis. arXiv preprint arXiv:2502.09674, 2025.", + "[580] Zhihong Pan, Kai Zhang, Yuze Zhao, and Yupeng Han. 
Route to reason: Adaptive routing for lIm and reasoning strategy selection. arXiv preprint arXiv:2505.19435, 2025.", + "[581] Bo Pang, Hanze Dong, Jiacheng Xu, Silvio Savarese, Yingbo Zhou, and Caiming Xiong. Bolt: Bootstrap long chain-of-thought in language models without distillation. arXiv preprint arXiv:2502.03860, 2025." + ], + "bbox": [ + 181, + 90, + 828, + 910 + ], + "page_idx": 72 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 72 + }, + { + "type": "page_number", + "text": "73", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 72 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[582] Richard Yuanzhe Pang, Weizhe Yuan, He He, Kyunghyun Cho, Sainbayar Sukhbaatar, and Jason Weston. Iterative reasoning preference optimization. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 116617-116637. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/d37c9ad425fe5b65304d500c6edcba00-Paper-Conference.pdf.", + "[583] Shubham Parashar, Blake Olson, Sambhav Khurana, Eric Li, Hongyi Ling, James Caverlee, and Shuiwang Ji. Inference-time computations for llm reasoning and planning: A benchmark and insights. arXiv preprint arXiv:2502.12521, 2025.", + "[584] Chanwoo Park, Seungju Han, Xingzhi Guo, Asuman Ozdaglar, Kaiqing Zhang, and Joo-Kyung Kim. Maporl: Multi-agent post-co-training for collaborative large language models with reinforcement learning. arXiv preprint arXiv:2502.18439, 2025.", + "[585] Junsoo Park, Seungyeon Jwa, Meiying Ren, Daeyoung Kim, and Sanghyuk Choi. Offsetbias: Leveraging debiased data for tuning evaluators, 2024.", + "[586] Sungjin Park, Xiao Liu, Yeyun Gong, and Edward Choi. 
Ensembling large language models with process reward-guided tree search for better complex reasoning. arXiv preprint arXiv:2412.15797, 2024.", + "[587] Manojkumar Parmar and Yuvaraj Govindarajulu. Challenges in ensuring ai safety in deepseek-r1 models: The shortcomings of reinforcement learning strategies. arXiv preprint arXiv:2501.17030, 2025.", + "[588] Avinash Patil. Advancing reasoning in large language models: Promising methods and approaches. arXiv preprint arXiv:2502.03671, 2025.", + "[589] Avinash Patil and Amardeep Kour Gedhu. Cognitive-mental-llm: Leveraging reasoning in large language models for mental health prediction via online text. arXiv preprint arXiv:2503.10095, 2025.", + "[590] Debjit Paul, Mete Ismayilzada, Maxime Peyrard, Beatrix Borges, Antoine Bosselut, Robert West, and Boi Faltings. REFINER: Reasoning feedback on intermediate representations. In Yvette Graham and Matthew Purver, editors, Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1100–1126, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.67/.", + "[591] Patomporn Payoungkhamdee, Pume Tuchinda, Jinheon Baek, Samuel Cahyawijaya, Can Udomcharoenchaikit, Potsawee Manakul, Peerat Limkonchotiwat, Ekapol Chuangsuwanich, and Sarana Nutanong. Towards better understanding of program-of-thought reasoning in cross-lingual and multilingual environments. arXiv preprint arXiv:2502.17956, 2025.", + "[592] Chunyi Peng, Zhipeng Xu, Zhenghao Liu, Yishan Li, Yukun Yan, Shuo Wang, Zhiyuan Liu, Yu Gu, Minghe Yu, Ge Yu, et al. Learning to route queries across knowledge bases for step-wise retrieval-augmented reasoning. arXiv preprint arXiv:2505.22095, 2025.", + "[593] Dengyun Peng, Yuhang Zhou, Qiguang Chen, Jinhao Liu, Jingjing Chen, and Libo Qin. 
Dlpo: Towards a robust, efficient, and generalizable prompt optimization framework from a deep-learning perspective. arXiv preprint arXiv:2503.13413, 2025.", + "[594] Hao Peng, Yunjia Qi, Xiaozhi Wang, Zijun Yao, Bin Xu, Lei Hou, and Juanzi Li. Agentic reward modeling: Integrating human preferences with verifiable correctness signals for reliable reward systems. arXiv preprint arXiv:2502.19328, 2025.", + "[595] Keqin Peng, Liang Ding, Yuanxin Ouyang, Meng Fang, and Dacheng Tao. Revisiting overthinking in long chain-of-thought from the perspective of self-doubt. arXiv preprint arXiv:2505.23480, 2025.", + "[596] Miao Peng, Nuo Chen, Zongrui Suo, and Jia Li. Rewarding graph reasoning process makes llms more generalized reasoners. arXiv preprint arXiv:2503.00845, 2025.", + "[597] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025." + ], + "bbox": [ + 181, + 90, + 826, + 911 + ], + "page_idx": 73 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 73 + }, + { + "type": "page_number", + "text": "74", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 73 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[598] Ivo Petrov, Jasper Dekoninck, Lyuben Baltadzhiev, Maria Drencheva, Kristian Minchev, Mislav Balunovic, Nikola Jovanovic, and Martin Vechev. Proof or bluff? evaluating llms on 2025 usa math olympiad. arXiv preprint arXiv:2503.21934, 2025.", + "[599] Rolf Pfister and Hansueli Jud. Understanding and benchmarking artificial intelligence: Openai's o3 is not agi. arXiv preprint arXiv:2501.07458, 2025.", + "[600] Quang Hieu Pham, Thuy Duong Nguyen, Tung Pham, Anh Tuan Luu, and Dat Quoc Nguyen. 
Clozemath: Improving mathematical reasoning in language models by learning to fill equations. arXiv preprint arXiv:2506.03763, 2025.", + "[601] Thinh Pham, Nguyen Nguyen, Pratibha Zunjare, Weiyuan Chen, Yu-Min Tseng, and Tu Vu. Sealqa: Raising the bar for reasoning in search-augmented language models. arXiv preprint arXiv:2506.01062, 2025.", + "[602] Long Phan, Alice Gatti, Ziwen Han, Nathaniel Li, Josephina Hu, Hugh Zhang, Sean Shi, Michael Choi, Anish Agrawal, Arnav Chopra, et al. Humanity's last exam. arXiv preprint arXiv:2501.14249, 2025.", + "[603] Aske Plaat, Annie Wong, Suzan Verberne, Joost Broekens, Niki van Stein, and Thomas Back. Reasoning with large language models, a survey. arXiv preprint arXiv:2407.11511, 2024.", + "[604] Gabriel Poesia, Kanishk Gandhi, Eric Zelikman, and Noah Goodman. Certified deductive reasoning with language models. Transactions on Machine Learning Research, May 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=yXnwrS2T16.", + "[605] Stanislas Polu and Ilya Sutskever. Generative language modeling for automated theorem proving. arXiv preprint arXiv:2009.03393, 2020.", + "[606] Archiki Prasad, Swarnadeep Saha, Xiang Zhou, and Mohit Bansal. ReCEval: Evaluating reasoning chains via correctness and informativeness. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 10066-10086, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.622. URL https://aclanthology.org/2023.emnlp-main.622/.", + "[607] Archiki Prasad, Alexander Koller, Mareike Hartmann, Peter Clark, Ashish Sabharwal, Mohit Bansal, and Tushar Khot. ADaPT: As-needed decomposition and planning with language models. In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Findings of the Association for Computational Linguistics: NAACL 2024, pages 4226-4252, Mexico City, Mexico, June 2024. 
Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-naacl.264. URL https://aclanthology.org/2024-findings-naacl.264/.", + "[608] Tidor-Vlad Pricope. Hardml: A benchmark for evaluating data science and machine learning knowledge and reasoning in ai. arXiv preprint arXiv:2501.15627, 2025.", + "[609] Ben Prystawski, Michael Li, and Noah Goodman. Why think step by step? reasoning emerges from the locality of experience. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 70926-70947. Curran Associates, Inc., September 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/e0af79ad53a336b4c4b4f7e2a68eb609-Paper-Conference.pdf.", + "[610] Israel Puerta-Merino, Carlos Núñez-Molina, Pablo Mesejo, and Juan Fernández-Olivares. A roadmap to guide the integration of llms in hierarchical planning. arXiv preprint arXiv:2501.08068, 2025.", + "[611] Haritz Puerto, Tilek Chubakov, Xiaodan Zhu, Harish Tayyar Madabushi, and Iryna Gurevych. Fine-tuning with divergent chains of thought boosts reasoning through self-correction in language models. arXiv preprint arXiv:2407.03181, 2024.", + "[612] Isha Puri, Shivchander Sudalairaj, Guangxuan Xu, Kai Xu, and Akash Srivastava. A probabilistic inference approach to inference-time scaling of llms using particle-based monte carlo methods. arXiv preprint arXiv:2502.01618, 2025.", + "[613] Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199, 2024." 
+ ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 74 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 74 + }, + { + "type": "page_number", + "text": "75", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 74 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[614] Penghui Qi, Zichen Liu, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Optimizing anytime reasoning via budget relative policy optimization. arXiv preprint arXiv:2505.13438, 2025.", + "[615] Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual reasoning makes smaller llms stronger problem-solvers. arXiv preprint arXiv:2408.06195, 2024.", + "[616] Hongjin Qian and Zheng Liu. Scent of knowledge: Optimizing search-enhanced reasoning with information foraging. arXiv preprint arXiv:2505.09316, 2025.", + "[617] Libo Qin, Qiguang Chen, Fuxuan Wei, Shijue Huang, and Wanxiang Che. Cross-lingual prompting: Improving zero-shot chain-of-thought reasoning across languages. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 2695–2709, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.163. URL https://aclanthology.org/2023.emnlp-main.163/.", + "[618] Libo Qin, Qiguang Chen, Hao Fei, Zhi Chen, Min Li, and Wanxiang Che. What factors affect multi-modal in-context learning? an in-depth exploration. arXiv preprint arXiv:2410.20482, 2024.", + "[619] Libo Qin, Qiguang Chen, Xiachong Feng, Yang Wu, Yongheng Zhang, Yinghui Li, Min Li, Wanxiang Che, and Philip S Yu. Large language models meet nlp: A survey. arXiv preprint arXiv:2405.12819, 2024.", + "[620] Libo Qin, Qiguang Chen, Yuhang Zhou, Zhi Chen, Yinghui Li, Lizi Liao, Min Li, Wanxiang Che, and Philip S Yu. 
Multilingual large language model: A survey of resources, taxonomy and frontiers. arXiv preprint arXiv:2404.04925, 2024.", + "[621] Libo Qin, Qiguang Chen, Jingxuan Zhou, Jin Wang, Hao Fei, Wanxiang Che, and Min Li. Divide-solve-combine: An interpretable and accurate prompting framework for zero-shot multi-intent detection. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 39, pages 25038-25046, 2025.", + "[622] Libo Qin, Qiguang Chen, Yuhang Zhou, Zhi Chen, Yinghui Li, Lizi Liao, Min Li, Wanxiang Che, and S Yu Philip. A survey of multilingual large language models. Patterns, 6(1), January 2025. URL https://www.cell.com/patterns/fulltext/S2666-3899(24)00290-3.", + "[623] Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, et al. O1 replication journey: A strategic progress report-part 1. arXiv preprint arXiv:2410.18982, 2024.", + "[624] Yulei Qin, Gang Li, Zongyi Li, Zihan Xu, Yuchen Shi, Zhekai Lin, Xiao Cui, Ke Li, and Xing Sun. Incentivizing reasoning for advanced instruction-following of large language models. arXiv preprint arXiv:2506.01413, 2025.", + "[625] Jiahao Qiu, Yifu Lu, Yifan Zeng, Jiacheng Guo, Jiayi Geng, Huazheng Wang, Kaixuan Huang, Yue Wu, and Mengdi Wang. Treebon: Enhancing inference-time alignment with speculative tree-search and best-of-n sampling. arXiv preprint arXiv:2410.16033, 2024.", + "[626] Xiaoye Qu, Yafu Li, Zhaochen Su, Weigao Sun, Jianhao Yan, Dongrui Liu, Ganqu Cui, Daizong Liu, Shuxian Liang, Junxian He, et al. A survey of efficient reasoning for large reasoning models: Language, multimodality, and beyond. arXiv preprint arXiv:2503.21614, 2025.", + "[627] Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. 
URL https://openreview.net/forum?id=DRC9pZwBwR.", + "[628] Yuxiao Qu, Matthew Y. R. Yang, Amrith Setlur, Lewis Tunstall, Edward Emanuel Beeching, Ruslan Salakhutdinov, and Aviral Kumar. Optimizing test-time compute via meta reinforcement finetuning. In Workshop on Reasoning and Planning for Large Language Models, March 2025. URL https://openreview.net/forum?id=WGz4ytjolh.", + "[629] Gollam Rabby, Farhana Keya, Parvez Zamil, and Soren Auer. Mc-nest-enhancing mathematical reasoning in large language models with a monte carlo nash equilibrium self-refine tree. arXiv preprint arXiv:2411.15645, 2024." + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 75 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 75 + }, + { + "type": "page_number", + "text": "76", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 75 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[630] Santosh Kumar Radha and Oktay Goktas. On the reasoning capacity of ai models and how to quantify it. arXiv preprint arXiv:2501.13833, 2025.", + "[631] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. URL https://openreview.net/pdf?id=HPuSIXJaa9.", + "[632] Daking Rai and Ziyu Yao. An investigation of neuron activation as a unified lens to explain chain-of-thought eliciting arithmetic reasoning of LLMs. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7174–7193, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.387. 
URL https://aclanthology.org/2024.acl-long.387/.", + "[633] Leonardo Ranaldi, Giulia Pucci, Federico Ranaldi, Elena Sofia Ruzzetti, and Fabio Massimo Zanzotto. A tree-of-thoughts to broaden multi-step reasoning across languages. In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Findings of the Association for Computational Linguistics: NAACL 2024, pages 1229-1241, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-naacl.78. URL https://aclanthology.org/2024 findings-naacl.78/.", + "[634] Leonardo Ranaldi, Marco Valentino, Alexander Polonsky, and André Freitas. Improving chain-of-thought reasoning via quasi-symbolic abstractions. arXiv preprint arXiv:2502.12616, 2025.", + "[635] Mohammad Raza and Natasha Milic-Frayling. Instantiation-based formalization of logical reasoning tasks using language models and logical solvers. arXiv preprint arXiv:2501.16961, 2025.", + "[636] Ali Razghandi, Seyed Mohammad Hadi Hosseini, and Mahdieh Soleymani Baghshah. Cer: Confidence enhanced reasoning in llms. arXiv preprint arXiv:2502.14634, 2025.", + "[637] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=Ti67584b98.", + "[638] Matthew Renze and Erhan Guven. Self-reflection in llm agents: Effects on problem-solving performance. arXiv preprint arXiv:2405.06682, 2024.", + "[639] Baptiste Roziere, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaqing Ellen Tan, Yossi Adi, Jingyu Liu, Romain Sauvestre, Tal Remez, et al. Code llama: Open foundation models for code. arXiv preprint arXiv:2308.12950, 2023.", + "[640] Yangjun Ruan, Neil Band, Chris J Maddison, and Tatsunori Hashimoto. Reasoning to learn from latent thoughts. 
arXiv preprint arXiv:2503.18866, 2025.", + "[641] Jon Saad-Falcon, Rajan Vivek, William Berrios, Nandita Shankar Naik, Matija Franklin, Bertie Vidgen, Amanpreet Singh, Douwe Kiela, and Shikib Mehri. Lmunit: Fine-grained evaluation with natural language unit tests. arXiv preprint arXiv:2412.13091, 2024.", + "[642] Nikta Gohari Sadr, Sangmitra Madhusudan, and Ali Emami. Think or step-by-step? unzipping the black box in zero-shot prompts. arXiv preprint arXiv:2502.03418, 2025.", + "[643] Swarnadeep Saha, Xian Li, Marjan Ghazvininejad, Jason Weston, and Tianlu Wang. Learning to plan & reason for evaluation with thinking-llm-as-a-judge. arXiv preprint arXiv:2501.18099, 2025.", + "[644] S Sauhandikaa, R Bhagavath Narethranath, and R Sathya Bama Krishna. Explainable ai in large language models: A review. In 2024 International Conference on Emerging Research in Computational Science (ICERCS), pages 1-6. IEEE, 2024. URL http://ieeexplore.ieee.org/abstract/document/10895578.", + "[645] William Saunders, Catherine Yeh, Jeff Wu, Steven Bills, Long Ouyang, Jonathan Ward, and Jan Leike. Self-critiquing models for assisting human evaluators. arXiv preprint arXiv:2206.05802, 2022." + ], + "bbox": [ + 181, + 90, + 825, + 911 + ], + "page_idx": 76 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 76 + }, + { + "type": "page_number", + "text": "77", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 76 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[646] Nikunj Saunshi, Nishanth Dikkala, Zhiyuan Li, Sanjiv Kumar, and Sashank J Reddi. Reasoning with latent thoughts: On the power of looped transformers. arXiv preprint arXiv:2502.17416, 2025.", + "[647] Mark Schöne, Babak Rahmani, Heiner Kremer, Fabian Falck, Hitesh Ballani, and Jannes Gladrow. Implicit language models are RNNs: Balancing parallelization and expressivity. 
In *Forty-second International Conference on Machine Learning*, May 2025. URL https://openreview.net/forum?id=5EbiopWH6e.", + "[648] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.", + "[649] ByteDance Seed, Jiaze Chen, Tiantian Fan, Xin Liu, Lingjun Liu, Zhiqi Lin, Mingxuan Wang, Chengyi Wang, Xiangpeng Wei, Wenyuan Xu, et al. Seed1. 5-thinking: Advancing superb reasoning models with reinforcement learning. arXiv preprint arXiv:2504.13914, 2025.", + "[650] Amrith Setlur, Saurabh Garg, Xinyang Geng, Naman Garg, Virginia Smith, and Aviral Kumar. Rl on incorrect synthetic data scales the efficiency of lIm math reasoning by eight-fold. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 43000-43031. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/4b77d5b896c321a29277524a98a50215-Paper-Conference.pdf.", + "[651] Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for LLM reasoning. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=A6Y7Aq1zLW.", + "[652] Amrith Setlur, Nived Rajaraman, Sergey Levine, and Aviral Kumar. Scaling test-time compute without verification or r1 is suboptimal. arXiv preprint arXiv:2502.12118, 2025.", + "[653] Amrith Setlur, Matthew YR Yang, Charlie Snell, Jeremy Greer, Ian Wu, Virginia Smith, Max Simchowitz, and Aviral Kumar. e3: Learning to explore enables extrapolation of test-time compute for llms. arXiv preprint arXiv:2506.09026, 2025.", + "[654] Yu Shang, Yu Li, Fengli Xu, and Yong Li. 
Synergy-of-thoughts: Eliciting efficient reasoning in hybrid language models. arXiv preprint arXiv:2402.02563, 2024.", + "[655] Rulin Shao, Shuyue Stella Li, Rui Xin, Scott Geng, Yiping Wang, Sewoong Oh, Simon Shaolei Du, Nathan Lambert, Sewon Min, Ranjay Krishna, et al. Spurious rewards: Rethinking training signals in rlvr. arXiv preprint arXiv:2506.10947, 2025.", + "[656] Wenqi Shao, Qiaosheng Zhang, Lingxiao Du, Xiangyan Liu, and Fanqing Meng. R1-multimodal-journey. https://github.com/FanqingM/R1-Multimodal-Journey, February 2025.", + "[657] Zhihong Shao, Yeyun Gong, Yelong Shen, Minlie Huang, Nan Duan, and Weizhu Chen. Synthetic prompting: Generating chain-of-thought demonstrations for large language models. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett, editors, Proceedings of the 40th International Conference on Machine Learning, volume 202 of Proceedings of Machine Learning Research, pages 30706-30775. PMLR, 23-29 Jul 2023. URL https://proceedings.mlr.press/v202/shao23a.html.", + "[658] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024.", + "[659] Shuaijie She, Junxiao Liu, Yifeng Liu, Jiajun Chen, Xin Huang, and Shujian Huang. R-prm: Reasoning-driven process reward modeling. arXiv preprint arXiv:2503.21295, 2025.", + "[660] Haozhan Shen, Zilun Zhang, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. https://github.com/om-ai-lab/VLM-R1, February 2025.", + "[661] Maohao Shen, Guangtao Zeng, Zhenting Qi, Zhang-Wei Hong, Zhenfang Chen, Wei Lu, Gregory Wornell, Subhro Das, David Cox, and Chuang Gan. 
Satori: Reinforcement learning" + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 77 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 77 + }, + { + "type": "page_number", + "text": "78", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 77 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "with chain-of-action-thought enhances llm reasoning via autoregressive search. arXiv preprint arXiv:2502.02508, 2025.", + "[662] Xuan Shen, Yizhou Wang, Xiangxi Shi, Yanzhi Wang, Pu Zhao, and Jiuming Gu. Efficient reasoning with hidden thinking. arXiv preprint arXiv:2501.19201, 2025.", + "[663] Yi Shen, Jian Zhang, Jieyun Huang, Shuming Shi, Wenjing Zhang, Jiangze Yan, Ning Wang, Kai Wang, and Shiguo Lian. Dast: Difficulty-adaptive slow-thinking for large reasoning models. arXiv preprint arXiv:2503.04472, 2025.", + "[664] Yifan Shen, Yuanzhe Liu, Jingyuan Zhu, Xu Cao, Xiaofeng Zhang, Yixiao He, Wenming Ye, James Matthew Rehg, and Ismini Lourentzou. Fine-grained preference optimization improves spatial reasoning in vlms. arXiv preprint arXiv:2506.21656, 2025.", + "[665] Leheng Sheng, An Zhang, Zijian Wu, Weixiang Zhao, Changshuo Shen, Yi Zhang, Xiang Wang, and Tat-Seng Chua. On reasoning strength planning in large reasoning models. arXiv preprint arXiv:2506.08390, 2025.", + "[666] Hengyu Shi, Junhao Su, Huansheng Ning, Xiaoming Wei, and Jialin Gao. Layoutcot: Unleashing the deep reasoning potential of large language models for layout generation. arXiv preprint arXiv:2504.10829, 2025.", + "[667] Junhao Shi, Zhaoye Fei, Siyin Wang, Qipeng Guo, Jingjing Gong, and Xipeng Qiu. World-aware planning narratives enhance large vision-language model planner. arXiv preprint arXiv:2506.21230, 2025.", + "[668] Wenhao Shi, Zhiqiang Hu, Yi Bin, Yang Yang, See-Kiong Ng, and Heng Tao Shen. Multimodal mathematical reasoning with diverse solving perspective. 
arXiv preprint arXiv:2507.02804, 2025.", + "[669] Noah Shinn, Federico Cassano, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. Reflexion: language agents with verbal reinforcement learning. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 8634-8652. Curran Associates, Inc., December 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/1b44b878bb782e6954cd888628510e90-Paper-Conference.pdf.", + "[670] Safal Shrestha, Minwu Kim, and Keith Ross. Mathematical reasoning in large language models: Assessing logical and arithmetic errors across wide numerical ranges. arXiv preprint arXiv:2502.08680, 2025.", + "[671] Kashun Shum, Shizhe Diao, and Tong Zhang. Automatic prompt augmentation and selection with chain-of-thought from labeled data. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 12113-12139, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.811. URL https://aclanthology.org/2023.findings-emnlp.811/.", + "[672] Chenglei Si, Diyi Yang, and Tatsunori Hashimoto. Can llms generate novel research ideas? a large-scale human study with $100+$ nlp researchers. arXiv preprint arXiv:2409.04109, 2024.", + "[673] Sam Silver, Jimin Sun, Ivan Zhang, Sara Hooker, and Eddie Kim. Language models can perform single-utterance self-correction of perturbed reasoning. arXiv preprint arXiv:2506.15894, 2025.", + "[674] Avi Singh, John D Co-Reyes, Rishabh Agarwal, Ankesh Anand, Piyush Patil, Xavier Garcia, Peter J Liu, James Harrison, Jaehoon Lee, Kelvin Xu, et al. Beyond human data: Scaling self-training for problem-solving with language models. Transactions on Machine Learning Research, April 2024. 
URL https://openreview.net/pdf?id=lnAyUngGFK.", + "[675] Oscar Skean, Md Rifat Arefin, Dan Zhao, Niket Patel, Jalal Naghiyev, Yann LeCun, and Ravid Shwartz-Ziv. Layer by layer: Uncovering hidden representations in language models. arXiv preprint arXiv:2502.02013, 2025.", + "[676] Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024." + ], + "bbox": [ + 181, + 90, + 826, + 910 + ], + "page_idx": 78 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 78 + }, + { + "type": "page_number", + "text": "79", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 78 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[677] Huatong Song, Jinhao Jiang, Yingqian Min, Jie Chen, Zhipeng Chen, Wayne Xin Zhao, Lei Fang, and Ji-Rong Wen. R1-searcher: Incentivizing the search capability in llms via reinforcement learning. arXiv preprint arXiv:2503.05592, 2025.", + "[678] Jiwon Song, Dongwon Jo, Yulhwa Kim, and Jae-Joon Kim. Reasoning path compression: Compressing generation trajectories for efficient ltm reasoning. arXiv preprint arXiv:2505.13866, 2025.", + "[679] Mingyang Song, Zhaochen Su, Xiaoye Qu, Jiawei Zhou, and Yu Cheng. Prmbench: A fine-grained and challenging benchmark for process-level reward models. arXiv preprint arXiv:2501.03124, 2025.", + "[680] Mingyang Song, Mao Zheng, Zheng Li, Wenjie Yang, Xuan Luo, Yue Pan, and Feng Zhang. Fastcurl: Curriculum reinforcement learning with stage-wise context scaling for efficient training r1-like reasoning models. arXiv preprint arXiv:2503.17287, 2025.", + "[681] Woomin Song, Saket Dingliwal, Sai Muralidhar Jayanthi, Bhavana Ganesh, Jinwoo Shin, Aram Galstyan, and Sravan Babu Bodapati. Accelerated test-time scaling with model-free speculative sampling. 
arXiv preprint arXiv:2506.04708, 2025.", + "[682] Xiaoshuai Song, Yanan Wu, Weixun Wang, Jiaheng Liu, Wenbo Su, and Bo Zheng. Progco: Program helps self-correction of large language models. arXiv preprint arXiv:2501.01264, 2025.", + "[683] Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. arXiv preprint arXiv:2409.12183, 2024.", + "[684] Zayne Rea Sprague, Xi Ye, Kaj Bostrom, Swarat Chaudhuri, and Greg Durrett. MuSR: Testing the limits of chain-of-thought with multistep soft reasoning. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=jenyYQzuel.", + "[685] Gaurav Srivastava, Shuxiang Cao, and Xuan Wang. Towards reasoning ability of small language models. arXiv preprint arXiv:2502.11569, 2025.", + "[686] Saksham Sahai Srivastava and Vaneet Aggarwal. A technical survey of reinforcement learning techniques for large language models. arXiv preprint arXiv:2507.04136, 2025.", + "[687] Saksham Sahai Srivastava and Ashutosh Gandhi. Mathdivide: Improved mathematical reasoning by large language models. arXiv preprint arXiv:2405.13004, 2024.", + "[688] Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. Chain of thoughtlessness? an analysis of cot in planning. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id= kPBEAZU5Nm.", + "[689] Nisan Stiennon, Long Ouyang, Jeffrey Wu, Daniel Ziegler, Ryan Lowe, Chelsea Voss, Alec Radford, Dario Amodei, and Paul F Christiano. Learning to summarize with human feedback. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 3008-3021. Curran Associates, Inc., December 2020. 
URL https://proceedings.neurips.cc/paper_files/paper/2020/file/1f89885d556929e98d3ef9b86448f951-Paper.pdf.", + "[690] Josefa Lia Stoisser, Marc Boubnovski Martell, and Julien Fauqueur. Sparks of tabular reasoning via text2sql reinforcement learning. arXiv preprint arXiv:2505.00016, 2025.", + "[691] DiJia Su, Sainbayar Sukhbaatar, Michael Rabbat, Yuandong Tian, and Qinqing Zheng. Dualformer: Controllable fast and slow thinking by learning with randomized reasoning traces. arXiv preprint arXiv:2410.09918, 2024.", + "[692] Jinyan Su and Claire Cardie. Thinking fast and right: Balancing accuracy and reasoning length with adaptive rewards. arXiv preprint arXiv:2505.18298, 2025.", + "[693] Yi Su, Dian Yu, Linfeng Song, Juntao Li, Haitao Mi, Zhaopeng Tu, Min Zhang, and Dong Yu. Expanding rl with verifiable rewards across diverse domains. arXiv preprint arXiv:2503.23829, 2025." + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 79 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 79 + }, + { + "type": "page_number", + "text": "80", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 79 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[694] Zhaochen Su, Peng Xia, Hangyu Guo, Zhenhua Liu, Yan Ma, Xiaoye Qu, Jiaqi Liu, Yanshu Li, Kaide Zeng, Zhengyuan Yang, et al. Thinking with images for multimodal reasoning: Foundations, methods, and future frontiers. arXiv preprint arXiv:2506.23918, 2025.", + "[695] Guangyan Sun, Mingyu Jin, Zhenting Wang, Cheng-Long Wang, Siqi Ma, Qifan Wang, Tong Geng, Ying Nian Wu, Yongfeng Zhang, and Dongfang Liu. Visual agents as fast and slow thinkers. In The Thirteenth International Conference on Learning Representations, January 2025. 
URL https://openreview.net/forum?id=ncCuiD3KJQ.", + "[696] Jiankai Sun, Chuanyang Zheng, Enze Xie, Zhengying Liu, Ruihang Chu, Jianing Qiu, Jiaqi Xu, Mingyu Ding, Hongyang Li, Mengzhe Geng, et al. A survey of reasoning with foundation models. arXiv preprint arXiv:2312.11562, 2023.", + "[697] Linzhuang Sun, Hao Liang, Jingxuan Wei, Bihui Yu, Tianpeng Li, Fan Yang, Zenan Zhou, and Wentao Zhang. Mm-verify: Enhancing multimodal reasoning with chain-of-thought verification. arXiv preprint arXiv:2502.13383, 2025.", + "[698] Qiushi Sun, Zhoumianze Liu, Chang Ma, Zichen Ding, Fangzhi Xu, Zhangyue Yin, Haiteng Zhao, Zhenyu Wu, Kanzhi Cheng, Zhaoyang Liu, Jianing Wang, Qintong Li, Robert Tang, Tianbao Xie, Xiachong Feng, Xiang Li, Ben Kao, Wenhai Wang, Biqing Qi, Lingpeng Kong, and Zhiyong Wu. Scienceboard: Evaluating multimodal autonomous agents in realistic scientific workflows. In ICML 2025 Workshop on Computer Use Agents, June 2025. URL https://openreview.net/forum?id=CTtuHMeU5e.", + "[699] Shengyang Sun, Yian Zhang, Alexander Bukharin, David Mosallanezhad, Jiaqi Zeng, Soumye Singhal, Gerald Shen, Adi Renduchintala, Tugrul Konuk, Yi Dong, et al. Reward-aware preference optimization: A unified mathematical framework for model alignment. arXiv preprint arXiv:2502.00203, 2025.", + "[700] Wei Sun, Qianlong Du, Fuwei Cui, and Jiajun Zhang. An efficient and precise training data construction framework for process-supervised reward model in mathematical reasoning. arXiv preprint arXiv:2503.02382, 2025.", + "[701] Yifan Sun, Jingyan Shen, Yibin Wang, Tianyu Chen, Zhendong Wang, Mingyuan Zhou, and Huan Zhang. Improving data efficiency for ltm reinforcement fine-tuning through difficulty-targeted online data selection and rollout replay. arXiv preprint arXiv:2506.05316, 2025.", + "[702] Yuhong Sun, Zhangyue Yin, Xuanjing Huang, Xipeng Qiu, and Hui Zhao. Error classification of large language models on math word problems: A dynamically adaptive framework. 
arXiv preprint arXiv:2501.15581, 2025.", + "[703] Zhongxiang Sun, Qipeng Wang, Weijie Yu, Xiaoxue Zang, Kai Zheng, Jun Xu, Xiao Zhang, Song Yang, and Han Li. Rearter: Retrieval-augmented reasoning with trustworthy process rewarding. arXiv preprint arXiv:2501.07861, 2025.", + "[704] Richard S Sutton, David McAllester, Satinder Singh, and Yishay Mansour. Policy gradient methods for reinforcement learning with function approximation. In S. Solla, T. Leen, and K. Müller, editors, Advances in Neural Information Processing Systems, volume 12. MIT Press, November 1999. URL https://proceedings.neurips.cc/paper_files/paper/1999/file/464d828b85b0bed98e80ade0a5c43b0f-Paper.pdf.", + "[705] Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc Le, Ed Chi, Denny Zhou, and Jason Wei. Challenging BIG-bench tasks and whether chain-of-thought can solve them. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Findings of the Association for Computational Linguistics: ACL 2023, pages 13003-13051, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.824. URL https://aclanthology.org/2023-findings-acl.824/.", + "[706] Jihoon Tack, Jack Lanchantin, Jane Yu, Andrew Cohen, Ilia Kulikov, Janice Lan, Shibo Hao, Yuandong Tian, Jason Weston, and Xian Li. Llm pretraining with continuous concepts. arXiv preprint arXiv:2502.08524, 2025.", + "[707] Huajie Tan, Yuheng Ji, Xiaoshuai Hao, Minglan Lin, Pengwei Wang, Zhongyuan Wang, and Shanghang Zhang. Reason-rft: Reinforcement fine-tuning for visual reasoning. arXiv preprint arXiv:2503.20752, 2025." 
+ ], + "bbox": [ + 181, + 90, + 825, + 912 + ], + "page_idx": 80 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 80 + }, + { + "type": "page_number", + "text": "81", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 80 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[708] Juanhe (TJ) Tan. Causal abstraction for chain-of-thought reasoning in arithmetic word problems. In Yonatan Belinkov, Sophie Hao, Jaap Jumelet, Najoung Kim, Arya McCarthy, and Hosein Mohebbi, editors, Proceedings of the 6th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP, pages 155–168, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.blackboxnlp-1.12. URL https://aclanthology.org/2023.blackboxnlp-1.12.", + "[709] Sijun Tan, Siyuan Zhuang, Kyle Montgomery, William Y Tang, Alejandro Cuadron, Chenguang Wang, Raluca Ada Popa, and Ion Stoica. Judgebench: A benchmark for evaluating llm-based judges. arXiv preprint arXiv:2410.12784, 2024.", + "[710] Xiaoyu Tan, Tianchu Yao, Chao Qu, Bin Li, Minghao Yang, Dakuan Lu, Haozhe Wang, Xihe Qiu, Wei Chu, Yinghui Xu, et al. Aurora: Automated training framework of universal process reward models via ensemble prompting and reverse verification. arXiv preprint arXiv:2502.11520, 2025.", + "[711] Kexian Tang, Junyao Gao, Yanhong Zeng, Haodong Duan, Yanan Sun, Zhening Xing, Wenran Liu, Kaifeng Lyu, and Kai Chen. Lego-puzzles: How good are mllms at multi-step spatial reasoning? arXiv preprint arXiv:2503.19990, 2025.", + "[712] Yihong Tang, Kehai Chen, Muyun Yang, Zhengyu Niu, Jing Li, Tiejun Zhao, and Min Zhang. Thinking in character: Advancing role-playing agents with role-aware reasoning. arXiv preprint arXiv:2506.01748, 2025.", + "[713] Zhengyang Tang, Ziniu Li, Zhenyang Xiao, Tian Ding, Ruoyu Sun, Benyou Wang, Dayiheng Liu, Fei Huang, Tianyu Liu, Bowen Yu, et al. 
Enabling scalable oversight via self-evolving critic. arXiv preprint arXiv:2501.05727, 2025.", + "[714] Zhengyang Tang, Ziniu Li, Zhenyang Xiao, Tian Ding, Ruoyu Sun, Benyou Wang, Dayiheng Liu, Fei Huang, Tianyu Liu, Bowen Yu, et al. Realcritic: Towards effectiveness-driven evaluation of language model critiques. arXiv preprint arXiv:2501.14492, 2025.", + "[715] Sree Harsha Tanneru, Dan Ley, Chirag Agarwal, and Himabindu Lakkaraju. On the hardness of faithful chain-of-thought reasoning in large language models. arXiv preprint arXiv:2406.10625, 2024.", + "[716] Amir Taubenfeld, Tom Sheffer, Eran Ofek, Amir Feder, Ariel Goldstein, Zorik Gekhman, and Gal Yona. Confidence improves self-consistency in llms. arXiv preprint arXiv:2502.06233, 2025.", + "[717] DolphinR1 Team. Dolphin R1. https://huggingface.co/datasets/cognitivecomputations/dolphin-r1, February 2025.", + "[718] Fancy-MLLM Team. R1 Onevision. https://huggingface.co/datasets/Fancy-MLLM/R1-Onevision, February 2025.", + "[719] Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024.", + "[720] Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, et al. Gemma 2: Improving open language models at a practical size. arXiv preprint arXiv:2408.00118, 2024.", + "[721] Huggingface Team. Open r1. https://github.com/huggingface/open-r1, January 2025.", + "[722] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025.", + "[723] NovaSky Team. Think less, achieve more: Cut reasoning costs by $50\\%$ without sacrificing accuracy. 
https://novasky-ai.github.io/posts/reduce-overthinking, January 2025. Accessed: 2025-01-23.", + "[724] NovaSky Team. Sky-t1: Train your own o1 preview model within $ 450. https://novaskyai.github.io/posts/sky-t1, January 2025. Accessed: 2025-01-09.", + "[725] NVIDIA Team. Mistral-nemo-12b-instruct. https://huggingface.co/nvidia/Mistral-NeMo-12B-Instruct, July 2024." + ], + "bbox": [ + 181, + 90, + 830, + 912 + ], + "page_idx": 81 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 81 + }, + { + "type": "page_number", + "text": "82", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 81 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[726] OpenDeepResearch Team. Open deep research. https://github.com/nickscamara/open-deepresearch, February 2025.", + "[727] OpenO1 Team. Open o1. https://github.com/Open-Source-O1/Open-O1, February 2025.", + "[728] OpenR1 Team. Open r1 math 200k. https://huggingface.co/datasets/open-r1/OpenR1-Math-220k, February 2025.", + "[729] OpenThoughts Team. Open Thoughts. https://open-thoughts.ai, January 2025.", + "[730] PowerInfer Team. QwQ LongCoT 500k. https://huggingface.co/datasets/PowerInfer/QWQ-LONGCOT-500K, January 2025.", + "[731] QwQ Team. Qwq: Reflect deeply on the boundaries of the unknown. https://qwenlm.github.io/blog/qwq-32b-preview/, November 2025.", + "[732] X-R1 Team. X-r1. https://github.com/dhcode-cpp/X-R1, February 2025.", + "[733] Fengwei Teng, Zhaoyang Yu, Quan Shi, Jiayi Zhang, Chenglin Wu, and Yuyu Luo. Atom of thoughts for markov ltm test-time scaling. arXiv preprint arXiv:2502.12018, 2025.", + "[734] Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, et al. Llamav-o1: Rethinking step-by-step visual reasoning in llms. 
arXiv preprint arXiv:2501.06186, 2025.", + "[735] George Thomas, Alex J Chan, Jikun Kang, Wenqi Wu, Filippos Christianos, Fraser Greenlee, Andy Toulis, and Marvin Purtorab. Webgames: Challenging general-purpose web-browsing ai agents. arXiv preprint arXiv:2502.18356, 2025.", + "[736] Xiaoyu Tian, Sitong Zhao, Haotian Wang, Shuaiang Chen, Yunjie Ji, Yiping Peng, Han Zhao, and Xiangang Li. Think twice: Enhancing lIm reasoning by scaling multi-round test-time thinking. arXiv preprint arXiv:2503.19855, 2025.", + "[737] Ye Tian, Baolin Peng, Linfeng Song, Lifeng Jin, Dian Yu, Lei Han, Haitao Mi, and Dong Yu. Toward self-improvement of llms via imagination, searching, and criticizing. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 52723-52748. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/5e5853f35164e434015716a8c2a66543-Paper-Conference.pdf.", + "[738] Yuxuan Tong, Xiwen Zhang, Rui Wang, Ruidong Wu, and Junxian He. Dart-math: Difficulty-aware rejection tuning for mathematical problem-solving. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 7821-7846. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/0ef1afa0daa888d695dcd5e9513bafa3-Paper-Conference.pdf.", + "[739] Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. Openmathinstruct-2: Accelerating ai for math with massive open-source instruction data. arXiv preprint arXiv:2410.01560, 2024.", + "[740] Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. Openmathinstruct-2: Accelerating ai for math with massive open-source instruction data. 
arXiv preprint arXiv:2410.01560, 2024.", + "[741] Shubham Toshniwal, Ivan Moshkov, Sean Naresthiran, Daria Gitman, Fei Jia, and Igor Gitman. Openmathinstruct-1: A 1.8 million math instruction tuning dataset. arXiv preprint arXiv: Arxiv-2402.10176, 2024.", + "[742] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023.", + "[743] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023.", + "[744] Christoph Treude and Raula Gaikovina Kula. Interacting with ai reasoning models: Harnessing \"thoughts\" for ai-driven software engineering. arXiv preprint arXiv:2503.00483, 2025." + ], + "bbox": [ + 181, + 90, + 826, + 910 + ], + "page_idx": 82 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 82 + }, + { + "type": "page_number", + "text": "83", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 82 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[745] Luong Trung, Xinbo Zhang, Zhanming Jie, Peng Sun, Xiaoran Jin, and Hang Li. ReFT: Reasoning with reinforced fine-tuning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7601–7614, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.410. URL https://aclanthology.org/2024.acl-long.410/.", + "[746] Songjun Tu, Jiahao Lin, Qichao Zhang, Xiangyu Tian, Linjing Li, Xiangyuan Lan, and Dongbin Zhao. 
Learning when to think: Shaping adaptive reasoning in r1-style models via multi-stage rl. arXiv preprint arXiv:2505.10832, 2025.", + "[747] Benjamin Turtel, Danny Franklin, and Philipp Schoenegger. Llms can teach themselves to better predict the future. arXiv preprint arXiv:2502.05253, 2025.", + "[748] Martin Tutek, Fateme Hashemi Chaleshtori, Ana Marasović, and Yonatan Belinkov. Measuring faithfulness of chains of thought by unlearning reasoning steps. arXiv preprint arXiv:2502.14829, 2025.", + "[749] Jonathan Uesato, Nate Kushner, Ramana Kumar, Francis Song, Noah Siegel, Lisa Wang, Antonia Creswell, Geoffrey Irving, and Irina Higgins. Solving math word problems with process- and outcome-based feedback. arXiv preprint arXiv:2211.14275, 2022.", + "[750] Robert Vacareanu, Anurag Pratik, Evangelia Spiliopoulou, Zheng Qi, Giovanni Paolini, Neha Anna John, Jie Ma, Yassine Benajiba, and Miguel Ballesteros. General purpose verification for chain of thought prompting. arXiv preprint arXiv:2405.00204, 2024.", + "[751] Karthik Valmeekam, Kaya Stechly, and Subbarao Kambhampati. LLMs still can't plan; can LRMs? a preliminary evaluation of openAI's o1 on planbench. In NeurIPS 2024 Workshop on Open-World Agents, October 2024. URL https://openreview.net/forum?id=Gcr1Lx4Koz.", + "[752] Jean Vassoyan, Nathanaël Beau, and Roman Plaud. Ignore the kl penalty! boosting exploration on critical tokens to enhance rl fine-tuning. arXiv preprint arXiv:2502.06533, 2025.", + "[753] Tu Vu, Kalpesh Krishna, Salaheddin Alzubi, Chris Tar, Manaal Faruqui, and Yun-Hsuan Sung. Foundational autorators: Taming large language models for better automatic evaluation. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 17086-17105, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10. 18653/v1/2024.emnlp-main.949. 
URL https://aclanthology.org/2024.emnlp-main.949/.", + "[754] Guangya Wan, Yuqi Wu, Jie Chen, and Sheng Li. Cot rerailer: Enhancing the reliability of large language models in complex reasoning tasks through error detection and correction. arXiv preprint arXiv:2408.13940, 2024.", + "[755] Ziyu Wan, Xidong Feng, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. Alphazero-like tree-search can guide large language model decoding and training. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=C4OpREezgj.", + "[756] Ziyu Wan, Yunxiang Li, Yan Song, Hanjing Wang, Linyi Yang, Mark Schmidt, Jun Wang, Weinan Zhang, Shuyue Hu, and Ying Wen. Rema: Learning to meta-think for llms with multi-agent reinforcement learning. arXiv preprint arXiv:2503.09501, 2025.", + "[757] Ante Wang, Linfeng Song, Ye Tian, Baolin Peng, Dian Yu, Haitao Mi, Jinsong Su, and Dong Yu. Litesearch: Efficacious tree search for lIm. arXiv preprint arXiv:2407.00320, 2024.", + "[758] Ante Wang, Linfeng Song, Ye Tian, Dian Yu, Haitao Mi, Xiangyu Duan, Zhaopeng Tu, Jinsong Su, and Dong Yu. Don't get lost in the trees: Streamlining llm reasoning by overcoming tree search exploration pitfalls. arXiv preprint arXiv:2502.11183, 2025.", + "[759] Boshi Wang, Sewon Min, Xiang Deng, Jiaming Shen, You Wu, Luke Zettlemoyer, and Huan Sun. Towards understanding chain-of-thought prompting: An empirical study of what matters. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2717–2739, Toronto, Canada, July 2023. 
Association for Computational" + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 83 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 83 + }, + { + "type": "page_number", + "text": "84", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 83 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Linguistics. doi: 10.18653/v1/2023.acl-long.153. URL https://aclanthology.org/2023.acl-long.153/.", + "[760] Chao Wang, Luning Zhang, Zheng Wang, and Yang Zhou. Can large language models unveil the mysteries? an exploration of their ability to unlock information in complex scenarios. arXiv preprint arXiv:2502.19973, 2025.", + "[761] Chaojie Wang, Yanchen Deng, Zhiyi Lyu, Liang Zeng, Jujie He, Shuicheng Yan, and Bo An. Q*: Improving multi-step reasoning for llms with deliberative planning. arXiv preprint arXiv:2406.14283, 2024.", + "[762] Chenlong Wang, Yuanning Feng, Dongping Chen, Zhaoyang Chu, Ranjay Krishna, and Tianyi Zhou. Wait, we don't need to\" wait!! removing thinking tokens improves reasoning efficiency. arXiv preprint arXiv:2506.08343, 2025.", + "[763] Clinton J Wang, Dean Lee, Cristina Menghini, Johannes Mols, Jack Doughty, Adam Khoja, Jayson Lynch, Sean Hendryx, Summer Yue, and Dan Hendrycks. Enigmaeval: A benchmark of long multimodal reasoning challenges. arXiv preprint arXiv:2502.08859, 2025.", + "[764] Danqing Wang, Zhuorui Ye, Fei Fang, and Lei Li. Cooperative strategic planning enhances reasoning capabilities in large language models. arXiv preprint arXiv:2410.20007, 2024.", + "[765] Evan Z Wang, Federico Cassano, Catherine Wu, Yunfeng Bai, William Song, Vaskar Nath, Ziwen Han, Sean M. Hendryx, Summer Yue, and Hugh Zhang. Planning in natural language improves LLM search for code generation. In The First Workshop on System-2 Reasoning at Scale, NeurIPS'24, October 2024. 
URL https://openreview.net/forum?id=B2iSfPNj49.", + "[766] Guoxin Wang, Minyu Gao, Shuai Yang, Ya Zhang, Lizhi He, Liang Huang, Hanlin Xiao, Yexuan Zhang, Wanyue Li, Lu Chen, et al. Citrus: Leveraging expert cognitive pathways in a medical language model for advanced medical decision support. arXiv preprint arXiv:2502.18274, 2025.", + "[767] Hanbin Wang, Xiaoxuan Zhou, Zhipeng Xu, Keyuan Cheng, Yuxin Zuo, Kai Tian, Jingwei Song, Junting Lu, Wenhui Hu, and Xueyang Liu. Code-vision: Evaluating multimodal llms logic understanding and code generation capabilities. arXiv preprint arXiv:2502.11829, 2025.", + "[768] Hanlin Wang, Jian Wang, Chak Tou Leong, and Wenjie Li. Steca: Step-level trajectory calibration for lIm agent learning. arXiv preprint arXiv:2502.14276, 2025.", + "[769] Hanyin Wang, Zhenbang Wu, Gururaj Kolar, Hariprasad Korsapati, Brian Bartlett, Bryan Hull, and Jimeng Sun. Reinforcement learning for out-of-distribution reasoning in llms: An empirical study on diagnosis-related group coding. arXiv preprint arXiv:2505.21908, 2025.", + "[770] Hao Wang, Boyi Liu, Yufeng Zhang, and Jie Chen. Seed-cts: Unleashing the power of tree search for superior performance in competitive coding tasks. arXiv preprint arXiv:2412.12544, 2024.", + "[771] Haoxiang Wang, Wei Xiong, Tengyang Xie, Han Zhao, and Tong Zhang. Interpretable preferences via multi-objective reward modeling and mixture-of-experts. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 10582-10592, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.620. URL https://aclanthology.org/2024/findings-emnlp.620/.", + "[772] Haoyu Wang, Zeyu Qin, Li Shen, Xueqian Wang, Minhao Cheng, and Dacheng Tao. Leveraging reasoning with guidelines to elicit and utilize knowledge for enhancing safety alignment. 
arXiv preprint arXiv:2502.04040, 2025.", + "[773] Huaijie Wang, Shibo Hao, Hanze Dong, Shenao Zhang, Yilin Bao, Ziran Yang, and Yi Wu. Offline reinforcement learning for llm multi-step reasoning. arXiv preprint arXiv:2412.16145, 2024.", + "[774] Jiaan Wang, Fandong Meng, Yunlong Liang, and Jie Zhou. Drt-o1: Optimized deep reasoning translation via long chain-of-thought. arXiv preprint arXiv:2412.17498, 2024.", + "[775] Jiaan Wang, Fandong Meng, and Jie Zhou. Extrans: Multilingual deep reasoning translation via exemplar-enhanced reinforcement learning. arXiv preprint arXiv:2505.12996, 2025." + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 84 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 84 + }, + { + "type": "page_number", + "text": "85", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 84 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[776] Jiaqi WANG, Yuhang Zhou, Zhixiong Zhang, Qiguang Chen, Yongqiang Chen, and James Cheng. DivIL: Unveiling and addressing over-invariance for out-of-distribution generalization. Transactions on Machine Learning Research, February 2025. ISSN 2835-8856. URL https://openreview.net/forum?id=2Zan4ATYsh.", + "[777] Jun Wang, Meng Fang, Ziyu Wan, Muning Wen, Jiachen Zhu, Anjie Liu, Ziqin Gong, Yan Song, Lei Chen, Lionel M Ni, et al. Openr: An open source framework for advanced reasoning with large language models. arXiv preprint arXiv:2410.09671, 2024.", + "[778] Junlin Wang, Jue Wang, Ben Athiwaratkun, Ce Zhang, and James Zou. Mixture-of-agents enhances large language model capabilities. arXiv preprint arXiv:2406.04692, 2024.", + "[779] Junxiong Wang, Wen-Ding Li, Daniele Paliotta, Daniel Ritter, Alexander M Rush, and Tri Dao. M1: Towards scalable test-time compute with mamba reasoning models. 
arXiv preprint arXiv:2504.10449, 2025.", + "[780] Junyang Wang, Haiyang Xu, Xi Zhang, Ming Yan, Ji Zhang, Fei Huang, and Jitao Sang. Mobile-agent-v: Learning mobile device operation through video-guided multi-agent collaboration. arXiv preprint arXiv:2502.17110, 2025.", + "[781] Ke Wang, Houxing Ren, Aojun Zhou, Zimu Lu, Sichun Luo, Weikang Shi, Renrui Zhang, Linqi Song, Mingjie Zhan, and Hongsheng Li. Mathcoder: Seamless code integration in llms for enhanced mathematical reasoning. arXiv preprint arXiv:2310.03731, 2023.", + "[782] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Houxing Ren, Aojun Zhou, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with MATH-vision dataset. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=QWTCcxMpPA.", + "[783] Ke Wang, Houxing Ren, Aojun Zhou, Zimu Lu, Sichun Luo, Weikang Shi, Renrui Zhang, Linqi Song, Mingjie Zhan, and Hongsheng Li. Mathcoder: Seamless code integration in LLMs for enhanced mathematical reasoning. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=z8TW0ttBPp.", + "[784] Kevin Wang, Junbo Li, Neel P Bhatt, Yihan Xi, Qiang Liu, Ufuk Topcu, and Zhangyang Wang. On the planning abilities of openai's o1 models: Feasibility, optimality, and generalizability. arXiv preprint arXiv:2409.19924, 2024.", + "[785] Kun Wang, Guibin Zhang, Zhenhong Zhou, Jiahao Wu, Miao Yu, Shiqian Zhao, Chenlong Yin, Jinhu Fu, Yibo Yan, Hanjun Luo, et al. A comprehensive survey in llm (-agent) full stack safety: Data, training and deployment. arXiv preprint arXiv:2504.15585, 2025.", + "[786] Liang Wang, Haonan Chen, Nan Yang, Xiaolong Huang, Zhicheng Dou, and Furu Wei. Chain-of-retrieval augmented generation. arXiv preprint arXiv:2501.14342, 2025.", + "[787] Libo Wang. Dynamic chain-of-thought: Towards adaptive deep reasoning. 
arXiv preprint arXiv:2502.10428, 2025.", + "[788] Mengru Wang, Xingyu Chen, Yue Wang, Zhiwei He, Jiahao Xu, Tian Liang, Qizhhi Liu, Yunzhi Yao, Wenxuan Wang, Ruotian Ma, et al. Two experts are all you need for steering thinking: Reinforcing cognitive effort in moe reasoning models without additional training. arXiv preprint arXiv:2505.14681, 2025.", + "[789] Mingyang Wang, Lukas Lange, Heike Adel, Yunpu Ma, Jannik Strötgen, and Hinrich Schütze. Language mixing in reasoning language models: Patterns, impact, and internal causes. arXiv preprint arXiv:2505.14815, 2025.", + "[790] Minzheng Wang, Yongbin Li, Haobo Wang, Xinghua Zhang, Nan Xu, Bingli Wu, Fei Huang, Haiyang Yu, and Wenji Mao. Adaptive thinking via mode policy optimization for social language agents. arXiv preprint arXiv:2505.02156, 2025.", + "[791] Peifeng Wang, Austin Xu, Yilun Zhou, Caiming Xiong, and Shafiq Joty. Direct judgement preference optimization. arXiv preprint arXiv:2409.14664, 2024.", + "[792] Peiyi Wang, Lei Li, Zhihong Shao, Runxin Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce LLMs step-by-step without human annotations. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long" + ], + "bbox": [ + 181, + 90, + 825, + 912 + ], + "page_idx": 85 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 85 + }, + { + "type": "page_number", + "text": "86", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 85 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Papers), pages 9426-9439, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.510. URL https://aclanthology.org/2024.acl-long.510/.", + "[793] Peng Wang, Xuesi Hu, Jiageng Wu, Yuntao Zou, Qiancheng Zhang, and Dagang Li. 
What factors affect llms and rllms in financial question answering? arXiv preprint arXiv:2507.08339, 2025.", + "[794] Peng Wang, Ruihan Tao, Qiguang Chen, Mengkang Hu, and Libo Qin. X-webagentbench: A multilingual interactive web benchmark for evaluating global agentic system. arXiv preprint arXiv:2505.15372, 2025.", + "[795] Peng-Yuan Wang, Tian-Shuo Liu, Chenyang Wang, Yi-Di Wang, Shu Yan, Cheng-Xing Jia, Xu-Hui Liu, Xin-Wei Chen, Jia-Cheng Xu, Ziniu Li, et al. A survey on large language models for mathematical reasoning. arXiv preprint arXiv:2506.08446, 2025.", + "[796] Ru Wang, Wei Huang, Selena Song, Haoyu Zhang, Yusuke Iwasawa, Yutaka Matsuo, and Jiaxian Guo. Beyond in-distribution success: Scaling curves of cot granularity for language model generalization. arXiv preprint arXiv:2502.18273, 2025.", + "[797] Ruida Wang, Rui Pan, Yuxin Li, Jipeng Zhang, Yizhen Jia, Shizhe Diao, Renjie Pi, Junjie Hu, and Tong Zhang. Ma-lot: Model-collaboration lean-based long chain-of-thought reasoning enhances formal theorem proving. arXiv preprint arXiv:2503.03205, 2025.", + "[798] Ruoyao Wang, Peter Jansen, Marc-Alexandre Côté, and Prithviraj Ammanabrolu. Science-World: Is your agent smarter than a 5th grader? In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang, editors, Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 11279–11298, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.775. URL https://aclanthology.org/2022.emnlp-main.775/.", + "[799] Siyuan Wang, Enda Zhao, Zhongyu Wei, and Xiang Ren. Stepwise informativeness search for improving llm reasoning. arXiv preprint arXiv:2502.15335, 2025.", + "[800] Song Wang, Gongfan Fang, Lingdong Kong, Xiangtai Li, Jianyun Xu, Sheng Yang, Qiang Li, Jianke Zhu, and Xinchao Wang. Pixelthink: Towards efficient chain-of-pixel reasoning. 
arXiv preprint arXiv:2505.23727, 2025.", + "[801] Tianlong Wang, Junzhe Chen, Xueting Han, and Jing Bai. Cpl: Critical plan step learning boosts llm generalization in reasoning tasks. arXiv preprint arXiv:2409.08642, 2024.", + "[802] Tianlu Wang, Ping Yu, Xiaoqing Ellen Tan, Sean O'Brien, Ramakanth Pasunuru, Jane Dwivedi-Yu, Olga Golovneva, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Shepherd: A critic for language model generation. arXiv preprint arXiv:2308.04592, 2023.", + "[803] Tianlu Wang, Ilia Kulikov, Olga Golovneva, Ping Yu, Weizhe Yuan, Jane Dwivedi-Yu, Richard Yuanzhe Pang, Maryam Fazel-Zarandi, Jason Weston, and Xian Li. Self-taught evaluators. arXiv preprint arXiv:2408.02666, 2024.", + "[804] Weixuan Wang, Minghao Wu, Barry Haddow, and Alexandra Birch. Demystifying multilingual chain-of-thought in process reward modeling. arXiv preprint arXiv:2502.12663, 2025.", + "[805] Weixun Wang, Shaopan Xiong, Gengru Chen, Wei Gao, Sheng Guo, Yancheng He, Ju Huang, Jiaheng Liu, Zhendong Li, Xiaoyang Li, et al. Reinforcement learning optimization for large-scale learning: An efficient and user-friendly scaling library. arXiv preprint arXiv:2506.06122, 2025.", + "[806] Weiyun Wang, Zhe Chen, Wenhai Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Jinguo Zhu, Xizhou Zhu, Lewei Lu, Yu Qiao, et al. Enhancing the reasoning ability of multimodal large language models via mixed preference optimization. arXiv preprint arXiv:2411.10442, 2024.", + "[807] Weiyun Wang, Zhangwei Gao, Lianjie Chen, Zhe Chen, Jinguo Zhu, Xiangyu Zhao, Yangzhou Liu, Yue Cao, Shenglong Ye, Xizhou Zhu, et al. Visualprm: An effective process reward model for multimodal reasoning. arXiv preprint arXiv:2503.10291, 2025." 
+ ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 86 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 86 + }, + { + "type": "page_number", + "text": "87", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 86 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[808] Xiaoqiang Wang, Suyuchen Wang, Yun Zhu, and Bang Liu. System-1.5 reasoning: Traversal in language and latent spaces with dynamic shortcuts. arXiv preprint arXiv:2505.18962, 2025.", + "[809] Xiaoxuan Wang, Yihe Deng, Mingyu Derek Ma, and Wei Wang. Entropy-based adaptive weighting for self-training. arXiv preprint arXiv:2503.23913, 2025.", + "[810] Xinyi Wang, Lucas Caccia, Oleksiy Ostapenko, Xingdi Yuan, William Yang Wang, and Alessandro Sordoni. Guiding language model reasoning with planning tokens. arXiv preprint arXiv:2310.05707, 2023.", + "[811] Xinyi Wang, Alfonso Amayuelas, Kexun Zhang, Liangming Pan, Wenhu Chen, and William Yang Wang. Understanding reasoning ability of language models from the perspective of reasoning paths aggregation. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 50026-50042. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/wang24a.html.", + "[812] Xinyi Wang, Shawn Tan, Mingyu Jin, William Yang Wang, Rameswar Panda, and Yikang Shen. Do larger language models imply better reasoning? a pretraining scaling law for reasoning. arXiv preprint arXiv:2504.03635, 2025.", + "[813] Xiyao Wang, Jiuhai Chen, Zhaoyang Wang, Yuhang Zhou, Yiyang Zhou, Huaxiu Yao, Tianyi Zhou, Tom Goldstein, Parminder Bhatia, Furong Huang, et al. Enhancing visual-language modality alignment in large vision language models via self-improvement. 
arXiv preprint arXiv:2405.15973, 2024.", + "[814] Xiyao Wang, Linfeng Song, Ye Tian, Dian Yu, Baolin Peng, Haitao Mi, Furong Huang, and Dong Yu. Towards self-improvement of llms via mcts: Leveraging stepwise knowledge with curriculum preference learning. arXiv preprint arXiv:2410.06508, 2024.", + "[815] Xuezhi Wang and Denny Zhou. Chain-of-thought reasoning without prompting. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=4Zt7S0B0Jp.", + "[816] Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=1PL1NIMMrw.", + "[817] Yao Wang, Mingxuan Cui, and Arthur Jiang. Enabling ai scientists to recognize innovation: A domain-agnostic algorithm for assessing novelty. arXiv preprint arXiv:2503.01508, 2025.", + "[818] Yifei Wang, Yuyang Wu, Zeming Wei, Stefanie Jegelka, and Yisen Wang. A theoretical understanding of self-correction through in-context alignment. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=OtvNLTWYww.", + "[819] Yiqun Wang, Sile Hu, Yonggang Zhang, Xiang Tian, Xuesong Liu, Yaowu Chen, Xu Shen, and Jieping Ye. How large language models implement chain-of-thought? September 2023. URL https://openreview.net/pdf?id=b2XfOm3RJa.", + "[820] Yu Wang, Nan Yang, Liang Wang, and Furu Wei. Examining false positives under inference scaling for mathematical reasoning. arXiv preprint arXiv:2502.06217, 2025.", + "[821] Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, Tianle Li, Max Ku, Kai Wang, Alex Zhuang, Rongqi Fan, Xiang Yue, and Wenhu Chen. 
MMLU-pro: A more robust and challenging multi-task language understanding benchmark. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=y10DM6R2r3.", + "[822] Yubo Wang, Xiang Yue, and Wenhu Chen. Critique fine-tuning: Learning to critique is more effective than learning to imitate. arXiv preprint arXiv:2501.17703, 2025.", + "[823] Yue Wang, Qiuzhi Liu, Jiahao Xu, Tian Liang, Xingyu Chen, Zhiwei He, Linfeng Song, Dian Yu, Juntao Li, Zhuosheng Zhang, et al. Thoughts are all over the place: On the underthinking of o1-like llms. arXiv preprint arXiv:2501.18585, 2025." + ], + "bbox": [ + 181, + 90, + 826, + 911 + ], + "page_idx": 87 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 87 + }, + { + "type": "page_number", + "text": "88", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 87 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[824] Yuhang Wang, Youhe Jiang, Bin Cui, and Fangcheng Fu. Thinking short and right over thinking long: Serving lmm reasoning efficiently and accurately. arXiv preprint arXiv:2505.13326, 2025.", + "[825] Zengzhi Wang, Fan Zhou, Xuefeng Li, and Pengfei Liu. Octothinker: Mid-training incentivizes reinforcement learning scaling. arXiv preprint arXiv:2506.20512, 2025.", + "[826] Zhaoyang Wang, Weilei He, Zhiyuan Liang, Xuchao Zhang, Chetan Bansal, Ying Wei, Weitong Zhang, and Huaxiu Yao. Cream: Consistency regularized self-rewarding language models. In Neurips Safe Generative AI Workshop 2024, October 2024. URL https://openreview.net/forum?id=oaWajnM93y.", + "[827] Zhengren Wang, Jiayang Yu, Dongsheng Ma, Zhe Chen, Yu Wang, Zhiyu Li, Feiyu Xiong, Yanfeng Wang, Linpeng Tang, Wentao Zhang, et al. Rare: Retrieval-augmented reasoning modeling. 
arXiv preprint arXiv:2503.23513, 2025.", + "[828] Zhenhailong Wang, Haiyang Xu, Junyang Wang, Xi Zhang, Ming Yan, Ji Zhang, Fei Huang, and Heng Ji. Mobile-agent-e: Self-evolving mobile assistant for complex tasks. arXiv preprint arXiv:2501.11733, 2025.", + "[829] Zhilin Wang, Yi Dong, Olivier Delalleau, Jiaqi Zeng, Gerald Shen, Daniel Egert, Jimmy J. Zhang, Makes Narsimhan Sreedhar, and Oleksii Kuchaiev. Helpsteer 2: Open-source dataset for training top-performing reward models. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=PvVKUFhaNy.", + "[830] Zhongsheng Wang, Jiamou Liu, Qiming Bao, Hongfei Rong, and Jingfeng Zhang. Chatlogic: Integrating logic programming with large language models for multi-step reasoning. In Neuro-Symbolic Learning and Reasoning in the era of Large Language Models, December 2023. URL https://openreview.net/forum?id=AOqGF7Po7Z.", + "[831] Zihan Wang, Yunxuan Li, Yuexin Wu, Liangchen Luo, Le Hou, Hongkun Yu, and Jingbo Shang. Multi-step problem solving through a verifier: An empirical analysis on model-induced process supervision. arXiv preprint arXiv:2402.02658, 2024.", + "[832] Zixiao Wang, Yuxin Wang, Xiaorui Wang, Mengting Xing, Jie Gao, Jianjun Xu, Guangcan Liu, Chenhui Jin, Zhuo Wang, Shengzhuo Zhang, et al. Test-time scaling with reflective generative model. arXiv preprint arXiv:2507.01951, 2025.", + "[833] Anjiang Wei, Jiannan Cao, Ran Li, Hongyu Chen, Yuhui Zhang, Ziheng Wang, Yaofeng Sun, Yuan Liu, Thiago SFX Teixeira, Diyi Yang, et al. Equibench: Benchmarking code reasoning capabilities of large language models via equivalence checking. arXiv preprint arXiv:2502.12466, 2025.", + "[834] Hao Wei. Medthoughts-8k: A medical question answering dataset, feb 2025. URL https://huggingface.co/datasets/hw-hwei/MedThoughts-8K.", + "[835] Haoran Wei, Youyang Yin, Yumeng Li, Jia Wang, Liang Zhao, Jianjian Sun, Zheng Ge, and Xiangyu Zhang. 
Slow perception: Let's perceive geometric figures step-by-step. arXiv preprint arXiv:2412.20631, 2024.", + "[836] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, brian richter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 24824-24837. Curran Associates, Inc., November 2022. URL https://proceedings.neurips.cc/paper_files/paper/2022/file/9d5609613524ecf4f15af0f7b31abca4-Paper-Conference.pdf.", + "[837] Shuyue Wei, Yongxin Tong, Zimu Zhou, Yi Xu, Jingkai Gao, Tongyu Wei, Tianran He, and Weifeng Lv. Federated reasoning llms: a survey. Frontiers of Computer Science, 19(12): 1-23, jun 2025.", + "[838] Ting-Ruen Wei, Haowei Liu, Xuyang Wu, and Yi Fang. A survey on feedback-based multi-step reasoning for large language models on mathematics. arXiv preprint arXiv:2502.14333, 2025.", + "[839] Yana Wei, Liang Zhao, Jianjian Sun, Kangheng Lin, Jisheng Yin, Jingcheng Hu, Yinmin Zhang, En Yu, Haoran Lv, Zejia Weng, et al. Open vision reasoner: Transferring linguistic cognitive behavior for visual reasoning. arXiv preprint arXiv:2507.05255, 2025." + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 88 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 88 + }, + { + "type": "page_number", + "text": "89", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 88 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[840] Yongxian Wei, Anke Tang, Li Shen, Zixuan Hu, Chun Yuan, and Xiaochun Cao. Modeling multi-task model merging as adaptive projective gradient descent. 
arXiv preprint arXiv:2501.01230, 2025.", + "[841] Yuxiang Wei, Olivier Duchenne, Jade Copet, Quentin Carbonneaux, Lingming Zhang, Daniel Fried, Gabriel Synnaeve, Rishabh Singh, and Sida I. Wang. Swe-rl: Advancing llm reasoning via reinforcement learning on open software evolution. arXiv preprint arXiv:2502.18449, 2025.", + "[842] Nathaniel Weir, Muhammad Khalifa, Linlu Qiu, Orion Weller, and Peter Clark. Learning to reason via program generation, emulation, and search. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=te6VagJf6G.", + "[843] Sean Welleck, Amanda Bertsch, Matthew Finlayson, Hailey Schoelkopf, Alex Xie, Graham Neubig, Ilia Kulikov, and Zaid Harchaoui. From decoding to meta-generation: Inference-time algorithms for large language models. Transactions on Machine Learning Research, November 2024. ISSN 2835-8856. URL https://openreview.net/forum?id= eskQMcIbMS. Survey Certification.", + "[844] Cheng Wen, Tingwei Guo, Shuaijiang Zhao, Wei Zou, and Xiangang Li. Sari: Structured audio reasoning via curriculum-guided reinforcement learning. arXiv preprint arXiv:2504.15900, 2025.", + "[845] Jiaxin Wen, Jian Guan, Hongning Wang, Wei Wu, and Minlie Huang. Codeplan: Unlocking reasoning potential in large language models by scaling code-form planning. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=dCPF1wlqj8.", + "[846] Kaiyue Wen, Huaqing Zhang, Hongzhou Lin, and Jingzhao Zhang. From sparse dependence to sparse attention: Unveiling how chain-of-thought enhances transformer sample efficiency. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=AmEgWDhmTr.", + "[847] Xumeng Wen, Zihan Liu, Shun Zheng, Zhijian Xu, Shengyu Ye, Zhirong Wu, Xiao Liang, Yang Wang, Junjie Li, Ziming Miao, et al. 
Reinforcement learning with verifiable rewards implicitly incentivizes correct reasoning in base llms. arXiv preprint arXiv:2506.14245, 2025.", + "[848] Yixuan Weng, Minjun Zhu, Fei Xia, Bin Li, Shizhu He, Shengping Liu, Bin Sun, Kang Liu, and Jun Zhao. Large language models are better reasoners with self-verification. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 2550–2575, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.167. URL https://aclanthology.org/2023-findings-emnlp.167/.", + "[849] Jason Weston and Sainbayar Sukhbaatar. System 2 attention (is something you might need too). arXiv preprint arXiv:2311.11829, 2023.", + "[850] Colin White, Samuel Dooley, Manley Roberts, Arka Pal, Benjamin Feuer, Siddhartha Jain, Ravid Shwartz-Ziv, Neel Jain, Khalid Saifullah, Sreemanti Dey, Shubh-Agrawal, Sandeep Singh Sandha, Siddartha Venkat Naidu, Chinmay Hegde, Yann LeCun, Tom Goldstein, Willie Neiswanger, and Micah Goldblum. Livebench: A challenging, contamination-limited LLM benchmark. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=sKYHBTAxVa.", + "[851] Yotam Wolf, Binyamin Rothberg, Dorin Shteyman, and Amnon Shashua. Compositional hardness of code in large language models—a probabilistic perspective. arXiv preprint arXiv:2409.18028, 2024.", + "[852] Chengyue Wu, Yixiao Ge, Qiushan Guo, Jiahao Wang, Zhixuan Liang, Zeyu Lu, Ying Shan, and Ping Luo. Plot2code: A comprehensive benchmark for evaluating multi-modal large language models in code generation from scientific plots. arXiv preprint arXiv:2405.07990, 2024.", + "[853] Jinyang Wu, Mingkuan Feng, Shuai Zhang, Feihu Che, Zengqi Wen, and Jianhua Tao. Beyond examples: High-level automated reasoning paradigm in in-context learning via mcts. arXiv preprint arXiv:2411.18478, 2024." 
+ ], + "bbox": [ + 181, + 90, + 825, + 912 + ], + "page_idx": 89 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 89 + }, + { + "type": "page_number", + "text": "90", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 89 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[854] Jinyang Wu, Mingkuan Feng, Shuai Zhang, Ruihan Jin, Feihu Che, Zengqi Wen, and Jianhua Tao. Boosting multimodal reasoning with mcts-automated structured thinking. arXiv preprint arXiv:2502.02339, 2025.", + "[855] Jinyang Wu, Chonghua Liao, Mingkuan Feng, Shuai Zhang, Zhengqi Wen, Pengpeng Shao, Huazhe Xu, and Jianhua Tao. Thought-augmented policy optimization: Bridging external guidance and internal capabilities. arXiv preprint arXiv:2505.15692, 2025.", + "[856] Junde Wu, Jiayuan Zhu, and Yuyuan Liu. Agentic reasoning: Reasoning llms with tools for the deep research. arXiv preprint arXiv:2502.04644, 2025.", + "[857] Qiong Wu, Xiangcong Yang, Yiyi Zhou, Chenxin Fang, Baiyang Song, Xiaoshuai Sun, and Rongrong Ji. Grounded chain-of-thought for multimodal large language models. arXiv preprint arXiv:2503.12799, 2025.", + "[858] Siwei Wu, Zhongyuan Peng, Xinrun Du, Tuney Zheng, Minghao Liu, Jialong Wu, Jiachen Ma, Yizhi Li, Jian Yang, Wangchunshu Zhou, et al. A comparative study on reasoning patterns of openai's o1 model. arXiv preprint arXiv:2410.13639, 2024.", + "[859] Siye Wu, Jian Xie, Yikai Zhang, Aili Chen, Kai Zhang, Yu Su, and Yanghua Xiao. Arm: Adaptive reasoning model. arXiv preprint arXiv:2505.20258, 2025.", + "[860] Tianhao Wu, Janice Lan, Weizhe Yuan, Jiantao Jiao, Jason Weston, and Sainbayar Sukhbaatar. Thinking llms: General instruction following with thought generation. arXiv preprint arXiv:2410.10630, 2024.", + "[861] Wenjie Wu, Yongcheng Jing, Yingjie Wang, Wenbin Hu, and Dacheng Tao. 
Graph-augmented reasoning: Evolving step-by-step knowledge graph retrieval for llm reasoning. arXiv preprint arXiv:2503.01642, 2025.", + "[862] Xiaobao Wu. Sailing by the stars: A survey on reward models and learning strategies for learning from rewards. arXiv preprint arXiv:2505.02686, 2025.", + "[863] Xiong Jun Wu, Zhenduo Zhang, ZuJie Wen, Zhiqiang Zhang, Wang Ren, Lei Shi, Cai Chen, Deng Zhao, Qing Wang, Xudong Han, et al. Sharp: Synthesizing high-quality aligned reasoning problems for large reasoning models reinforcement learning. arXiv preprint arXiv:2505.14147, 2025.", + "[864] Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models. arXiv preprint arXiv:2408.00724, January 2024.", + "[865] Yifan Wu, Jingze Shi, Bingheng Wu, Jiayi Zhang, Xiaotian Lin, Nan Tang, and Yuyu Luo. Concise reasoning, big gains: Pruning long reasoning trace with difficulty-aware prompting. arXiv preprint arXiv:2505.19716, 2025.", + "[866] Yong Wu, Weihang Pan, Ke Li, Chen Binhui, Ping Li, and Binbin Lin. Beyond templates: Dynamic adaptation of reasoning demonstrations via feasibility-aware exploration. arXiv preprint arXiv:2505.20700, 2025.", + "[867] Yuyang Wu, Yifei Wang, Tianqi Du, Stefanie Jegelka, and Yisen Wang. When more is less: Understanding chain-of-thought length in IIms. arXiv preprint arXiv:2502.07266, 2025.", + "[868] Zhenyu Wu, Qingkai Zeng, Zhihan Zhang, Zhaoxuan Tan, Chao Shen, and Meng Jiang. Enhancing mathematical reasoning in llms by stepwise correction. arXiv preprint arXiv:2410.12934, 2024.", + "[869] Zhenyu Wu, Qingkai Zeng, Zhihan Zhang, Zhaoxuan Tan, Chao Shen, and Meng Jiang. Large language models can self-correct with minimal effort. In AI for Math Workshop @ ICML 2024, May 2024. URL https://openreview.net/forum?id=mmZLMs413d.", + "[870] Zirui Wu, Xiao Liu, Jiayi Li, Lingpeng Kong, and Yansong Feng. 
Haste makes waste: Evaluating planning abilities of llms for efficient and feasible multitasking with time constraints between actions. arXiv preprint arXiv:2503.02238, 2025.", + "[871] Zongqian Wu, Tianyu Li, Jiaying Yang, Mengmeng Zhan, Xiaofeng Zhu, and Lei Feng. Is depth all you need? an exploration of iterative reasoning in llms. arXiv preprint arXiv:2502.10858, 2025.", + "[872] Zhiheng Xi, Wenxiang Chen, Boyang Hong, Senjie Jin, Rui Zheng, Wei He, Yiwen Ding, Shichun Liu, Xin Guo, Junzhe Wang, et al. Training large language models for reasoning through reverse curriculum reinforcement learning. arXiv preprint arXiv:2402.05808, 2024." + ], + "bbox": [ + 181, + 90, + 826, + 911 + ], + "page_idx": 90 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 90 + }, + { + "type": "page_number", + "text": "91", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 90 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[873] Zhiheng Xi, Dingwen Yang, Jixuan Huang, Jiafu Tang, Guanyu Li, Yiwen Ding, Wei He, Boyang Hong, Shihan Do, Wenyu Zhan, et al. Enhancing llm reasoning via critique models with test-time and training-time supervision. arXiv preprint arXiv:2411.16579, 2024.", + "[874] Zhiheng Xi, Guanyu Li, Yutao Fan, Honglin Guo, Yufang Liu, Xiaoran Fan, Jiaqi Liu, Jingchao Ding, Wangmeng Zuo, Zhenfei Yin, et al. Bmmr: A large-scale bilingual multimodal multi-discipline reasoning dataset. arXiv preprint arXiv:2507.03483, 2025.", + "[875] Fanzeng Xia, Yidong Luo, Tinko Sebastian Bartels, Yaqi Xu, and Tongxin Li. Rethinking the unsolvable: When in-context search meets test-time scaling. arXiv preprint arXiv:2505.22290, 2025.", + "[876] Heming Xia, Yongqi Li, Chak Tou Leong, Wenjie Wang, and Wenjie Li. Tokenskip: Controllable chain-of-thought compression in lms. 
arXiv preprint arXiv:2502.12067, 2025.", + "[877] Shijie Xia, Xuefeng Li, Yixin Liu, Tongshuang Wu, and Pengfei Liu. Evaluating mathematical reasoning beyond accuracy. arXiv preprint arXiv:2404.05692, 2024.", + "[878] Yunhui Xia, Wei Shen, Yan Wang, Jason Klein Liu, Huifeng Sun, Siyue Wu, Jian Hu, and Xiaolong Xu. Leetcodedataset: A temporal dataset for robust evaluation and efficient training of code llms. arXiv preprint arXiv:2504.14655, 2025.", + "[879] Kun Xiang, Zhili Liu, Zihao Jiang, Yunshuang Nie, Runhui Huang, Haoxiang Fan, Hanhui Li, Weiran Huang, Yihan Zeng, Jianhua Han, et al. Atomthink: A slow thinking framework for multimodal mathematical reasoning. arXiv preprint arXiv:2411.11930, 2024.", + "[880] Violet Xiang, Chase Blagden, Rafael Rafailov, Nathan Lile, Sang Truong, Chelsea Finn, and Nick Haber. Just enough thinking: Efficient reasoning with adaptive length penalties reinforcement learning. arXiv preprint arXiv:2506.05256, 2025.", + "[881] Violet Xiang, Charlie Snell, Kanishk Gandhi, Alon Albalak, Anikait Singh, Chase Blagden, Duy Phung, Rafael Rafailov, Nathan Lile, Dakota Mahan, et al. Towards system 2 reasoning in llms: Learning how to think with meta chain-of-though. arXiv preprint arXiv:2501.04682, 2025.", + "[882] Wenyi Xiao, Zechuan Wang, Leilei Gan, Shuai Zhao, Wanggui He, Luu Anh Tuan, Long Chen, Hao Jiang, Zhou Zhao, and Fei Wu. A comprehensive survey of direct preference optimization: Datasets, theories, variants, and applications. arXiv preprint arXiv:2410.15595, 2024.", + "[883] Chulin Xie, Yangsibo Huang, Chiyuan Zhang, Da Yu, Xinyun Chen, Bill Yuchen Lin, Bo Li, Badih Ghazi, and Ravi Kumar. On memorization of large language models in logical reasoning. arXiv preprint arXiv:2410.23123, 2024.", + "[884] Enze Xie, Junsong Chen, Yuyang Zhao, Jincheng Yu, Ligeng Zhu, Chengyue Wu, Yujun Lin, Zhekai Zhang, Muyang Li, Junyu Chen, et al. Sana 1.5: Efficient scaling of training-time and inference-time compute in linear diffusion transformer. 
arXiv preprint arXiv:2501.18427, 2025.", + "[885] Senwei Xie, Hongyu Wang, Zhanqi Xiao, Ruiping Wang, and Xilin Chen. Robotic programmer: Video instructed policy code generation for robotic manipulation. arXiv preprint arXiv:2501.04268, 2025.", + "[886] Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768, February 2025.", + "[887] Tianbao Xie, Danyang Zhang, Jixuan Chen, Xiaochuan Li, Siheng Zhao, Ruisheng Cao, Toh Jing Hua, Zhoujun Cheng, Dongchan Shin, Fangyu Lei, Yitao Liu, Yiheng Xu, Shuyan Zhou, Silvio Savarese, Caiming Xiong, Victor Zhong, and Tao Yu. OSWorld: Benchmarking multimodal agents for open-ended tasks in real computer environments. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=tN61DTr4Ed.", + "[888] Yuxi Xie, Kenji Kawaguchi, Yiran Zhao, Xu Zhao, Min-Yen Kan, Junxian He, and Qizhe Xie. Self-evaluation guided beam search for reasoning. In Thirty-seventh Conference on Neural Information Processing Systems, September 2023. URL https://openreview.net/forum?id=Bw82hwg5Q3." + ], + "bbox": [ + 181, + 90, + 826, + 911 + ], + "page_idx": 91 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 91 + }, + { + "type": "page_number", + "text": "92", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 91 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[889] Yuxi Xie, Anirudh Goyal, Wenyue Zheng, Min-Yen Kan, Timothy P Lillicrap, Kenji Kawaguchi, and Michael Shieh. Monte carlo tree search boosts reasoning via iterative preference learning. arXiv preprint arXiv:2405.00451, 2024.", + "[890] Zhifei Xie, Mingbao Lin, Zihang Liu, Pengcheng Wu, Shuicheng Yan, and Chunyan Miao. 
Audio-reasoner: Improving reasoning capability in large audio language models. arXiv preprint arXiv:2503.02318, 2025.", + "[891] Zhihui Xie, Liyu Chen, Weichao Mao, Jingjing Xu, Lingpeng Kong, et al. Teaching language models to critique via reinforcement learning. arXiv preprint arXiv:2502.03492, 2025.", + "[892] Siheng Xiong, Ali Payani, Yuan Yang, and Faramarz Fekri. Deliberate reasoning for llms as structure-aware planning with accurate world model. arXiv preprint arXiv:2410.03136, 2024.", + "[893] Wei Xiong, Chengshuai Shi, Jiaming Shen, Aviv Rosenberg, Zhen Qin, Daniele Calandriello, Misha Khalman, Rishabh Joshi, Bilal Piot, Mohammad Saleh, et al. Building math agents with multi-turn iterative preference learning. arXiv preprint arXiv:2409.02392, 2024.", + "[894] Wang Xiyao, Yang Zhengyuan, Li Linjie, Lu Hongjin, Xu Yuancheng, Lin Chung-Ching Lin, Lin Kevin, Huang Furong, and Wang Lijuan. Scaling inference-time search with vision value model for improved visual comprehension. arXiv preprint arXiv:2412.03704, 2024.", + "[895] Austin Xu, Yilun Zhou, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. J4r: Learning to judge with equivalent initial state group relative policy optimization. arXiv preprint arXiv:2505.13346, 2025.", + "[896] Bin Xu, Yiguan Lin, Yinghao Li, et al. Sra-mcts: Self-driven reasoning augmentation with monte carlo tree search for enhanced code generation. arXiv preprint arXiv:2411.11053, 2024.", + "[897] Fangzhi Xu, Qiushi Sun, Kanzhi Cheng, Jun Liu, Yu Qiao, and Zhiyong Wu. Interactive evolution: A neural-symbolic self-training framework for large language models. arXiv preprint arXiv:2406.11736, 2024.", + "[898] Fangzhi Xu, Hang Yan, Chang Ma, Haiteng Zhao, Qiushi Sun, Kanzhi Cheng, Junxian He, Jun Liu, and Zhiyong Wu. Genius: A generalizable and purely unsupervised self-training framework for advanced reasoning. 
arXiv preprint arXiv:2504.08672, 2025.", + "[899] Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, et al. Towards large reasoning models: A survey of reinforced reasoning with large language models. arXiv preprint arXiv:2501.09686, 2025.", + "[900] Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. Llava-ol: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440, 2024.", + "[901] Haotian Xu. No train still gain. unleash mathematical reasoning of large language models with monte carlo tree search guided by energy function. arXiv preprint arXiv:2309.03224, 2023.", + "[902] Haotian Xu, Xing Wu, Weinong Wang, Zhongzhi Li, Da Zheng, Boyuan Chen, Yi Hu, Shijia Kang, Jiaming Ji, Yingying Zhang, et al. Redstar: Does scaling long-cot data unlock better slow-reasoning systems? arXiv preprint arXiv:2501.11284, 2025.", + "[903] Huimin Xu, Xin Mao, Feng-Lin Li, Xiaobao Wu, Wang Chen, Wei Zhang, and Anh Tuan Luu. Full-step-dpo: Self-supervised preference optimization with step-wise rewards for mathematical reasoning. arXiv preprint arXiv:2502.14356, 2025.", + "[904] Jin Xu, Zhifang Guo, Jinzheng He, Hangrui Hu, Ting He, Shuai Bai, Keqin Chen, Jialin Wang, Yang Fan, Kai Dang, et al. Qwen2. 5-omni technical report. arXiv preprint arXiv:2503.20215, 2025.", + "[905] Pusheng Xu, Yue Wu, Kai Jin, Xiaolan Chen, Mingguang He, and Danli Shi. Deepseek-r1 outperforms gemini 2.0 pro, openai o1, and o3-mini in bilingual complex ophthalmology reasoning. arXiv preprint arXiv:2502.17947, 2025.", + "[906] Rongwu Xu, Xiaojian Li, Shuo Chen, and Wei Xu. \"nuclear deployed!\": Analyzing catastrophic risks in decision-making of autonomous llm agents. arXiv preprint arXiv:2502.11355, 2025." 
+ ], + "bbox": [ + 181, + 90, + 826, + 910 + ], + "page_idx": 92 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 92 + }, + { + "type": "page_number", + "text": "93", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 92 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[907] Silei Xu, Wenhao Xie, Lingxiao Zhao, and Pengcheng He. Chain of draft: Thinking faster by writing less. arXiv preprint arXiv:2502.18600, 2025.", + "[908] Wenda Xu, Guanglei Zhu, Xuandong Zhao, Liangming Pan, Lei Li, and William Wang. Pride and prejudice: LLM amplifies self-bias in self-refinement. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15474–15492, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.826. URL https://aclanthology.org/2024.acl-long.826/.", + "[909] Xiaoang Xu, Shuo Wang, Xu Han, Zhenghao Liu, Huijia Wu, Peipei Li, Zhiyuan Liu, Maosong Sun, and Zhaofeng He. A\\*thought: Efficient reasoning via bidirectional compression for low-resource settings. arXiv preprint arXiv:2505.24550, 2025.", + "[910] Xin Xu, Shizhe Diao, Can Yang, and Yang Wang. Can we verify step by step for incorrect answer detection? arXiv preprint arXiv:2402.10528, 2024.", + "[911] Yao Xu, Mingyu Xu, Fangyu Lei, Wangtao Sun, Xiangrong Zeng, Bingning Wang, Guang Liu, Shizhu He, Jun Zhao, and Kang Liu. Amplify adjacent token differences: Enhancing long chain-of-thought reasoning with shift-ffn. arXiv preprint arXiv:2505.17153, 2025.", + "[912] Yi Xu, Chengzhu Li, Han Zhou, Xingchen Wan, Caiqi Zhang, Anna Korhonen, and Ivan Vulić. Visual planning: Let's think only with images. In Workshop on Foundation Models Meet Embodied Agents at CVPR 2025, may 2025. 
URL https://openreview.net/forum?id=ELIt3v3S1J.", + "[913] Yige Xu, Xu Guo, Zhiwei Zeng, and Chunyan Miao. Softcot: Soft chain-of-thought for efficient reasoning with llms. arXiv preprint arXiv:2502.12134, 2025.", + "[914] Yige Xu, Xu Guo, Zhiwei Zeng, and Chunyan Miao. Softcot++: Test-time scaling with soft chain-of-thought reasoning. arXiv preprint arXiv:2505.11484, 2025.", + "[915] Zhangchen Xu, Fengqing Jiang, Luyao Niu, Yuntian Deng, Radha Poovendran, Yejin Choi, and Bill Yuchen Lin. Magpie: Alignment data synthesis from scratch by prompting aligned lms with nothing. arXiv preprint arXiv:2406.08464, 2024.", + "[916] Zhangchen Xu, Yang Liu, Yueqin Yin, Mingyuan Zhou, and Radha Poovendran. Kodcode: A diverse, challenging, and verifiable synthetic dataset for coding. February 2025.", + "[917] Jianhao Yan, Yafu Li, Zican Hu, Zhi Wang, Ganqu Cui, Xiaoye Qu, Yu Cheng, and Yue Zhang. Learning to reason under off-policy guidance. arXiv preprint arXiv:2504.14945, 2025.", + "[918] Kai Yan, Yufei Xu, Zhengyin Du, Xuesong Yao, Zheyu Wang, Xiaowen Guo, and Jiecao Chen. Recitation over reasoning: How cutting-edge language models can fail on elementary school-level reasoning problems? arXiv preprint arXiv:2504.00509, 2025.", + "[919] Ruin Yan, Zheng Liu, and Defu Lian. O1 embedder: Let retrievers think before action. arXiv preprint arXiv:2502.07555, 2025.", + "[920] Siming Yan, Min Bai, Weifeng Chen, Xiong Zhou, Qixing Huang, and Li Erran Li. Vigor: Improving visual grounding of large vision language models with fine-grained reward modeling. In European Conference on Computer Vision, pages 37-53. Springer, 2024.", + "[921] Yibo Yan, Jiamin Su, Jianxiang He, Fangteng Fu, Xu Zheng, Yuanhuiyi Lyu, Kun Wang, Shen Wang, Qingsong Wen, and Xuming Hu. A survey of mathematical reasoning in the era of multimodal large language model: Benchmark, method & challenges. 
arXiv preprint arXiv:2412.11936, 2024.", + "[922] Yibo Yan, Shen Wang, Jiahao Huo, Hang Li, Boyan Li, Jiamin Su, Xiong Gao, Yi-Fan Zhang, Tianlong Xu, Zhendong Chu, et al. Errorradar: Benchmarking complex mathematical reasoning of multimodal large language models via error detection. arXiv preprint arXiv:2410.04509, 2024.", + "[923] Yibo Yan, Shen Wang, Jiahao Huo, Jingheng Ye, Zhendong Chu, Xuming Hu, Philip S Yu, Carla Gomes, Bart Selman, and Qingsong Wen. Position: Multimodal large language models can significantly advance scientific reasoning. arXiv preprint arXiv:2502.02871, 2025.", + "[924] Yuchen Yan, Jin Jiang, Yang Liu, Yixin Cao, Xin Xu, Xunliang Cai, Jian Shao, et al. S $^{3}$ c-math: Spontaneous step-level self-correction makes large language models better mathematical reasoners. arXiv preprint arXiv:2409.01524, 2024." + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 93 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 93 + }, + { + "type": "page_number", + "text": "94", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 93 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[925] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024.", + "[926] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024.", + "[927] An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2.5-math technical report: Toward mathematical expert model via self-improvement. 
arXiv preprint arXiv:2409.12122, 2024.", + "[928] Cehao Yang, Xueyuan Lin, Chengjin Xu, Xuhui Jiang, Xiaojun Wu, Honghao Liu, Hui Xiong, and Jian Guo. Select2reason: Efficient instruction-tuning data selection for long-cot reasoning. arXiv preprint arXiv:2505.17266, 2025.", + "[929] Chen Yang, Chenyang Zhao, Quanquan Gu, and Dongruo Zhou. Cops: Empowering llm agents with provable cross-task experience sharing. arXiv preprint arXiv:2410.16670, 2024.", + "[930] Cheng Yang, Chufan Shi, Siheng Li, Bo Shui, Yujiu Yang, and Wai Lam. Llm2: Let large language models harness system 2 reasoning. arXiv preprint arXiv:2412.20372, 2024.", + "[931] Cheng Yang, Chufan Shi, Yaxin Liu, Bo Shui, Junjie Wang, Mohan Jing, Linran Xu, Xinyu Zhu, Siheng Li, Yuxiang Zhang, Gongye Liu, Xiaomei Nie, Deng Cai, and Yujiu Yang. Chartmimic: Evaluating LMM's cross-modal reasoning capability via chart-to-code generation. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=sGpCzsfd1K.", + "[932] Kailai Yang, Zhiwei Liu, Qianqian Xie, Jimin Huang, Erxue Min, and Sophia Ananiadou. Selective preference optimization via token-level reward function estimation. arXiv preprint arXiv:2408.13518, 2024.", + "[933] Kaiyu Yang, Gabriel Poesia, Jingxuan He, Wenda Li, Kristin Lauter, Swarat Chaudhuri, and Dawn Song. Formal mathematical reasoning: A new frontier in ai. arXiv preprint arXiv:2412.16075, 2024.", + "[934] Lei Yang, Renren Jin, Ling Shi, Jianxiang Peng, Yue Chen, and Deyi Xiong. Probench: Benchmarking large language models in competitive programming. arXiv preprint arXiv:2502.20868, 2025.", + "[935] Ling Yang, Zhaochen Yu, Bin Cui, and Mengdi Wang. Reasonflux: Hierarchical llm reasoning via scaling thought templates. arXiv preprint arXiv:2502.06772, 2025.", + "[936] Ruihan Yang, Fanghua Ye, Jian Li, Siyu Yuan, Yikai Zhang, Zhaopeng Tu, Xiaolong Li, and Deqing Yang. 
The lighthouse of language: Enhancing llm agents via critique-guided improvement. arXiv preprint arXiv:2503.16024, 2025.", + "[937] Sherry Yang, Dale Schuurmans, Pieter Abbeel, and Ofir Nachum. Chain of thought imitation with procedure cloning. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho, editors, Advances in Neural Information Processing Systems, November 2022. URL https://openreview.net/forum?id=ZJqqSa8FsH9.", + "[938] Shiming Yang, Yuxuan Tong, Xinyao Niu, Graham Neubig, and Xiang Yue. Demystifying long chain-of-thought reasoning. In *Forty-second International Conference on Machine Learning*, may 2025. URL https://openreview.net/forum?id=OLodUbcWjb.", + "[939] Shu Yang, Junchao Wu, Xin Chen, Yunze Xiao, Xinyi Yang, Derek F. Wong, and Di Wang. Understanding aha moments: from external observations to internal mechanisms. arXiv preprint arXiv:2504.02956, 2025.", + "[940] Shu Yang, Junchao Wu, Xuansheng Wu, Derek Wong, Ninhao Liu, and Di Wang. Is long-to-short a free lunch? investigating inconsistency and reasoning efficiency in Irms. arXiv preprint arXiv:2506.19492, 2025.", + "[941] Sohee Yang, Elena Gribovskaya, Nora Kassner, Mor Geva, and Sebastian Riedel. Do large language models latently perform multi-hop reasoning? In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 10210–10229, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.550. URL https://aclanthology.org/2024.acl-long.550/." 
+ ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 94 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 94 + }, + { + "type": "page_number", + "text": "95", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 94 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[942] Wang Yang, Hongye Jin, Jingfeng Yang, Vipin Chaudhary, and Xiaotian Han. Thinking preference optimization. arXiv preprint arXiv:2502.13173, 2025.", + "[943] Wenkai Yang, Shuming Ma, Yankai Lin, and Furu Wei. Towards thinking-optimal scaling of test-time compute for lIm reasoning. arXiv preprint arXiv:2502.18080, 2025.", + "[944] Xiao-Wen Yang, Xuan-Yi Zhu, Wen-Da Wei, Ding-Chu Zhang, Jie-Jing Shao, Zhi Zhou, Lan-Zhe Guo, and Yu-Feng Li. Step back to leap forward: Self-backtracking for boosting reasoning of language models. arXiv preprint arXiv:2502.04404, 2025.", + "[945] Yang Yang, Xiaolu Zhou, Bosong Ding, and Miao Xin. Uncertainty-aware reward design process. arXiv preprint arXiv:2507.02256, 2025.", + "[946] Yifei Yang, Zouying Cao, Qiguang Chen, Libo Qin, Dongjie Yang, Hai Zhao, and Zhi Chen. Kvsharer: Efficient inference via layer-wise dissimilar kv cache sharing. arXiv preprint arXiv:2410.18517, 2024.", + "[947] Yue Yang, MingKang Chen, Qihua Liu, Mengkang Hu, Qiguang Chen, Gengrui Zhang, Shuyue Hu, Guangtao Zhai, Yu Qiao, Yu Wang, et al. Truly assessing fluid intelligence of large language models through dynamic reasoning evaluation. arXiv preprint arXiv:2506.02648, 2025.", + "[948] Yuqing Yang, Yan Ma, and Pengfei Liu. Weak-to-strong reasoning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 8350-8367, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.490. 
URL https://aclanthology.org/2024 findings-emnlp.490/.", + "[949] Zeyuan Yang, Xueyang Yu, Delin Chen, Maohao Shen, and Chuang Gan. Machine mental imagery: Empower multimodal reasoning with latent visual tokens. arXiv preprint arXiv:2506.17218, 2025.", + "[950] Zhe Yang, Yichang Zhang, Yudong Wang, Ziyao Xu, Junyang Lin, and Zhifang Sui. Confidence vs critique: A decomposition of self-correction capability for llms. arXiv preprint arXiv:2412.19513, 2024.", + "[951] Zonghan Yang, Peng Li, Ming Yan, Ji Zhang, Fei Huang, and Yang Liu. React meets actre: Autonomous annotation of agent trajectories for contrastive self-training. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=0VLBwQGWpA.", + "[952] Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, et al. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. arXiv preprint arXiv:2412.18319, 2024.", + "[953] Huanjin Yao, Jiaxing Huang, Yawen Qiu, Michael K Chen, Wenzheng Liu, Wei Zhang, Wenjie Zeng, Xikun Zhang, Jingyi Zhang, Yuxin Song, et al. Mmreason: An open-ended multi-modal multi-step reasoning benchmark for mllms toward agi. arXiv preprint arXiv:2506.23563, 2025.", + "[954] Shunyu Yao, Howard Chen, John Yang, and Karthik R Narasimhan. Webshop: Towards scalable real-world web interaction with grounded language agents. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho, editors, Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=R9KnuFlvnU.", + "[955] Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 11809-11822. 
Curran Associates, Inc., September 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/271db9922b8d1f4dd7aaef84ed5ac703-Paper-Conference.pdf.", + "[956] Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R Narasimhan, and Yuan Cao. React: Synergizing reasoning and acting in language models. In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=WE_vluYUL-X." + ], + "bbox": [ + 181, + 90, + 826, + 912 + ], + "page_idx": 95 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 95 + }, + { + "type": "page_number", + "text": "96", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 95 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[957] Xinhao Yao, Ruifeng Ren, Yun Liao, and Yong Liu. Unveiling the mechanisms of explicit cot training: How chain-of-thought enhances reasoning generalization. arXiv preprint arXiv:2502.04667, 2025.", + "[958] Yang Yao, Xuan Tong, Ruofan Wang, Yixu Wang, Lujundong Li, Liang Liu, Yan Teng, and Yingchun Wang. A mousetrap: Fooling large reasoning models for jailbreak with chain of iterative chaos. arXiv preprint arXiv:2502.15806, 2025.", + "[959] Wang Yaoting, Wu Shengqiong, Zhang Yuechen, Yan Shuicheng, Liu Ziwei, Luo Jiebo, and Fei Hao. Multimodal chain-of-thought reasoning: A comprehensive survey. arXiv preprint arXiv:2503.12605, 2025.", + "[960] Michihiro Yasunaga, Luke Zettlemoyer, and Marjan Ghazvininejad. Multimodal reward-bench: Holistic evaluation of reward models for vision language models. arXiv preprint arXiv:2502.14191, 2025.", + "[961] Nicolas Yax, Hernán Anló, and Stefano Palminteri. Studying and improving reasoning in humans and machines. Communications Psychology, 2(1):51, 2024.", + "[962] Guanghao Ye, Khiem Duc Pham, Xinzhi Zhang, Sivakanth Gopi, Baolin Peng, Beibin Li, Janardhan Kulkarni, and Huseyin A Inan. 
On the emergence of thinking in llms i: Searching for the right intuition. arXiv preprint arXiv:2502.06773, 2025.", + "[963] Jiaran Ye, Zijun Yao, Zhidian Huang, Liangming Pan, Jinxin Liu, Yushi Bai, Amy Xin, Liu Weichuan, Xiaoyin Che, Lei Hou, et al. How does transformer learn implicit reasoning? arXiv preprint arXiv:2505.23653, 2025.", + "[964] Rui Ye, Shuo Tang, Rui Ge, Yaxin Du, Zhenfei Yin, Jing Shao, and Siheng Chen. MAS-GPT: Training LLMs to build LLM-based multi-agent systems. In Workshop on Reasoning and Planning for Large Language Models, March 2025. URL https://openreview.net/forum?id=TqHoQIlumy.", + "[965] Tian Ye, Zicheng Xu, Yuanzhi Li, and Zeyuan Allen-Zhu. Physics of language models: Part 2.2, how to learn from mistakes on grade-school math problems. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=zpDGwcmMV4.", + "[966] Xinwu Ye, Chengfan Li, Siming Chen, Xiangru Tang, and Wei Wei. Mmscibench: Benchmarking language models on multimodal scientific problems. arXiv preprint arXiv:2503.01891, 2025.", + "[967] Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning. arXiv preprint arXiv:2502.03387, 2025.", + "[968] Zihuiwen Ye, Fraser Greenlee-Scott, Max Bartolo, Phil Blunsom, Jon Ander Campos, and Matthias Galle. Improving reward models with synthetic critiques. arXiv preprint arXiv:2405.20850, 2024.", + "[969] Zihuiwen Ye, Luckeciano Carvalho Melo, Younesse Kaddar, Phil Blunsom, Sam Staton, and Yarin Gal. Uncertainty-aware step-wise verification with generative reward models. arXiv preprint arXiv:2502.11250, 2025.", + "[970] Hao Yi, Qingyang Li, Yulan Hu, Fuzheng Zhang, Di Zhang, and Yong Liu. Sppd: Self-training with process preference learning using dynamic value margin. arXiv preprint arXiv:2502.13516, 2025.", + "[971] Jingyang Yi, Jiazheng Wang, and Sida Li. 
Shorterbetter: Guiding reasoning models to find optimal inference length for efficient reasoning. arXiv preprint arXiv:2504.21370, 2025.", + "[972] Qiyue Yin, Pei Xu, Qiaozhe Li, Shengda Liu, Shengqi Shen, Tong Wang, Yihong Han, Xiaonan Zhao, Likun Yang, Shiyue Cao, et al. Wgsr-bench: Wargame-based game-theoretic strategic reasoning benchmark for large language models. arXiv preprint arXiv:2506.10264, 2025.", + "[973] Zhangyue Yin, Qiushi Sun, Qipeng Guo, Zhiyuan Zeng, Xiaonan Li, Junqi Dai, Qinyuan Cheng, Xuanjing Huang, and Xipeng Qiu. Reasoning in flux: Enhancing large language models reasoning through uncertainty-aware adaptive guidance. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association" + ], + "bbox": [ + 181, + 90, + 825, + 912 + ], + "page_idx": 96 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 96 + }, + { + "type": "page_number", + "text": "97", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 96 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "for Computational Linguistics (Volume 1: Long Papers), pages 2401-2416, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.131. URL https://aclanthology.org/2024.acl-long.131/.", + "[974] Huaiyuan Ying, Shuo Zhang, Linyang Li, Zhejian Zhou, Yunfan Shao, Zhaoye Fei, Yichuan Ma, Jiawei Hong, Kuikun Liu, Ziyi Wang, et al. Internl m - Math: Open math large language models toward verifiable reasoning. arXiv preprint arXiv:2402.06332, 2024.", + "[975] Eunseop Yoon, Hee Suk Yoon, SooHwan Eom, Gunsoo Han, Daniel Wontae Nam, Daejin Jo, Kyoung-Woon On, Mark A Hasegawa-Johnson, Sungwoong Kim, and Chang D Yoo. Tlcr: Token-level continuous reward for fine-grained reinforcement learning from human feedback. 
arXiv preprint arXiv:2407.16574, 2024.", + "[976] Jaesik Yoon, Hyeonseo Cho, Doojin Baek, Yoshua Bengio, and Sungjin Ahn. Monte carlo tree diffusion for system 2 planning. arXiv preprint arXiv:2502.07202, 2025.", + "[977] Bin Yu, Hang Yuan, Haotian Li, Xueyin Xu, Yuliang Wei, Bailing Wang, Weizhen Qi, and Kai Chen. Long-short chain-of-thought mixture supervised fine-tuning eliciting efficient reasoning in large language models. arXiv preprint arXiv:2505.03469, 2025.", + "[978] Dian Yu, Baolin Peng, Ye Tian, Linfeng Song, Haitao Mi, and Dong Yu. Siam: Self-improving code-assisted mathematical reasoning of large language models. arXiv preprint arXiv:2408.15565, 2024.", + "[979] Fei Yu, Anningzhe Gao, and Benyou Wang. OVM, outcome-supervised value models for planning in mathematical reasoning. In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Findings of the Association for Computational Linguistics: NAACL 2024, pages 858-875, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-naacl.55. URL https://aclanthology.org/2024.findings-naacl.55/.", + "[980] Fei Yu, Hongbo Zhang, Prayag Tiwari, and Benyou Wang. Natural language reasoning, a survey. ACM Comput. Surv., 56(12), October 2024. ISSN 0360-0300. doi: 10.1145/3664194. URL https://doi.org/10.1145/3664194.", + "[981] Fei Yu, Yingru Li, and Benyou Wang. Uncertainty-aware search and value models: Mitigating search scaling flaws in llms. arXiv preprint arXiv:2502.11155, 2025.", + "[982] Hongli Yu, Tinghong Chen, Jiangtao Feng, Jiangjie Chen, Weinan Dai, Qiying Yu, YaQin Zhang, Wei-Ying Ma, Jingjing Liu, Mingxuan Wang, et al. Memagent: Reshaping long-context llm with multi-conv rl-based memory agent. arXiv preprint arXiv:2507.02259, 2025.", + "[983] Longhui Yu, Weisen Jiang, Han Shi, Jincheng YU, Zhengying Liu, Yu Zhang, James Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. 
In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=N8N0hgNDRt.", + "[984] Ping Yu, Jing Xu, Jason Weston, and Ilia Kulikov. Distilling system 2 into system 1. arXiv preprint arXiv:2407.06023, 2024.", + "[985] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025.", + "[986] Tianyu Yu, Bo Ji, Shouli Wang, Shu Yao, Zefan Wang, Ganqu Cui, Lifan Yuan, Ning Ding, Yuan Yao, Zhiyuan Liu, et al. Rlpr: Extrapolating rlvr to general domains without verifiers. arXiv preprint arXiv:2506.18254, 2025.", + "[987] Tong Yu, Yongcheng Jing, Xikun Zhang, Wentao Jiang, Wenjie Wu, Yingjie Wang, Wenbin Hu, Bo Du, and Dacheng Tao. Benchmarking reasoning robustness in large language models. arXiv preprint arXiv:2503.04550, 2025.", + "[988] Xiao Yu, Baolin Peng, Vineeth Vajipey, Hao Cheng, Michel Galley, Jianfeng Gao, and Zhou Yu. ExACT: Teaching AI agents to explore with reflective-MCTS and exploratory learning. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=GBIUbwW9D8." + ], + "bbox": [ + 181, + 90, + 826, + 911 + ], + "page_idx": 97 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 97 + }, + { + "type": "page_number", + "text": "98", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 97 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[989] Yahan Yu, Yuyang Dong, and Masafumi Oyamada. Learning deliberately, acting intuitively: Unlocking test-time reasoning in multimodal llms. 
arXiv preprint arXiv:2507.06999, 2025.", + "[990] Yiyao Yu, Yuxiang Zhang, Dongdong Zhang, Xiao Liang, Hengyuan Zhang, Xingxing Zhang, Ziyi Yang, Mahmoud Khademi, Hany Awadalla, Junjie Wang, et al. Chain-of-reasoning: Towards unified mathematical reasoning in large language models via a multi-paradigm perspective. arXiv preprint arXiv:2501.11110, 2025.", + "[991] Yue Yu, Zhengxing Chen, Aston Zhang, Liang Tan, Chenguang Zhu, Richard Yuanzhe Pang, Yundi Qian, Xuewei Wang, Suchin Gururangan, Chao Zhang, et al. Self-generated critiques boost reward modeling for language models. arXiv preprint arXiv:2411.16646, 2024.", + "[992] Zeping Yu, Yonatan Belinkov, and Sophia Ananiadou. Back attention: Understanding and enhancing multi-hop reasoning in large language models. arXiv preprint arXiv:2502.10835, 2025.", + "[993] Zhaojian Yu, Yilun Zhao, Arman Cohan, and Xiao-Ping Zhang. Humaneval pro and mbpp pro: Evaluating large language models on self-invoking code generation. arXiv preprint arXiv:2412.21199, 2024.", + "[994] Zhaojian Yu, Yinghao Wu, Yilun Zhao, Arman Cohan, and Xiao-Ping Zhang. Z1: Efficient test-time scaling with code. arXiv preprint arXiv:2504.00810, 2025.", + "[995] Zhouliang Yu, Yuhuan Yuan, Tim Z Xiao, Fuxiang Frank Xia, Jie Fu, Ge Zhang, Ge Lin, and Weiyang Liu. Generating symbolic world models via test-time scaling of large language models. arXiv preprint arXiv:2502.04728, 2025.", + "[996] Zhuohao Yu, Weizheng Gu, Yidong Wang, Zhengran Zeng, Jindong Wang, Wei Ye, and Shikun Zhang. Outcome-refining process supervision for code generation. arXiv preprint arXiv:2412.15118, 2024.", + "[997] Zishun Yu, Tengyu Xu, Di Jin, Karthik Abinav Sankararaman, Yun He, Wenxuan Zhou, Zhouhao Zeng, Eryk Helenowski, Chen Zhu, Sinong Wang, et al. Think smarter not harder: Adaptive reasoning with inference aware optimization. 
arXiv preprint arXiv:2501.17974, 2025.", + "[998] Hang Yuan, Bin Yu, Haotian Li, Shijun Yang, Christina Dan Wang, Zhou Yu, Xueyin Xu, Weizhen Qi, and Kai Chen. Not all tokens are what you need in thinking. arXiv preprint arXiv:2505.17827, 2025.", + "[999] Jiahao Yuan, Dehui Du, Hao Zhang, Zixiang Di, and Usman Naseem. Reversal of thought: Enhancing large language models with preference-guided reverse reasoning warm-up. arXiv preprint arXiv:2410.12323, 2024.", + "[1000] Lifan Yuan, Wendi Li, Huayu Chen, Ganqu Cui, Ning Ding, Kaiyan Zhang, Bowen Zhou, Zhiyuan Liu, and Hao Peng. Free process rewards without process labels. arXiv preprint arXiv:2412.01981, 2024.", + "[1001] Lifan Yuan, Ganqu Cui, Hanbin Wang, Ning Ding, Xingyao Wang, Boji Shan, Zeyuan Liu, Jia Deng, Huimin Chen, Ruobing Xie, Yankai Lin, Zhenghao Liu, Bowen Zhou, Hao Peng, Zhiyuan Liu, and Maosong Sun. Advancing LLM reasoning generalists with preference trees. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=2ea5TNVR0c.", + "[1002] Michelle Yuan, Elman Mansimov, Katerina Margatina, Anurag Pratik, Daniele Bonadiman, Monica Sunkara, Yi Zhang, Yassine Benajiba, et al. A study on leveraging search and self-feedback for agent reasoning. arXiv preprint arXiv:2502.12094, 2025.", + "[1003] Siyu Yuan, Zehui Chen, Zhiheng Xi, Junjie Ye, Zhengyin Du, and Jiecao Chen. Agentr: Training language model agents to reflect via iterative self-training. arXiv preprint arXiv:2501.11425, 2025.", + "[1004] Weizhe Yuan, Jane Yu, Song Jiang, Karthik Padthe, Yang Li, Dong Wang, Ilia Kulikov, Kyunghyun Cho, Yuandong Tian, Jason E Weston, and Xian Li. Naturalreasoning: Reasoning in the wild with 2.8m challenging questions, 2025.", + "[1005] Yige Yuan, Teng Xiao, Shuchang Tao, Xue Wang, Jinyang Gao, Bolin Ding, and Bingbing Xu. Incentivizing reasoning from weak supervision. arXiv preprint arXiv:2505.20072, 2025." 
+ ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 98 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 98 + }, + { + "type": "page_number", + "text": "99", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 98 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1006] Xiang Yue, Xingwei Qu, Ge Zhang, Yao Fu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. Mammoth: Building math generalist models through hybrid instruction tuning. arXiv preprint arXiv:2309.05653, 2023.", + "[1007] Xiang Yue, Tianyu Zheng, Ge Zhang, and Wenhu Chen. Mammoth2: Scaling instructions from the web. Advances in Neural Information Processing Systems, 37:90629-90660, 2025. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/a4ca07aa108036f80cbb5b82285fd4b1-Paper-Conference.pdf.", + "[1008] Zhenrui Yue, Bowen Jin, Huimin Zeng, Honglei Zhuang, Zhen Qin, Jinsung Yoon, Lanyu Shang, Jiawei Han, and Dong Wang. Hybrid latent reasoning via reinforcement learning. arXiv preprint arXiv:2505.18454, 2025.", + "[1009] Mert Yuksekgonul, Federico Bianchi, Joseph Boen, Sheng Liu, Pan Lu, Zhi Huang, Carlos Guestrin, and James Zou. Optimizing generative ai by backpropagating language model feedback. Nature, 639(8055):609-616, March 2025. URL https://www.nature.com/articles/s41586-025-08661-4.", + "[1010] YuYue, Yufeng Yuan, Qiying Yu, Xiaochen Zuo, Ruofei Zhu, Wenyuan Xu, Jiaze Chen, Chengyi Wang, TianTian Fan, Zhengyin Du, Xiangpeng Wei, Gaohong Liu, Juncai Liu, Lingjun Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Ru Zhang, Xin Liu, Mingxuan Wang, Yonghui Wu, and Lin Yan. Vapo: Efficient and reliable reinforcement learning for advanced reasoning tasks. arXiv preprint arXiv:2504.05118, 2025.", + "[1011] Yuhang Zang, Xiaoyi Dong, Pan Zhang, Yuhang Cao, Ziyu Liu, Shengyuan Ding, Shenxi Wu, Yubo Ma, Haodong Duan, Wenwei Zhang, et al. 
Internlm-xcomposer2.5-reward: A simple yet effective multi-modal reward model. arXiv preprint arXiv:2501.12368, 2025.", + "[1012] Eric Zelikman, Yuhuai Wu, Jesse Mu, and Noah Goodman. Star: Bootstrapping reasoning with reasoning. Advances in Neural Information Processing Systems, 35:15476-15488, November 2022. URL https://openreview.net/pdf?id=3ELRdg2sqI.", + "[1013] Eric Zelikman, Georges Harik, Yijia Shao, Varuna Jayasiri, Nick Haber, and Noah D Goodman. Quiet-star: Language models can teach themselves to think before speaking. arXiv preprint arXiv:2403.09629, 2024.", + "[1014] Huaye Zeng, Dongfu Jiang, Haozhe Wang, Ping Nie, Xiaotong Chen, and Wenhu Chen. Acecoder: Acing coder rl via automated test-case synthesis. arXiv preprint arXiv:2502.01718, 2025.", + "[1015] Thomas Zeng, Shuibai Zhang, Shutong Wu, Christian Classen, Daewon Chae, Ethan Ewer, Minjae Lee, Heeju Kim, Wonjun Kang, Jackson Kunde, et al. Versaprm: Multi-domain process reward model via synthetic reasoning data. arXiv preprint arXiv:2502.06737, 2025.", + "[1016] Weihao Zeng, Yuzhen Huang, Lulu Zhao, Yijun Wang, Zifei Shan, and Junxian He. B-star: Monitoring and balancing exploration and exploitation in self-taught reasoners. arXiv preprint arXiv:2412.17256, 2024.", + "[1017] Weihao Zeng, Yuzhen Huang, Qian Liu, Wei Liu, Keqing He, Zejun Ma, and Junxian He. Simplerl-zoo: Investigating and taming zero reinforcement learning for open base models in the wild, 2025.", + "[1018] Yongcheng Zeng, Xinyu Cui, Xuanfa Jin, Guoqing Liu, Zexu Sun, Quan He, Dong Li, Ning Yang, Jianye Hao, Haifeng Zhang, et al. Aries: Stimulating self-refinement of large language models by iterative preference optimization. arXiv preprint arXiv:2502.05605, 2025.", + "[1019] Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. 
arXiv preprint arXiv:2412.14135, 2024.", + "[1020] Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Yunhua Zhou, and Xipeng Qiu. Revisiting the test-time scaling of o1-like models: Do they truly possess test-time scaling capabilities? arXiv preprint arXiv:2502.12215, 2025.", + "[1021] Zhongshen Zeng, Yinhong Liu, Yingjia Wan, Jingyao Li, Pengguang Chen, Jianbo Dai, Yuxuan Yao, Rongwu Xu, Zehan Qi, Wanru Zhao, Linling Shen, Jianqiao Lu, Haochen Tan, Yukang Chen, Hao Zhang, Zhan Shi, Bailin Wang, Zhijiang Guo, and Jiaya Jia. MR-ben:" + ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 99 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 99 + }, + { + "type": "page_number", + "text": "100", + "bbox": [ + 485, + 934, + 511, + 946 + ], + "page_idx": 99 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "A meta-reasoning benchmark for evaluating system-2 thinking in LLMs. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, June 2024. URL https://openreview.net/forum?id=GN2qbxZ1ni.", + "[1022] Zihao Zeng, Xuyao Huang, Boxiu Li, and Zhijie Deng. Sift: Grounding llm reasoning in contexts via stickers. arXiv preprint arXiv:2502.14922, 2025.", + "[1023] Yuexiang Zhai, Hao Bai, Zipeng Lin, Jiayi Pan, Shengbang Tong, Yifei Zhou, Alane Suhr, Saining Xie, Yann LeCun, Yi Ma, and Sergey Levine. Fine-tuning large vision-language models as decision-making agents via reinforcement learning. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=nBjmMF2IZU.", + "[1024] Zaifu Zhan, Shuang Zhou, Huixue Zhou, Jiawen Deng, Yu Hou, Jeremy Yeung, and Rui Zhang. An evaluation of deepseek models in biomedical natural language processing. 
arXiv preprint arXiv:2503.00624, 2025.", + "[1025] Alexander Zhang, Marcus Dong, Jiaheng Liu, Wei Zhang, Yejie Wang, Jian Yang, Ge Zhang, Tianyu Liu, Zhongyuan Peng, Yingshui Tan, et al. Codecriticbench: A holistic code critique benchmark for large language models. arXiv preprint arXiv:2502.16614, 2025.", + "[1026] Beichen Zhang, Yuhong Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Haodong Duan, Yuhang Cao, Dahua Lin, and Jiaqi Wang. Booststep: Boosting mathematical capability of large language models via improved single-step reasoning. arXiv preprint arXiv:2501.03226, 2025.", + "[1027] Bohan Zhang, Xiaokang Zhang, Jing Zhang, Jifan Yu, Sijia Luo, and Jie Tang. Cot-based synthesizer: Enhancing llm performance through answer synthesis. arXiv preprint arXiv:2501.01668, 2025.", + "[1028] Che Zhang, Zhenyang Xiao, Chengcheng Han, Yixin Lian, and Yuejian Fang. Learning to check: Unleashing potentials for self-correction in large language models. arXiv preprint arXiv:2402.13035, 2024.", + "[1029] Chi Zhang, Jiajun Song, Siyu Li, Yitao Liang, Yuxi Ma, Wei Wang, Yixin Zhu, and Song-Chun Zhu. Proposing and solving olympiad geometry with guided tree search. arXiv preprint arXiv:2412.10673, 2024.", + "[1030] Chunhui Zhang, Zhongyu Ouyang, Kwonjoon Lee, Nakul Agarwal, Sean Dae Houlihan, Soroush Vosoughi, and Shao-Yuan Lo. Overcoming multi-step complexity in multimodal theory-of-mind reasoning: A scalable bayesian planner. In *Forty-second International Conference on Machine Learning*, 2025. URL https://openreview.net/forum?id=2dz6psiiA0.", + "[1031] Dalong Zhang, Jun Xu, Jun Zhou, Lei Liang, Lin Yuan, Ling Zhong, Mengshu Sun, Peilong Zhao, QiWei Wang, Xiaorui Wang, et al. Kag-thinker: Teaching large language models to think with human-like reasoning process. arXiv preprint arXiv:2506.17728, 2025.", + "[1032] Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. ReST-MCTS*: LLM self-training via process reward guided tree search. 
In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=8rcFOqEud5.", + "[1033] Di Zhang, Xiaoshui Huang, Dongzhan Zhou, Yuqiang Li, and Wanli Ouyang. Accessing gpt-4 level mathematical olympiad solutions via monte carlo tree self-refine with llama-3 8b. arXiv preprint arXiv:2406.07394, 2024.", + "[1034] Di Zhang, Jianbo Wu, Jingdi Lei, Tong Che, Jiatong Li, Tong Xie, Xiaoshui Huang, Shufei Zhang, Marco Pavone, Yuqiang Li, et al. Llama-berry: Pairwise optimization for o1-like olympiad-level mathematical reasoning. arXiv preprint arXiv:2410.02884, 2024.", + "[1035] Fengji Zhang, Linquan Wu, Huiyu Bai, Guancheng Lin, Xiao Li, Xiao Yu, Yue Wang, Bei Chen, and Jacky Keung. Humaneval-v: Evaluating visual understanding and reasoning abilities of large multimodal models through coding tasks. arXiv preprint arXiv:2410.12381, 2024.", + "[1036] Hanning Zhang, Pengcheng Wang, Shizhe Diao, Yong Lin, Rui Pan, Hanze Dong, Dylan Zhang, Pavlo Molchanov, and Tong Zhang. Entropy-regularized process reward model. arXiv preprint arXiv:2412.11006, 2024." + ], + "bbox": [ + 171, + 90, + 826, + 912 + ], + "page_idx": 100 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 42, + 308, + 71 + ], + "page_idx": 100 + }, + { + "type": "page_number", + "text": "101", + "bbox": [ + 485, + 935, + 509, + 946 + ], + "page_idx": 100 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1037] Haoyue Zhang, Hualei Zhang, Xiaosong Ma, Jie Zhang, and Song Guo. Lazyeviction: Lagged kv eviction with attention pattern observation for efficient long reasoning. arXiv preprint arXiv:2506.15969, 2025.", + "[1038] Hongbo Zhang, Han Cui, Guangsheng Bao, Linyi Yang, Jun Wang, and Yue Zhang. Direct value optimization: Improving chain-of-thought reasoning in llms with refined values. 
arXiv preprint arXiv:2502.13723, 2025.", + "[1039] Jiayi Zhang, Jinyu Xiang, Zhaoyang Yu, Fengwei Teng, Xionghui Chen, Jiaqi Chen, Mingchen Zhuge, Xin Cheng, Sirui Hong, Jinlin Wang, et al. Aflow: Automating agentic workflow generation. arXiv preprint arXiv:2410.10762, 2024.", + "[1040] Jinghan Zhang, Xiting Wang, Fengran Mo, Yeyang Zhou, Wanfu Gao, and Kunpeng Liu. Entropy-based exploration conduction for multi-step reasoning. arXiv preprint arXiv:2503.15848, 2025.", + "[1041] Jintian Zhang, Yuqi Zhu, Mengshu Sun, Yujie Luo, Shuofei Qiao, Lun Du, Da Zheng, Huajun Chen, and Ningyu Zhang. Lighthinker: Thinking step-by-step compression. arXiv preprint arXiv:2502.15589, 2025.", + "[1042] Kaiyi Zhang, Ang Lv, Jinpeng Li, Yongbo Wang, Feng Wang, Haoyuan Hu, and Rui Yan. Stephint: Multi-level stepwise hints enhance reinforcement learning to reason. arXiv preprint arXiv:2507.02841, 2025.", + "[1043] Kechi Zhang, Ge Li, Jia Li, Yihong Dong, and Zhi Jin. Focused-dpo: Enhancing code generation through focused preference optimization on error-prone points. arXiv preprint arXiv:2502.11475, 2025.", + "[1044] Kechi Zhang, Ge Li, Jia Li, Huangzhao Zhang, Jingjing Xu, Hao Zhu, Lecheng Wang, Yihong Dong, Jing Mai, Bin Gu, et al. Computational thinking reasoning in large language models. arXiv preprint arXiv:2506.02658, 2025.", + "[1045] Kexun Zhang, Shang Zhou, Danqing Wang, William Yang Wang, and Lei Li. Scaling llm inference with optimized sample compute allocation. arXiv preprint arXiv:2410.22480, 2024.", + "[1046] Kongcheng Zhang, Qi Yao, Baisheng Lai, Jiaxing Huang, Wenkai Fang, Dacheng Tao, Mingli Song, and Shunyu Liu. Reasoning with reinforced functional token tuning. arXiv preprint arXiv:2502.13389, 2025.", + "[1047] Kongcheng Zhang, Qi Yao, Shunyu Liu, Yingjie Wang, Baisheng Lai, Jieping Ye, Mingli Song, and Dacheng Tao. Consistent paths lead to truth: Self-rewarding reinforcement learning for lIm reasoning. 
arXiv preprint arXiv:2506.08745, 2025.", + "[1048] Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. arXiv preprint arXiv:2408.15240, 2024.", + "[1049] Ming Zhang, Yu jiong Shen, Zelin Li, Huayu Sha, Binze Hu, Yuhui Wang, Chenhao Huang, Shichun Liu, Jingqi Tong, Changhao Jiang, et al. Llmeval-med: A real-world clinical benchmark for medical llms with physician validation. arXiv preprint arXiv:2506.04078, 2025.", + "[1050] Ming-Liang Zhang, Fei yin, and Cheng-Lin Liu. A multi-modal neural geometric solver with textual clauses parsed from diagram. In Edith Elkind, editor, Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence, IJCAI-23, pages 3374-3382. International Joint Conferences on Artificial Intelligence Organization, 8 2023. doi: 10.24963/ijcai.2023/376. URL https://doi.org/10.24963/ijcai.2023/376. Main Track.", + "[1051] Qingjie Zhang, Han Qiu, Di Wang, Haoting Qian, Yiming Li, Tianwei Zhang, and Minlie Huang. Understanding the dark side of llms' intrinsic self-correction. arXiv preprint arXiv:2412.14959, 2024.", + "[1052] Qiyuan Zhang, Fuyuan Lyu, Zexu Sun, Lei Wang, Weixu Zhang, Zhihan Guo, Yufei Wang, Irwin King, Xue Liu, and Chen Ma. What, how, where, and how well? a survey on test-time scaling in large language models. arXiv preprint arXiv:2503.24235, 2025.", + "[1053] Qiyuan Zhang, Fuyuan Lyu, Zexu Sun, Lei Wang, Weixu Zhang, Wenyue Hua, Haolun Wu, Zhihan Guo, Yufei Wang, Niklas Muennighoff, et al. A survey on test-time scaling in large language models: What, how, where, and how well? arXiv preprint arXiv:2503.24235, 2025." 
+ ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 101 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 42, + 308, + 71 + ], + "page_idx": 101 + }, + { + "type": "page_number", + "text": "102", + "bbox": [ + 485, + 935, + 511, + 946 + ], + "page_idx": 101 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1054] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Yu Qiao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? In European Conference on Computer Vision, pages 169-186. Springer, October 2024. URL https://link.springer.com/chapter/10.1007/978-3-031-73242-3_10.", + "[1055] Shaowei Zhang and Deyi Xiong. BackMATH: Towards backward reasoning for solving math problems step by step. In Owen Rambow, Leo Wanner, Marianna Apidianaki, Hend Al-Khalifa, Barbara Di Eugenio, Steven Schockaert, Kareem Darwish, and Apoorv Agarwal, editors, Proceedings of the 31st International Conference on Computational Linguistics: Industry Track, pages 466-482, Abu Dhabi, UAE, January 2025. Association for Computational Linguistics. URL https://aclanthology.org/2025.coling-industry.40/.", + "[1056] Shenao Zhang, Yaqing Wang, Yinxiao Liu, Tianqi Liu, Peter Grabowski, Eugene Ie, Zhaoran Wang, and Yunxuan Li. Beyond markovian: Reflective exploration via bayes-adaptive rl for llm reasoning. arXiv preprint arXiv:2505.20561, 2025.", + "[1057] Shengjia Zhang, Junjie Wu, Jiawei Chen, Changwang Zhang, Xingyu Lou, Wangchunshu Zhou, Sheng Zhou, Can Wang, and Jun Wang. Othink-r1: Intrinsic fast/slow thinking mode switching for over-reasoning mitigation. arXiv preprint arXiv:2506.02397, 2025.", + "[1058] Shengyu Zhang, Linfeng Dong, Xiaoya Li, Sen Zhang, Xiaofei Sun, Shuhe Wang, Jiwei Li, Runyi Hu, Tianwei Zhang, Fei Wu, et al. Instruction tuning for large language models: A survey. 
arXiv preprint arXiv:2308.10792, 2023.", + "[1059] Shimao Zhang, Xiao Liu, Xin Zhang, Junxiao Liu, Zheheng Luo, Shujian Huang, and Yeyun Gong. Process-based self-rewarding language models. arXiv preprint arXiv:2503.03746, 2025.", + "[1060] Weizhi Zhang, Yangning Li, Yuanchen Bei, Junyu Luo, Guancheng Wan, Liangwei Yang, Chenxuan Xie, Yuyao Yang, Wei-Chieh Huang, Chunyu Miao, et al. From web search towards agentic deep research: Incentivizing search with reasoning agents. arXiv preprint arXiv:2506.18959, 2025.", + "[1061] Wenjing Zhang, Xuejiao Lei, Zhaoxiang Liu, Ning Wang, Zhenhong Long, Peijun Yang, Jiaojiao Zhao, Minjie Hua, Chaoyang Ma, Kai Wang, et al. Safety evaluation of deepseek models in Chinese contexts. arXiv preprint arXiv:2502.11137, 2025.", + "[1062] Wenqi Zhang, Yongliang Shen, Linjuan Wu, Qiuying Peng, Jun Wang, Yueting Zhuang, and Weiming Lu. Self-contrast: Better reflection through inconsistent solving perspectives. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 3602–3622, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.197. URL https://aclanthology.org/2024.acl-long.197/.", + "[1063] Xiaoyun Zhang, Jingqing Ruan, Xing Ma, Yawen Zhu, Haodong Zhao, Hao Li, Jiansong Chen, Ke Zeng, and Xunliang Cai. When to continue thinking: Adaptive thinking mode switching for efficient reasoning. arXiv preprint arXiv:2505.15400, 2025.", + "[1064] Xinyu Zhang, Yuxuan Dong, Yanrui Wu, Jiaxing Huang, Chengyou Jia, Basura Fernando, Mike Zheng Shou, Lingling Zhang, and Jun Liu. Physreason: A comprehensive benchmark towards physics-based reasoning. arXiv preprint arXiv:2502.12054, 2025.", + "[1065] Xuan Zhang, Chao Du, Tianyu Pang, Qian Liu, Wei Gao, and Min Lin. Chain of preference optimization: Improving chain-of-thought reasoning in llms. In A. Globerson, L. Mackey, D. Belgrave, A. 
Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 333-356. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/00d80722b756de0166523a87805dd00f-Paper-Conference.pdf.", + "[1066] Xuanliang Zhang, Dingzirui Wang, Keyan Xu, Qingfu Zhu, and Wanxiang Che. Rot: Enhancing table reasoning with iterative row-wise traversals. arXiv preprint arXiv:2505.15110, 2025.", + "[1067] Yifan Zhang, Yang Yuan, and Andrew Chi-Chih Yao. On the diagram of thought. arXiv preprint arXiv:2409.10038, 2024." + ], + "bbox": [ + 171, + 90, + 826, + 912 + ], + "page_idx": 102 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 102 + }, + { + "type": "page_number", + "text": "103", + "bbox": [ + 485, + 935, + 511, + 946 + ], + "page_idx": 102 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1068] Yifan Zhang, Wenyu Du, Dongming Jin, Jie Fu, and Zhi Jin. Finite state automata inside transformers with chain-of-thought: A mechanistic study on state tracking. arXiv preprint arXiv:2502.20129, 2025.", + "[1069] Yong Zhang, Bingyuan Zhang, Zhitao Li, Ming Li, Ning Cheng, Minchuan Chen, Tao Wei, Jun Ma, Shaojun Wang, and Jing Xiao. Self-enhanced reasoning training: Activating latent reasoning in small models for enhanced reasoning distillation. arXiv preprint arXiv:2502.12744, 2025.", + "[1070] Yongheng Zhang, Qiguang Chen, Min Li, Wanxiang Che, and Libo Qin. AutoCAP: Towards automatic cross-lingual alignment planning for zero-shot chain-of-thought. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 9191–9200, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.546. 
URL https://aclanthology.org/2024-findings-acl.546/.", + "[1071] Yongheng Zhang, Qiguang Chen, Jingxuan Zhou, Peng Wang, Jiasheng Si, Jin Wang, Wenpeng Lu, and Libo Qin. Wrong-of-thought: An integrated reasoning framework with multi-perspective verification and wrong information. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 6644-6653, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.388. URL https://aclanthology.org/2024-findings-emnlp.388/.", + "[1072] Yongheng Zhang, Xu Liu, Ruihan Tao, Qiguang Chen, Hao Fei, Wanxiang Che, and Libo Qin. Vitcot: Video-text interleaved chain-of-thought for boosting video understanding in large language models. arXiv preprint arXiv:2507.09876, 2025.", + "[1073] Yongheng Zhang, Xu Liu, Ruoxi Zhou, Qiguang Chen, Hao Fei, Wenpeng Lu, and Libo Qin. Cchall: A novel benchmark for joint cross-lingual and cross-modal hallucinations detection in large language models. arXiv preprint arXiv:2505.19108, 2025.", + "[1074] Yudi Zhang, Lu Wang, Meng Fang, Yali Du, Chenghua Huang, Jun Wang, Qingwei Lin, Mykola Pechenizkiy, Dongmei Zhang, Saravan Rajmohan, et al. Distill not only data but also rewards: Can smaller language models surpass larger ones? arXiv preprint arXiv:2502.19557, 2025.", + "[1075] Yunxiang Zhang, Muhammad Khalifa, Lajanugen Logeswaran, Jaekyeom Kim, Moontae Lee, Honglak Lee, and Lu Wang. Small language models need strong verifiers to self-correct reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 15637–15653, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.924. URL https://aclanthology.org/2024 findings-acl.924/.", + "[1076] Yuxiang Zhang, Shangxi Wu, Yuqi Yang, Jiangming Shu, Jinlin Xiao, Chao Kong, and Jitao Sang. 
o1-coder: an o1 replication for coding. arXiv preprint arXiv:2412.00154, 2024.", + "[1077] Yuxiang Zhang, Yuqi Yang, Jiangming Shu, Yuhang Wang, Jinlin Xiao, and Jitao Sang. Openrft: Adapting reasoning foundation model for domain-specific tasks with reinforcement fine-tuning. arXiv preprint arXiv:2412.16849, 2024.", + "[1078] Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning. arXiv preprint arXiv:2501.07301, 2025.", + "[1079] Zhihao Zhang, Qiaole Dong, Qi Zhang, Jun Zhao, Enyu Zhou, Zhiheng Xi, Senjie Jin, Xiaoran Fan, Yuhao Zhou, Yanwei Fu, et al. Reinforcement fine-tuning enables mllms learning novel tasks stably. arXiv preprint arXiv:2506.23508, 2025.", + "[1080] Zhongwang Zhang, Pengxiao Lin, Zhiwei Wang, Yaoyu Zhang, and Zhi-Qin John Xu. Complexity control facilitates reasoning-based compositional generalization in transformers. arXiv preprint arXiv:2501.08537, 2025.", + "[1081] Zhuosheng Zhang, Aston Zhang, Mu Li, hai zhao, George Karypis, and Alex Smola. Multi-modal chain-of-thought reasoning in language models. Transactions on Machine Learning Research, June 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=y1pPWFVfvR." + ], + "bbox": [ + 171, + 90, + 826, + 912 + ], + "page_idx": 103 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 103 + }, + { + "type": "page_number", + "text": "104", + "bbox": [ + 485, + 935, + 511, + 946 + ], + "page_idx": 103 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1082] Deji Zhao, Donghong Han, Jia Wu, Zhongjiang He, Bo Ning, Ye Yuan, Yongxiang Li, Chao Wang, and Shuangyong Song. Enhancing math reasoning ability of large language models via computation logic graphs. 
Knowledge-Based Systems, page 113905, 2025.", + "[1083] Eric Zhao, Pranjal Awasthi, and Sreenivas Gollapudi. Sample, scrutinize and scale: Effective inference-time search by scaling verification. arXiv preprint arXiv:2502.01839, 2025.", + "[1084] Han Zhao, Haotian Wang, Yiping Peng, Sitong Zhao, Xiaoyu Tian, Shuaiying Chen, Yunjie Ji, and Xiangang Li. 1.4 million open-source distilled reasoning dataset to empower large language model training. arXiv preprint arXiv:2503.19633, 2025.", + "[1085] Jun Zhao, Jingqi Tong, Yurong Mou, Ming Zhang, Qi Zhang, and Xuanjing Huang. Exploring the compositional deficiency of large language models in mathematical reasoning through trap problems. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 16361-16376, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.915. URL https://aclanthology.org/2024.emnlp-main.915/.", + "[1086] Lili Zhao, Yang Wang, Qi Liu, Mengyun Wang, Wei Chen, Zhichao Sheng, and Shijin Wang. Evaluating large language models through role-guide and self-reflection: A comparative study. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=E36NHwe7Zc.", + "[1087] Shangziqi Zhao, Jiahao Yuan, Guisong Yang, and Usman Naseem. Can pruning improve reasoning? revisiting long-cot compression with capability in mind for better reasoning. arXiv preprint arXiv:2505.14582, 2025.", + "[1088] Weixiang Zhao, Jiahe Guo, Yang Deng, Xingyu Sui, Yulin Hu, Yanyan Zhao, Wanxiang Che, Bing Qin, Tat-Seng Chua, and Ting Liu. Exploring and exploiting the inherent efficiency within large reasoning models for self-guided efficiency enhancement. arXiv preprint arXiv:2506.15647, 2025.", + "[1089] Xuandong Zhao, Zhewei Kang, Aosong Feng, Sergey Levine, and Dawn Song. Learning to reason without external rewards. 
arXiv preprint arXiv:2505.19590, 2025.", + "[1090] Xueliang Zhao, Wei Wu, Jian Guan, and Lingpeng Kong. Promptcot: Synthesizing olympiad-level problems for mathematical reasoning in large language models. arXiv preprint arXiv:2503.02324, 2025.", + "[1091] Xufeng Zhao, Mengdi Li, Wenhao Lu, Cornelius Weber, Jae Hee Lee, Kun Chu, and Stefan Wermter. Enhancing zero-shot chain-of-thought reasoning in large language models through logic. In Nicoletta Calzolari, Min-Yen Kan, Veronique Hoste, Alessandro Lenci, Sakriani Sakti, and Nianwen Xue, editors, Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 6144-6166, Torino, Italia, May 2024. ELRA and ICCL. URL https://aclanthology.org/2024.lrec-main.543/.", + "[1092] Yachao Zhao, Bo Wang, and Yan Wang. Explicit vs. implicit: Investigating social bias in large language models through self-reflection. arXiv preprint arXiv:2501.02295, 2025.", + "[1093] Yang Zhao, Kai Xiong, Xiao Ding, Li Du, Zhouhao Sun, Jiannan Guan, Wenbin Zhang, Bin Liu, Dong Hu, Bing Qin, et al. Ufo-rl: Uncertainty-focused optimization for efficient reinforcement learning data selection. arXiv preprint arXiv:2505.12457, 2025.", + "[1094] Yichong Zhao and Susumu Goto. Can frontier llms replace annotators in biomedical text mining? analyzing challenges and exploring solutions. arXiv preprint arXiv:2503.03261, 2025.", + "[1095] Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. Marco-o1: Towards open reasoning models for open-ended solutions. arXiv preprint arXiv:2411.14405, 2024.", + "[1096] Yurui Zhao, Xiang Wang, Jiahong Liu, Irwin King, and Zhitao Huang. Towards geometry problem solving in the large model era: A survey. arXiv preprint arXiv:2506.02690, 2025.", + "[1097] Zhonghan Zhao, Wenwei Zhang, Haian Huang, Kuikun Liu, Jianfei Gao, Gaoang Wang, and Kai Chen. 
Rig: Synergizing reasoning and imagination in end-to-end generalist policy. arXiv preprint arXiv:2503.24388, 2025." + ], + "bbox": [ + 173, + 90, + 828, + 912 + ], + "page_idx": 104 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 104 + }, + { + "type": "page_number", + "text": "105", + "bbox": [ + 485, + 934, + 511, + 946 + ], + "page_idx": 104 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1098] Zilong Zhao, Yao Rong, Dongyang Guo, Emek Gözlüklü, Emir Gülboy, and Enkelejda Kasneci. Stepwise self-consistent mathematical reasoning with large language models. arXiv preprint arXiv:2402.17786, 2024.", + "[1099] Zirui Zhao, Wee Sun Lee, and David Hsu. Large language models as commonsense knowledge for large-scale task planning. Advances in Neural Information Processing Systems, 36:31967-31987, December 2023. URL https://openreview.net/pdf?id=ted747HURfX.", + "[1100] Bowen Zheng, Xiaolei Wang, Enze Liu, Xi Wang, Lu Hongyu, Yu Chen, Wayne Xin Zhao, and Ji-Rong Wen. Deeprec: Towards a deep dive into the item space with large language model based recommendation. arXiv preprint arXiv:2505.16810, 2025.", + "[1101] Chuanyang Zheng, Zhengying Liu, Enze Xie, Zhenguo Li, and Yu Li. Progressive-hint prompting improves reasoning in large language models. In AI for Math Workshop @ ICML 2024, June 2024. URL https://openreview.net/forum?id=UkFEs3ciz8.", + "[1102] Chujie Zheng, Zhenru Zhang, Beichen Zhang, Runji Lin, Keming Lu, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. Processbench: Identifying process errors in mathematical reasoning. arXiv preprint arXiv:2412.06559, 2024.", + "[1103] Da Zheng, Lun Du, Junwei Su, Yuchen Tian, Yuqi Zhu, Jintian Zhang, Lanning Wei, Ningyu Zhang, and Huajun Chen. Knowledge augmented complex problem solving with large language models: A survey. 
arXiv preprint arXiv:2505.03418, 2025.", + "[1104] Ge Zheng, Bin Yang, Jiajin Tang, Hong-Yu Zhou, and Sibei Yang. Ddcot: Duty-distinct chain-of-thought prompting for multimodal reasoning in language models. Advances in Neural Information Processing Systems, 36:5168-5191, 2023.", + "[1105] Hang Zheng, Hongshen Xu, Yuncong Liu, Lu Chen, Pascale Fung, and Kai Yu. Enhancing llm reliability via explicit knowledge boundary modeling. arXiv preprint arXiv:2503.02233, 2025.", + "[1106] Jiani Zheng, Lu Wang, Fangkai Yang, Chaoyun Zhang, Lingrui Mei, Wenjie Yin, Qingwei Lin, Dongmei Zhang, Saravan Rajmohan, and Qi Zhang. Vem: Environment-free exploration for training gui agent with value environment model. arXiv preprint arXiv:2502.18906, 2025.", + "[1107] Kunhao Zheng, Juliette Decugis, Jonas Gehring, Taco Cohen, benjamin negrevergne, and Gabriel Synnaeve. What makes large language models reason in (multi-turn) code generation? In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=Zk9guO19NS.", + "[1108] Tianyu Zheng, Ge Zhang, Tianhao Shen, Xueling Liu, Bill Yuchen Lin, Jie Fu, Wenhu Chen, and Xiang Yue. OpenCodeInterpreter: Integrating code generation with execution and refinement. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 12834–12859, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.762. URL https://aclanthology.org/2024-findings-acl.762/.", + "[1109] Xin Zheng, Jie Lou, Boxi Cao, Xueru Wen, Yuqiu Ji, Hongyu Lin, Yaojie Lu, Xianpei Han, Debing Zhang, and Le Sun. Critic-cot: Boosting the reasoning abilities of large language model via chain-of-thoughts critic. arXiv preprint arXiv:2408.16326, 2024.", + "[1110] Zhi Zheng, Zhuoliang Xie, Zhenkun Wang, and Bryan Hooi. 
Monte carlo tree search for comprehensive exploration in llm-based automatic heuristic design. arXiv preprint arXiv:2501.08603, 2025.", + "[1111] Jianyuan Zhong, Zeju Li, Zhijian Xu, Xiangyu Wen, and Qiang Xu. Dyve: Thinking fast and slow for dynamic process verification. arXiv preprint arXiv:2502.11157, 2025.", + "[1112] Qihuang Zhong, Kang Wang, Ziyang Xu, Juhua Liu, Liang Ding, and Bo Du. Achieving> 97% on gsm8k: Deeply understanding the problems makes llms better solvers for math word problems. arXiv preprint arXiv:2404.14963, 2024.", + "[1113] Tianyang Zhong, Zhengliang Liu, Yi Pan, Yutong Zhang, Yifan Zhou, Shizhe Liang, Zihao Wu, Yanjun Lyu, Peng Shu, Xiaowei Yu, et al. Evaluation of openai o1: Opportunities and challenges of agi. arXiv preprint arXiv:2409.18486, 2024." + ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 105 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 42, + 308, + 71 + ], + "page_idx": 105 + }, + { + "type": "page_number", + "text": "106", + "bbox": [ + 485, + 935, + 511, + 946 + ], + "page_idx": 105 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1114] Andy Zhou, Kai Yan, Michal Shlapentokh-Rothman, Haohan Wang, and Yu-Xiong Wang. Language agent tree search unifies reasoning, acting, and planning in language models. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=njwv9BsGHF.", + "[1115] Aojun Zhou, Ke Wang, Zimu Lu, Weikang Shi, Sichun Luo, Zipeng Qin, Shaoqing Lu, Anya Jia, Linqi Song, Mingjie Zhan, and Hongsheng Li. Solving challenging math word problems using GPT-4 code interpreter with code-based self-verification. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=c8McWs4Av0.", + "[1116] Changzhi Zhou, Xinyu Zhang, Dandan Song, Xiancai Chen, Wanli Gu, Huipeng Ma, Yuhang Tian, Mengdi Zhang, and Linmei Hu. 
Refinecoder: Iterative improving of large language models via adaptive critique refinement for code generation. arXiv preprint arXiv:2502.09183, 2025.", + "[1117] Denny Zhou, Nathanael Scharli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc V Le, and Ed H. Chi. Least-to-most prompting enables complex reasoning in large language models. In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=WZH7099tgfM.", + "[1118] Fan Zhou, Haoyu Dong, Qian Liu, Zhoujun Cheng, Shi Han, and Dongmei Zhang. Reflection of thought: Inversely eliciting numerical reasoning in language models via solving linear systems. arXiv preprint arXiv:2210.05075, 2022.", + "[1119] Hengguang Zhou, Xinui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's\" aha moment\" in visual reasoning on a 2b non-sft model. arXiv preprint arXiv:2503.05132, 2025.", + "[1120] Jin Peng Zhou, Charles E Staats, Wenda Li, Christian Szegedy, Kilian Q Weinberger, and Yuhuai Wu. Don't trust: Verify – grounding LLM quantitative reasoning with autoformalization. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=V5tdi14ple.", + "[1121] Jin Peng Zhou, Kaiwen Wang, Jonathan Chang, Zhaolin Gao, Nathan Kallus, Kilian Q Weinberger, Kianté Brantley, and Wen Sun. q#: Provably optimal distributional rl for llm post-training. arXiv preprint arXiv:2502.20548, 2025.", + "[1122] Kaiwen Zhou, Chengzhi Liu, Xuandong Zhao, Shreedhar Jangam, Jayanth Srinivasa, Gaowen Liu, Dawn Song, and Xin Eric Wang. The hidden risks of large reasoning models: A safety assessment of r1. arXiv preprint arXiv:2502.12659, 2025.", + "[1123] Lexin Zhou, Wout Schellaert, Fernando Martínez-Plumed, Yael Moros-Daval, César Ferri, and José Hernández-Orallo. Larger and more instructable language models become less reliable. Nature, 634(8032):61–68, 2024. 
URL https://www.nature.com/articles/s41586-024-07930-y.", + "[1124] Li Zhou, Ruijie Zhang, Xunlian Dai, Daniel Hershcovich, and Haizhou Li. Large language models penetration in scholarly writing and peer review. arXiv preprint arXiv:2502.11193, 2025.", + "[1125] Ruochen Zhou, Minrui Xu, Shiqi Chen, Junteng Liu, Yunqi Li, LIN Xinxin, Zhengyu Chen, and Junxian He. AI for math or math for AI? on the generalization of learning mathematical problem solving. In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, 2024. URL https://openreview.net/forum?id=xlnvZ85CSo.", + "[1126] Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. Webarena: A realistic web environment for building autonomous agents. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=oKn9c6ytLx.", + "[1127] Xiangxin Zhou, Zichen Liu, Anya Sims, Haonan Wang, Tianyu Pang, Chongxuan Li, Liang Wang, Min Lin, and Chao Du. Reinforcing general reasoning without verifiers. arXiv preprint arXiv:2505.21493, 2025." + ], + "bbox": [ + 173, + 90, + 828, + 911 + ], + "page_idx": 106 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 106 + }, + { + "type": "page_number", + "text": "107", + "bbox": [ + 485, + 934, + 511, + 946 + ], + "page_idx": 106 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1128] Xiaofeng Zhou, Heyan Huang, and Lizi Liao. Debate, reflect, and distill: Multi-agent feedback with tree-structured preference optimization for efficient language model enhancement. arXiv preprint arXiv:2506.03541, 2025.", + "[1129] Xin Zhou, Yiwen Guo, Ruotian Ma, Tao Gui, Qi Zhang, and Xuanjing Huang. Self-consistency of the internal reward models improves self-rewarding language models. 
arXiv preprint arXiv:2502.08922, 2025.", + "[1130] Yang Zhou, Hongyi Liu, Zhuoming Chen, Yuandong Tian, and Beidi Chen. Gsm-infinite: How do your llms behave over infinitely increasing context length and reasoning complexity? arXiv preprint arXiv:2502.05252, 2025.", + "[1131] Yifei Zhou, Song Jiang, Yuandong Tian, Jason Weston, Sergey Levine, Sainbayar Sukhbaatar, and Xian Li. Sweet-rl: Training multi-turn llm agents on collaborative reasoning tasks. arXiv preprint arXiv:2503.15478, 2025.", + "[1132] Yufa Zhou, Shaobo Wang, Xingyu Dong, Xiangqi Jin, Yifang Chen, Yue Min, Kexin Yang, Xingzhang Ren, Dayiheng Liu, and Linfeng Zhang. Reasoning like an economist: Posttraining on economic problems induces strategic generalization in llms. arXiv preprint arXiv:2506.00577, 2025.", + "[1133] Zhanke Zhou, Zhaocheng Zhu, Xuan Li, Mikhail Galkin, Xiao Feng, Sanmi Koyejo, Jian Tang, and Bo Han. Landscape of thoughts: Visualizing the reasoning process of large language models. arXiv preprint arXiv:2503.22165, 2025.", + "[1134] Zhi Zhou, Tan Yuhao, Zenan Li, Yuan Yao, Lan-Zhe Guo, Xiaoxing Ma, and Yu-Feng Li. Bridging internal probability and self-consistency for effective and efficient lIm reasoning. arXiv preprint arXiv:2502.00511, 2025.", + "[1135] Bin Zhu, Hailong Yin, Jingjing Chen, and Yu-Gang Jiang. Reasoning models are more easily gaslighted than you think. arXiv preprint arXiv:2506.09677, 2025.", + "[1136] Dawei Zhu, Xiyu Wei, Guangxiang Zhao, Wenhao Wu, Haosheng Zou, Junfeng Ran, Xun Wang, Lin Sun, Xiangzheng Zhang, and Sujian Li. Chain-of-thought matters: Improving long-context language models with reasoning path supervision. arXiv preprint arXiv:2502.20790, 2025.", + "[1137] Jason Zhu and Hongyu Li. Towards concise and adaptive thinking in large reasoning models: A survey. arXiv preprint arXiv:2507.09662, 2025.", + "[1138] Junda Zhu, Lingyong Yan, Shuaiqiang Wang, Dawei Yin, and Lei Sha. 
Reasoning-to-defend: Safety-aware reasoning can defend large language models from jailbreaking. arXiv preprint arXiv:2502.12970, 2025.", + "[1139] King Zhu, Hanhao Li, Siwei Wu, Tianshun Xing, Dehua Ma, Xiangru Tang, Minghao Liu, Jian Yang, Jiaheng Liu, Yuchen Eleanor Jiang, et al. Scaling test-time compute for llm agents. arXiv preprint arXiv:2506.12928, 2025.", + "[1140] Kunlun Zhu, Hongyi Du, Zhaochen Hong, Xiaocheng Yang, Shuyi Guo, Zhe Wang, Zhenhailong Wang, Cheng Qian, Xiangru Tang, Heng Ji, et al. Multiagentbench: Evaluating the collaboration and competition of lIm agents. arXiv preprint arXiv:2503.01935, 2025.", + "[1141] Rongzhi Zhu, Yi Liu, Zequn Sun, Yiwei Wang, and Wei Hu. When can large reasoning models save thinking? mechanistic analysis of behavioral divergence in reasoning. arXiv preprint arXiv:2505.15276, 2025.", + "[1142] Tinghui Zhu, Kai Zhang, Jian Xie, and Yu Su. Deductive beam search: Decoding deducible rationale for chain-of-thought reasoning. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=S1XnUsqwr7.", + "[1143] Xinyu Zhu, Junjie Wang, Lin Zhang, Yuxiang Zhang, Yongfeng Huang, Ruyi Gan, Jiaxing Zhang, and Yujiu Yang. Solving math word problems via cooperative reasoning induced language models. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 4471–4485, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.245. URL https://aclanthology.org/2023.acl-long.245/." 
+ ], + "bbox": [ + 171, + 90, + 828, + 912 + ], + "page_idx": 107 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 107 + }, + { + "type": "page_number", + "text": "108", + "bbox": [ + 485, + 935, + 511, + 946 + ], + "page_idx": 107 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1144] Zihao Zhu, Hongbao Zhang, Ruotong Wang, Ke Xu, Siwei Lyu, and Baoyuan Wu. To think or not to think: Exploring the unthinking vulnerability in large reasoning models. arXiv preprint arXiv:2502.12202, 2025.", + "[1145] Zihao Zhu, Hongbao Zhang, Mingda Zhang, Ruotong Wang, Guanzong Wu, Ke Xu, and Baoyuan Wu. Bot: Breaking long thought processes of o1-like large language models through backdoor attack. arXiv preprint arXiv:2502.12202, 2025.", + "[1146] Ren Zhuang, Ben Wang, and Shuifa Sun. Accelerating chain-of-thought reasoning: When goal-gradient importance meets dynamic skipping. arXiv preprint arXiv:2505.08392, 2025.", + "[1147] Ziyu Zhuang, Qiguang Chen, Longxuan Ma, Mingda Li, Yi Han, Yushan Qian, Haopeng Bai, Weinan Zhang, and Liu Ting. Through the lens of core competency: Survey on evaluation of large language models. In Proceedings of the 22nd Chinese National Conference on Computational Linguistics (Volume 2: Frontier Forum), pages 88–109, Harbin, China, August 2023. Chinese Information Processing Society of China. URL https://aclanthology.org/2023.ccl-2.8/.", + "[1148] Alireza S Ziabari, Nona Ghazizadeh, Zhivar Sourati, Farzan Karimi-Malekabadi, Payam Piray, and Morteza Dehghani. Reasoning on a spectrum: Aligning llms to system 1 and system 2 thinking. arXiv preprint arXiv:2502.12470, 2025.", + "[1149] Henry Peng Zou, Zhengyao Gu, Yue Zhou, Yankai Chen, Weizhi Zhang, Liancheng Fang, Yibo Wang, Yangning Li, Kay Liu, and Philip S Yu. Testnuc: Enhancing test-time computing approaches through neighboring unlabeled data consistency. 
arXiv preprint arXiv:2502.19163, 2025.", + "[1150] Yuxin Zuo, Shang Qu, Yifei Li, Zhangren Chen, Xuekai Zhu, Ermo Hua, Kaiyan Zhang, Ning Ding, and Bowen Zhou. Medxpertqa: Benchmarking expert-level medical reasoning and understanding. arXiv preprint arXiv:2501.18362, 2025." + ], + "bbox": [ + 171, + 90, + 826, + 454 + ], + "page_idx": 108 + }, + { + "type": "header", + "text": "LARG LANGUAGE ANALYSIS REASONING GROUP", + "bbox": [ + 173, + 41, + 308, + 71 + ], + "page_idx": 108 + }, + { + "type": "page_number", + "text": "109", + "bbox": [ + 485, + 934, + 511, + 946 + ], + "page_idx": 108 + } +] \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09567/17e53201-29b3-43fd-8f2e-78d7b00a58a6_model.json b/data/2025/2503_09xxx/2503.09567/17e53201-29b3-43fd-8f2e-78d7b00a58a6_model.json new file mode 100644 index 0000000000000000000000000000000000000000..122ec6581a420b1cf1292ab90f57df89d71d7366 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/17e53201-29b3-43fd-8f2e-78d7b00a58a6_model.json @@ -0,0 +1,21164 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.234, + 0.12, + 0.3, + 0.165 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.308, + 0.135, + 0.771, + 0.213 + ], + "angle": 0, + "content": "Towards Reasoning Era: A Survey of Chain-of-Thought for Reasoning Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.263, + 0.788, + 0.308 + ], + "angle": 0, + "content": "Qiguang Chen† Libo Qin‡ Jinhao Liu† Dengyun Peng† Jiannan Guan† Peng Wang‡ Mengkang Hu◇ Yuhang Zhou Te Gao† Wanxiang Che† LARG," + }, + { + "type": "text", + "bbox": [ + 0.292, + 0.31, + 0.725, + 0.339 + ], + "angle": 0, + "content": "\\(\\dagger\\) Research Center for Social Computing and Interactive Robotics, \\(\\dagger\\) Harbin Institute of Technology" + }, + { + "type": "text", + "bbox": [ + 0.269, + 0.34, + 0.75, + 0.355 + ], + "angle": 0, + "content": "\\(^{\\ddagger}\\) School of Computer Science and Engineering, Central South 
University" + }, + { + "type": "text", + "bbox": [ + 0.403, + 0.355, + 0.616, + 0.369 + ], + "angle": 0, + "content": "The University of Hong Kong" + }, + { + "type": "text", + "bbox": [ + 0.445, + 0.369, + 0.574, + 0.383 + ], + "angle": 0, + "content": "Fudan University" + }, + { + "type": "text", + "bbox": [ + 0.301, + 0.384, + 0.718, + 0.397 + ], + "angle": 0, + "content": "{qgchen,car}@ir.hit.edu.cn,lbqin@csu.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.345, + 0.41, + 0.67, + 0.426 + ], + "angle": 0, + "content": "Project: https://long-cot.github.io/" + }, + { + "type": "text", + "bbox": [ + 0.224, + 0.438, + 0.792, + 0.453 + ], + "angle": 0, + "content": "Github: LightChen233/Awesome-Long-Chain-of-Thought-Reasoning" + }, + { + "type": "image", + "bbox": [ + 0.186, + 0.493, + 0.833, + 0.826 + ], + "angle": 0, + "content": null + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.286, + 0.059, + 0.712 + ], + "angle": 270, + "content": "arXiv:2503.09567v5 [cs.AI] 18 Jul 2025" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.09, + 0.542, + 0.108 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.122, + 0.77, + 0.429 + ], + "angle": 0, + "content": "Recent advancements in reasoning with large language models (RLLMs), such as OpenAI-o1 and DeepSeek-R1, have demonstrated their impressive capabilities in complex domains like mathematics and coding. A central factor in their success lies in the application of long chain-of-thought (Long CoT) characteristics, which enhance reasoning abilities and enable the solution of intricate problems. 
However, despite these developments, a comprehensive survey on Long CoT is still lacking, limiting our understanding of its distinctions from traditional short chain-of-thought (Short CoT) and complicating ongoing debates on issues like \"overthinking\" and \"inference-time scaling\". This survey seeks to fill this gap by offering a unified perspective on Long CoT. Specifically, (1) We first distinguish Long CoT from Short CoT and introduce a novel taxonomy to categorize current reasoning paradigms. (2) Next, we explore the key characteristics of Long CoT: deep reasoning, extensive exploration, and feasible reflection, which enable models to handle more complex tasks and produce more efficient, coherent outcomes compared to the shallower Short CoT. (3) We then investigate key phenomena such as the emergence of Long CoT with these characteristics, including overthinking, and inference-time scaling, offering insights into how these processes manifest in practice. (4) Finally, we identify significant research gaps and highlight promising future directions, including the integration of multi-modal reasoning, efficiency improvements, and enhanced knowledge frameworks. By providing a structured overview, this survey aims to inspire future research and further the development of reasoning large language models1." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.454, + 0.317, + 0.47 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.486, + 0.828, + 0.653 + ], + "angle": 0, + "content": "In recent years, as shown in Figure 1, the emergence of reasoning large language models (RLLMs) such as OpenAI o1 [307] and DeepSeek R1 [227] has sparked a growing body of research into Long Chain-of-Thought (Long CoT) reasoning, greatly improving their mathematical reasoning, programming tasks, and multidisciplinary knowledge reasoning capabilities [696, 980, 722, 79, 961, 200, 1113, 793], even passing Turing Test [334]. 
This shift marks a significant departure from traditional approaches to task handling in large language models (LLMs) [1147, 619, 622, 599]. Unlike the shorter chain-of-thought (Short CoT) used in traditional LLMs, Long CoT reasoning entails a more detailed, iterative process of exploration and reflection within a given problem space by inference-time scaling [419, 733, 524]. This process has led to notable advancements in mathematical and logical reasoning, as well as in exploring how supervised fine-tuning (SFT) and reinforcement learning (RL) techniques can enhance the learning and exploration of extended reasoning chains [623, 550]." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.658, + 0.828, + 0.743 + ], + "angle": 0, + "content": "However, there is no comprehensive survey to systematically understand the main factors and recent efforts of Long CoT for RLLMs, which hinders the development of RLLMs. As a result, there are ongoing debates about the effectiveness of simple \"inference-time scaling\" for Longer CoT [864, 486] versus the argument that \"over-thinking\" from excessively long scaling can harm LLMs and introduce unnecessary complexity [103, 142, 357]. Moreover, some researchers argue that, when solving specific problems, there is no clear relationship between length and accuracy [886]." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.747, + 0.829, + 0.888 + ], + "angle": 0, + "content": "To address this gap, we provide an extensive and comprehensive survey of Long CoT. 
Specifically, as illustrated in Figure 2, we first define and examine the distinctions between Long CoT and traditional Short CoT, focusing on the following key aspects: (1) Deep Reasoning, which requires a sufficient depth of logical processing to manage an extensive set of logical nodes; (2) Extensive Exploration, which involves generating parallel uncertain nodes and transitioning from known to unknown logic; and (3) Feasible Reflection, which involves feedback and refinement of logical connections. These characteristics enable Long CoT paradigms to integrate more intricate reasoning and accommodate a broader range of logical structures, ultimately leading to more efficient and coherent outcomes. Subsequently, we systematically explore the underlying explanations for key phenomena associated with Long CoT, such as its emergence, the overthinking phenomenon," + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.898, + 0.813, + 0.913 + ], + "angle": 0, + "content": "Our logo refers to a cute cartoon image - Snake Puppy. Header Image is inspired by Yaoting et al. 
[959]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.069 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.093, + 0.825, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.343, + 0.828, + 0.428 + ], + "angle": 0, + "content": "Figure 1: Evolution of selected Long CoT over the past three years, where colored branches represent different characteristics: deep reasoning, feasible reflection, and extensive exploration. Each characteristic is further divided into key areas: Deep reasoning includes its format and learning methods. Feasible reflection focuses on feedback and refinement techniques during reflection process as optimization strategies. Extensive exploration addresses scaling, internal, and external exploration as key improvements to Long CoT." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.456, + 0.825, + 0.513 + ], + "angle": 0, + "content": "inference-time scaling during testing, and the \"Aha Moment,\" among others. To our knowledge, This is the first comprehensive survey dedicated to these specific topics. Finally, considering the extensive body of literature, we highlight promising areas for future research and suggest valuable open-resource frameworks and datasets that can serve as a foundation for future investigations." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.518, + 0.51, + 0.532 + ], + "angle": 0, + "content": "The main contributions of this work are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.54, + 0.825, + 0.581 + ], + "angle": 0, + "content": "- Systematic Distinction: In this work, we first introduce the concept of Long CoT reasoning and distinguish it from the traditional Short CoT, thereby providing a clear framework for understanding both paradigms and their respective characteristics." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.585, + 0.825, + 0.627 + ], + "angle": 0, + "content": "- Explanation of Hot Phenomena: We systematically investigate the notable phenomena associated with Long CoT reasoning, such as overthinking, inference-time scaling, and the \"Aha Moment\", offering valuable insights into the cognitive processes involved in complex reasoning." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.63, + 0.825, + 0.686 + ], + "angle": 0, + "content": "- Emerging Challenges and Frontiers: We explore the emerging challenges within the field of Long CoT reasoning and identify key research frontiers. Given the vast body of literature, we highlight areas where further inquiry could significantly advance the development of Long CoT methodologies." + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.54, + 0.825, + 0.686 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.709, + 0.531, + 0.726 + ], + "angle": 0, + "content": "2 Discussion of Long CoT v.s. Short CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.741, + 0.827, + 0.826 + ], + "angle": 0, + "content": "This section formalizes the key differences between Long Chain-of-Thought (Long CoT) and Short Chain-of-Thought (Short CoT), emphasizing reasoning depth, revisiting connections, and logical node exploration [858]. These distinctions are clearly separate from System 1 and System 2 thinking. 
The comparison between Long CoT and Short CoT is framed within System 2, with Long CoT involving more thorough reasoning, reflection, and exploration, while Short CoT generally prioritizes shallow and efficient logic over exhaustive reasoning." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.844, + 0.376, + 0.858 + ], + "angle": 0, + "content": "2.1 Overview of Short CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.827, + 0.913 + ], + "angle": 0, + "content": "As illustrated by Figure 2, Short CoT is typically characterized by a shallow, linear reasoning process, where conclusions are drawn sequentially, often relying on a limited number of logical nodes [551]. This reasoning is usually rapid and straightforward, with simple, surface-level transitions and minimal" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.226, + 0.071 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.065 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.065, + 0.307, + 0.07 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.095, + 0.775, + 0.108 + ], + "angle": 0, + "content": "Proof of Number Theory Problem: For any positive integer \\( n \\), there exists a positive integer \\( m \\) such that \\( m^2 + 1 \\) is divisible by \\( n \\)." 
+ }, + { + "type": "image", + "bbox": [ + 0.182, + 0.114, + 0.818, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.278, + 0.828, + 0.321 + ], + "angle": 0, + "content": "Figure 2: The differences between advanced Long CoT and traditional Short CoT are characterized by three key characteristics: deep reasoning, feasible reflection, and extensive exploration. Moreover, Long CoT integrates all these characteristics to achieve substantial logical efficacy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.351, + 0.825, + 0.38 + ], + "angle": 0, + "content": "exploration of alternative paths, which restricts its generalizability [683]. Formally, given a reasoning model \\(\\mathcal{R}\\), we can define the rationale of Short CoT \\((\\mathsf{C}\\circ \\mathsf{T}_S)\\) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.214, + 0.389, + 0.826, + 0.407 + ], + "angle": 0, + "content": "\\[\n\\mathrm {C o T} _ {S} = \\mathcal {R} \\left(\\left\\{n _ {i} \\right\\} _ {i = 1} ^ {k} | (k \\leq \\mathcal {B} _ {s}) \\wedge (j = 1 \\Leftrightarrow \\forall i \\leq k, n _ {i} \\rightarrow n _ {i + j}) \\wedge (\\forall i \\neq j \\leq k, n _ {i} \\neq n _ {j})\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.416, + 0.827, + 0.473 + ], + "angle": 0, + "content": "where \\( n_1 \\) to \\( n_k \\) represent a sequence of logical nodes, which naturally satisfy that \\( \\forall i, n_i \\to n_{i+1} \\). Here, \\( \\mathcal{B}_s \\) denotes the upper boundary on the number of logical nodes, as defined by Chen et al. [90]. In this paradigm, the reasoning progresses sequentially from one node to the next, with minimal revisitation of previous nodes and little exploration of alternative logical paths." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.492, + 0.375, + 0.507 + ], + "angle": 0, + "content": "2.2 Overview of Long CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.519, + 0.827, + 0.589 + ], + "angle": 0, + "content": "In contrast, Long CoT involves deeper reasoning, reflective analysis, and a broader exploration of logical structures. It facilitates reasoning across a wider range of logical steps, addressing both known and unknown elements of a problem [194, 858]. Building on this, Long CoT expands upon the constraints presented in Equation 1 based on tree structures by incorporating three critical components: deep reasoning, exploration, and reflection." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.595, + 0.827, + 0.706 + ], + "angle": 0, + "content": "These components play distinct yet complementary roles in enhancing cognitive processes. Deep reasoning ensures each logical step is executed rigorously, even within complex structures, fostering robust logic across intricate relationships. Exploration encourages the identification of new pathways, revealing potential avenues that may not be immediately obvious. Reflection enables iterative analysis and reassessment of conclusions, allowing reasoning to evolve throughout problem-solving. By distinguishing these three categories, Long CoT enhances its ability to address a broader range of problems with precision and depth. As shown in Figure 3, we will now discuss these key differences in detail." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.724, + 0.439, + 0.74 + ], + "angle": 0, + "content": "2.2.1 Deep Reasoning for Long CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.75, + 0.828, + 0.82 + ], + "angle": 0, + "content": "As shown by Figure 2, deep reasoning refers to the capability to perform deep and thorough logical analysis across multiple interconnected logical nodes, where Short CoT generally can never achieve. 
This capability is essential when tackling complex problems that require a massive number of logical deductions to arrive at a valid conclusion. To better define and understand deep reasoning, we frame it as a capability that primarily relaxes the first constraint in Equation 1, as expressed by the following:" + }, + { + "type": "equation", + "bbox": [ + 0.393, + 0.83, + 0.825, + 0.845 + ], + "angle": 0, + "content": "\\[\nk \\leq \\mathcal {B} _ {s} \\mapsto k \\leq \\mathcal {B} _ {l} \\wedge \\mathcal {B} _ {s} \\ll \\mathcal {B} _ {l}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.826, + 0.913 + ], + "angle": 0, + "content": "where \\(\\mathcal{B}_l\\) represents the upper boundary for Long CoT reasoning, which can accommodate much more intricate logical nodes compared to the smaller boundary \\(\\mathcal{B}_s\\) for Short CoT. The larger boundary \\(\\mathcal{B}_l\\) alleviates issues related to insufficient depth in reasoning, thereby reducing the risk of generating unresolved answers or hallucinated responses in short-form reasoning." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.066 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.066, + 0.306, + 0.07 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.079, + 0.833, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.545, + 0.825, + 0.574 + ], + "angle": 0, + "content": "Figure 3: Taxonomy of Long CoT, which includes deep reasoning, feasible reflection, and extensive exploration methodologies." + }, + { + "type": "title", + "bbox": [ + 0.205, + 0.605, + 0.431, + 0.621 + ], + "angle": 0, + "content": "Key Difference: Reasoning Depth" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.628, + 0.801, + 0.657 + ], + "angle": 0, + "content": "- Short CoT typically addresses a limited set of logical nodes, involving shallow reasoning, and struggles with problems requiring complex or intricate logical structures." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.659, + 0.801, + 0.689 + ], + "angle": 0, + "content": "- Long CoT is designed to accommodate a significantly larger set of logical nodes, allowing for deeper logic and more thorough analysis during the reasoning process." 
+ }, + { + "type": "list", + "bbox": [ + 0.2, + 0.628, + 0.801, + 0.689 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.716, + 0.48, + 0.731 + ], + "angle": 0, + "content": "2.2.2 Extensive Exploration for Long CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.742, + 0.825, + 0.824 + ], + "angle": 0, + "content": "As shown by Figure 2, Long CoT encourages branching out to extensively explore uncertain or unknown logical nodes, thereby expanding the potential set of reasoning paths. This exploration is particularly critical when solving problems characterized by ambiguity, incomplete information, or multiple possible solutions [43, 1016, 871]. More specifically, we describe how extensive exploration primarily addresses the relaxation of the second constraint in Equation 1, which can be formalized as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.292, + 0.831, + 0.825, + 0.848 + ], + "angle": 0, + "content": "\\[\nj = 1 \\Leftrightarrow \\forall i \\leq k, n _ {i} \\rightarrow n _ {i + j} \\mapsto \\exists m, \\forall i, \\forall j \\leq m, n _ {i} \\rightarrow n _ {i + j}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.825, + 0.913 + ], + "angle": 0, + "content": "where the condition indicates that for a logical node \\( n_i \\), there are \\( m \\) nodes that are explored in parallel. The acceptability of parallel exploration allows for a more systematic approach, enabling the exploration of previously unconsidered logical paths. This, in turn, helps maximize the understanding of all possible solutions, ultimately leading to the correct final answer." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.226, + 0.071 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.065 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.065, + 0.307, + 0.07 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "title", + "bbox": [ + 0.205, + 0.092, + 0.513, + 0.107 + ], + "angle": 0, + "content": "Key Difference: Exploration of Logical Nodes" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.115, + 0.798, + 0.144 + ], + "angle": 0, + "content": "- Short CoT generally restricts exploration to a fixed set of logical nodes, often resulting in oversimplified reasoning and limited exploration." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.147, + 0.798, + 0.176 + ], + "angle": 0, + "content": "- Long CoT explores more various paths, including uncertain or uncharted areas, fostering more nuanced and comprehensive problem-solving." + }, + { + "type": "list", + "bbox": [ + 0.2, + 0.115, + 0.798, + 0.176 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.199, + 0.456, + 0.215 + ], + "angle": 0, + "content": "2.2.3 Feasible Reflection for Long CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.224, + 0.825, + 0.294 + ], + "angle": 0, + "content": "As shown by Figure 2, Long CoT involves revisiting previous logical nodes to verify their connections are valid and accurate, and then correcting them or selecting an alternative logical path. Formally, feasible reflection relaxes the third constraint in Equation 1, which originally requires acyclic reasoning such that \\( n_i \\neq n_j \\) for all \\( i \\neq j \\leq k \\). 
In contrast, feasible reflection permits the reasoning path to return to a previously visited node, captured as:" + }, + { + "type": "equation", + "bbox": [ + 0.343, + 0.302, + 0.826, + 0.319 + ], + "angle": 0, + "content": "\\[\n\\forall i \\neq j \\leq k, n _ {i} \\neq n _ {j} \\mapsto \\exists i < j \\leq k, n _ {i} = n _ {j}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.326, + 0.825, + 0.37 + ], + "angle": 0, + "content": "where this condition indicates that, for a logical node \\( n_{j-1} \\), the subsequent node is not limited to the original next node \\( \\hat{n}_j \\). Instead, it may transition to \\( n_i \\) (i.e., the next logical node becomes \\( n_j \\), where \\( n_j = n_i \\)). Practically, reflection implementation consists of two components:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.38, + 0.826, + 0.463 + ], + "angle": 0, + "content": "Feedback refers to evaluating both overall and intermediate outputs for correctness and quality, also known as critique or verification. It can be derived from external sources, validation checks, or by reflecting on prior conclusions within the reasoning process. Formally, at each step \\( n_i \\), a verification process \\( \\mathcal{V}_i \\) ensures the correctness, feasibility, and consistency of the reasoning. If an issue is identified, the process redirects \\( n_i \\) to the nearest correct node \\( n_j \\), where \\( j < i \\). This relationship is formalized as:" + }, + { + "type": "equation", + "bbox": [ + 0.405, + 0.464, + 0.825, + 0.481 + ], + "angle": 0, + "content": "\\[\n\\mathcal {F} _ {i}, n _ {j} \\leftarrow \\operatorname {F e e d b a c k} \\left(\\mathrm {C o T} _ {L} ^ {i}\\right) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.486, + 0.825, + 0.515 + ], + "angle": 0, + "content": "where \\(\\mathrm{CoT}_L^i = \\{n_1,\\dots ,n_i\\}\\) represents the current logical path up to the \\(i\\) -th logical node for Long CoT." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.526, + 0.827, + 0.567 + ], + "angle": 0, + "content": "Refinement involves adjusting intermediate steps or modifying the logical flow to correct inconsistencies or address gaps based on the given feedback. This process can be expressed mathematically as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.374, + 0.568, + 0.825, + 0.585 + ], + "angle": 0, + "content": "\\[\n\\widetilde {n} _ {i + 1} \\leftarrow \\operatorname {R e f i n e} \\left(n _ {i + 1} \\mid \\mathrm {C o T} _ {L} ^ {i}, \\mathcal {F} _ {i}, n _ {j}\\right), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.589, + 0.825, + 0.619 + ], + "angle": 0, + "content": "where \\(\\widetilde{n}_{i+1}\\) represents the refined version of the subsequent logical node \\(n_{i+1}\\), according to the current logic \\(\\mathrm{CoT}_L^i\\), feedback result \\(\\mathcal{F}_i\\), and previous logical node \\(n_j\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.624, + 0.826, + 0.667 + ], + "angle": 0, + "content": "Overall, incorporating reflection ensures that errors are identified and corrected promptly. This capability enables LLMs to quickly shift to alternative reasoning paths or correct their current trajectory. By doing so, error propagation is minimized, resulting in more accurate conclusions." + }, + { + "type": "title", + "bbox": [ + 0.205, + 0.676, + 0.476, + 0.691 + ], + "angle": 0, + "content": "Key Difference: Feedback & Refinement" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.699, + 0.798, + 0.729 + ], + "angle": 0, + "content": "- Short CoT typically moves in a straightforward, non-repetitive manner from one node to the next, so that cannot correct their logic." 
+ }, + { + "type": "text", + "bbox": [ + 0.201, + 0.731, + 0.8, + 0.76 + ], + "angle": 0, + "content": "- Long CoT allows for revisiting and revising earlier decisions by feedback and refinement, ensuring that it can verify and correct prior logical conclusions during the reasoning progress." + }, + { + "type": "list", + "bbox": [ + 0.201, + 0.699, + 0.8, + 0.76 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.784, + 0.695, + 0.799 + ], + "angle": 0, + "content": "2.2.4 Unified Application and Development History of Three Capabilities" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.808, + 0.826, + 0.864 + ], + "angle": 0, + "content": "The Long CoT discussed here represents a unified reasoning system that seamlessly integrates and applies three key capabilities: deep reasoning, reflective mechanisms, and exploration capabilities. In contrast, during the Short CoT era, these capabilities developed independently, each evolving in isolation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.826, + 0.913 + ], + "angle": 0, + "content": "As shown in Figure 2, early efforts primarily focused on enhancing deep reasoning within traditional CoT paradigms. This was followed by the gradual introduction of reflective mechanisms, which were initially based on human-designed pipelines. 
Over time, exploration capabilities were added, and" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.065 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.235, + 0.068, + 0.301, + 0.071 + ], + "angle": 0, + "content": "REASONING GROU" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "these components were ultimately merged, giving rise to the modern concept of Long CoT, a unified approach to reasoning that seeks to enhance all three capabilities in harmony." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.127, + 0.827, + 0.239 + ], + "angle": 0, + "content": "The progression of Long CoT is gradual, rather than a sudden emergence through isolated models like o1 [307] and R1 [227]. Instead, it develops gradually. For example, earlier systems, such as ToT [955], enhance exploration but lack reflective mechanisms, disqualifying them as Long CoT [95]. While GoT [48] incorporates self-reflection based on ToT, its original model still lacked robust deep reasoning, preventing it from qualifying as Long CoT at that time. It is also notable that modern Long CoT systems, often neglect earlier technologies. This article addresses this gap by tracing the evolution of each capability, with the final section offering a comprehensive analysis of the integrated Long CoT system." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.244, + 0.828, + 0.301 + ], + "angle": 0, + "content": "In summary, Long CoT and Short CoT represent distinct paradigms. 
Long CoT features a deeper, broader, and more reflective reasoning process, enhancing both accuracy and coherence. Short CoT, by contrast, is better suited to simpler, well-defined problems. This distinction highlights the scalability and adaptability of Long CoT, making it particularly effective for more complex reasoning." + }, + { + "type": "title", + "bbox": [ + 0.205, + 0.309, + 0.592, + 0.325 + ], + "angle": 0, + "content": "Key Difference: Unified Application of Three Capabilities" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.33, + 0.801, + 0.373 + ], + "angle": 0, + "content": "It is important to highlight that Long CoT integrates these three distinct capabilities to perform complex reasoning. In contrast, traditional Short CoT optimization typically focuses on only one of these characteristics." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.398, + 0.485, + 0.416 + ], + "angle": 0, + "content": "3 Long CoT Analysis & Evaluation" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.429, + 0.48, + 0.445 + ], + "angle": 0, + "content": "3.1 Analysis & Explanation for Long CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.455, + 0.827, + 0.538 + ], + "angle": 0, + "content": "Research on Long CoT has significantly enhanced RLLMs by improving reasoning accuracy, reducing errors, and supporting dynamic decision-making. However, several phenomena and their corresponding mechanisms remain inadequately summarized. This section addresses key topics, including the mechanisms of Long CoT and their underlying principles [644, 63, 545, 642]. Methodologically, two main perspectives have emerged to explain Long CoT: (1) External Behavior Analysis (§ 3.1.1) and (2) Internal Mechanism Analysis (§ 3.1.2)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.552, + 0.492, + 0.567 + ], + "angle": 0, + "content": "3.1.1 Long CoT External Behavior Analysis" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.576, + 0.825, + 0.606 + ], + "angle": 0, + "content": "The primary research stream focuses on explaining RLLM behaviors for Long CoT [25]. As illustrated in Figure 4, six key phenomena are identified and discussed for Long CoT in this part." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.615, + 0.827, + 0.755 + ], + "angle": 0, + "content": "Long CoT Emergence Phenomenon Research shows that contextual examples improve large models' generative abilities by guiding the formation of reasoning chains [1012, 671, 417, 343, 532, 846, 1017, 1141]. Wang et al. [759] and Lippmann and Yang [461] demonstrate that these examples standardize reasoning chain generation relevant to the answers both in in-context-learning and supervised-finetuning. In an experiment by Madaan et al. [538], removing problem-specific entities from contextual examples, while retaining only the logical structure, led to similar performance as using complete examples, highlighting the logical structure imitation of Long CoT during inference. From a learning perspective, Ye et al. [963] analyzes and reveals the three-stage developmental trajectory of Long CoT: early memorization, followed by in-distribution generalization, and ultimately cross-distribution generalization, thereby enabling the model to exhibit Long CoT capabilities." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.76, + 0.829, + 0.913 + ], + "angle": 0, + "content": "More recently, Stechly et al. [688] and Wang and Zhou [815] have shown that modifying the decoding process or designing specific prompts can activate the Long CoT within pre-trained models. They propose that CoT is embedded during pre-training and requires specific activation [941]. Further, Sadr et al. 
[642] focus the Long CoT source from the training data, and build on this with the notion of \"model attribution\", to specifically identify the training data most influential for specific outputs. Building on this, Guo et al. [227] and Xie et al. [886] investigate using rule-based reinforcement learning to directly activate Long CoT during pre-training, aiming to enhance performance [881]. Furthermore, Gandhi et al. [194] identify four key cognitive behaviors, including verification, backtracking, sub-target setting, and backward chaining, which successfully facilitate Long CoT. Qwen series models [926] inherently demonstrate these behaviors, which can be easily triggered by rule-based reinforcement. In contrast, the models of Llama series [168] lack these" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.947 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.233, + 0.06, + 0.307, + 0.066 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.233, + 0.066, + 0.306, + 0.07 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.09, + 0.339, + 0.241 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.344, + 0.091, + 0.475, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.481, + 0.091, + 0.657, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.662, + 0.091, + 0.815, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.251, + 0.828, + 0.335 + ], + "angle": 0, + "content": "Figure 4: Analysis of the six classic phenomena of Long CoT external 
behavior: (a) emergence of Long CoT in current RLLMs; (b) reasoning boundaries and limitations of current Long CoT systems; (c) overthinking caused by scaling beyond RLLMs' reasoning boundaries, leading to performance decay; (d) inference-time scaling, discussing mainstream scaling methods, corresponding scaling laws and their limitations; (e) use of process reward model (PRM) or outcome reward model (ORM); (f) exploration of the \"aha\" moment and its underlying causes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.368, + 0.825, + 0.438 + ], + "angle": 0, + "content": "capabilities and thus requires example-based reinforcement learning to improve significantly [65]. Moreover, Wang et al. [812] identify a pretraining scaling law that explains how increasing calculation size in RLLMs enhances their reasoning capabilities. Wang et al. [796] further explore the scaling law of Long CoT, showing that more fine-grained Long CoT granularity leads to more efficient and effective generalization performance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.455, + 0.827, + 0.593 + ], + "angle": 0, + "content": "Reasoning Boundary Phenomenon Recent research has highlighted the upper bounds and limitations of RLLMs across various reasoning tasks [303, 283, 684, 261, 185, 252]. Specifically, Bi et al. [53] investigate these bounds in code generation, showing that RLLMs struggle with tasks that exceed certain complexity thresholds [600], especially when imitating Long CoT samples of varying complexity. In the context of upper-bound performance, Merrill and Sabharwal [548] and Li et al. [430] focus on single-step arithmetic tasks, concluding that model performance is constrained by input length. Moreover, Feng et al. [177] proposes a mathematical model indicating that fixed-size models cannot produce accurate numerical answers beyond specific limits. 
However, increasing the number of reasoning steps improves a model's capability requirements to solve more complex problems." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.6, + 0.827, + 0.753 + ], + "angle": 0, + "content": "Inspired by these explorations, Chen et al. [90] first define the \"reasoning boundary\" phenomenon and quantify these limits, showing that surpassing an RLLM's reasoning capacity leads to performance decline [92]. Similarly, Zhou et al. [1130] introduce GSM-Infinite, linking different upper limits to accuracy levels. Chen et al. [90] also examine the interaction between these boundaries across tasks of varying complexity, providing insights into the effectiveness of Long CoT strategies [1085]. Moreover, Amiri et al. [12] propose a \"tight lower bound\" for Long CoT further guiding reasoning error reductions. Further, Baeumel et al. [28] suggest that due to its reliance on a single-digit lookahead heuristic, there are inherent boundaries in performing addition with multiple operands, which thus hinders the fundamental limitation of LLMs in scaling to more complex numerical reasoning. Liu et al. [483] further investigate the role of reinforcement learning in expanding these reasoning boundaries instead of relying solely on pretraining capabilities." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.773, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Overthinking Phenomenon Research has highlighted the overthinking phenomenon [103, 330, 574, 142, 357, 595], where performance improves with longer reasoning chains up to a threshold, after which it declines. In contrast, Xie et al. [886] and Ma et al. [534] find no significant correlation between reasoning length and accuracy. To explain this, one line of research suggests that Long CoT strategies [21, 441], like avoiding \"snowball errors\" [192]. Alternatively, Chen et al. [90], Wolf et al. 
[851] highlight a performance drop when the reasoning boundaries are exceeded, providing an explanation for the overthinking phenomenon. This suggests that reasoning length and logical complexity should be kept below a certain boundary [1080]. Building on this, Wu et al. [867] mathematically determine the feasible reasoning length for Long CoT. Finally, Chen et al. [93] introduces Ohm's law of Long CoT, which accurately predicts and controls performance." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.828, + 0.176 + ], + "angle": 0, + "content": "Inference-Time Scaling Phenomenon Recent advances in inference-time scaling algorithms [524, 843] have garnered significant attention, particularly for their ability to extend reasoning length and improve performance [524, 455, 875]. Specifically, Brown et al. [57] identify a phenomenon called \"Large Language Monkeys\", in which a series of reasoning tasks show that with enough trials, a correct result can be achieved. Additionally, o1 [307] and R1 [227] demonstrated that directly scaling the length of model inference improves final performance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.182, + 0.828, + 0.394 + ], + "angle": 0, + "content": "To understand inference-time scaling, we will discuss these two paradigms: (1) Sequential Scaling: Sequential scaling involves increasing the reasoning path length. While this can enhance performance, studies by Jin et al. [330] show that, beyond a certain point, longer reasoning paths can degrade performance due to error accumulation. They suggest an optimal path length that depends on the model's capabilities and task complexity [15, 652, 31]. Furthermore, Chen et al. [90] and Wu et al. 
[867] explain that excessive exploration lengths beyond the RLLM's inherent reasoning boundary lead to performance decay, which guides RLLMs for deeper reasoning capabilities [32]. (2) Parallel Scaling: Parallel scaling involves performing multiple reasoning steps and verifying the results. While it shows promise, Parashar et al. [583] and Wang et al. [820] argue that simply increasing inference time does not guarantee improved performance. Wu et al. [864] show that the computational FLOPs \\( N \\) of inference are correlated with the lower bound of performance error, which scales with \\( \\log N \\). Additionally, Chen et al. [93] establish an upper bound for parallel scaling, showing that RLLMs cannot exceed Pass@k verification through various verifiers. They further argue that sampling optimization cannot exceed the model's internal reasoning limitations, demonstrating that for \\( N \\) samples, accuracy is proportional to \\( \\frac{m}{(k / \\log N + b)^2} \\), where \\( m \\), \\( n \\), and \\( b \\) are model-dependent constants." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.403, + 0.828, + 0.639 + ], + "angle": 0, + "content": "PRM & ORM Selection Phenomenon As RLLMs evolve, it is crucial to navigate the debate around the selection between process supervision and outcome supervision, two key reinforcement learning paradigms for complex reasoning tasks. The phenomenon of choosing between these two approaches has become a pivotal issue, as it is essential to differentiate and decide which supervision strategy is more suitable for specific tasks [899, 187, 1059]. While process supervision is intuitively advantageous for long-term reward assignments, the exact relationship between the two approaches remains unclear. It is commonly believed that process supervision is more challenging due to the trajectory-level coverage problem, which demands significant effort to collect fine-grained supervision data [1102, 679]. 
Additionally, PRM faces the issue of reward hacking [13, 152, 573, 30, 399], where agents exploit flaws in the reward function to produce unintended behaviors [227]. Addressing this to surpass rule-based reward systems has become an important research area [227, 886, 594]. Furthermore, Lampinen et al. [368] and Tan [708] establish a causal link between intermediate steps and final answers in qualitative experiments. Building on this, Jia et al. [317] demonstrate that, under the standard data coverage assumption, reinforcement learning with outcome supervision is not statistically more challenging than process supervision, aside from polynomial factors. More strictly, He et al. [247] mathematically demonstrate that outcome-level rewards suffice for online reinforcement learning in RLLMs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.648, + 0.828, + 0.858 + ], + "angle": 0, + "content": "Aha Moment Phenomenon Earlier, Guo et al. [227] demonstrated that direct RL using rule-based rewards can trigger the aha moment, fostering natural self-reflection without supervision [172]. Following this, Team [721], Xie et al. [886] replicate this phenomenon. Further, Zhou et al. [1119] and Meng et al. [547] further extend this phenomenon to multimodal scenarios. However, Liu et al. [498] argue that the aha moment may not emerge in R1-Zero-like training. Instead, they observe that self-reflection patterns, such as superficial self-reflection (SSR), appear at epoch 0, the stage of base models. In this case, self-reflections do not necessarily lead to correct answers. Upon closer examination of R1-Zero training via RL, they find that the increasing response length results not from self-reflection, but from RL optimizing well-designed rule-based rewards. Moreover, Yang et al. [939] demonstrate that the \"aha moment\" is externally marked by increased use of anthropomorphic language during self-reflection and a dynamic adjustment of uncertainty in response to problem difficulty. 
This process enables the model to maintain reasoning without succumbing to \"Reasoning Collapse.\" Internally, it is characterized by a clear distinction between anthropomorphic traits and logical reasoning, with anthropomorphic language intensifying as the problem becomes more complex." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.826, + 0.914 + ], + "angle": 0, + "content": "Reinforcement Learning Entropy Phenomenon In reinforcement learning for Long CoT, the entropy mechanism is a crucial factor influencing the performance of RLLMs. Policy entropy measures the diversity and exploratory strength of a model's outputs. By managing this entropy" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.828, + 0.218 + ], + "angle": 0, + "content": "effectively, a model preserves exploration and thus excels on complex reasoning tasks. Earlier, Jang and Kim [310] investigate how initial entropy affects exploration in deep RL and proposed an entropy-aware initialization to encourage effective exploration. Building on this, Zhang et al. [1036] developed an Entropy-Regularized PRM that balances policy updates against large deviations from the starting distribution, thereby improving reasoning. Cheng et al. [116] found that high-entropy regions correlate positively with three exploratory reasoning behaviors: (1) key tokens linking logical steps, (2) self-verification and correction, and (3) rare behaviors underrepresented in the base model. Most recently, Agarwal et al. [5] introduced an Entropy Minimization method and demonstrated its strong impact on LLM performance in mathematical, physical, and coding tasks." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.223, + 0.828, + 0.349 + ], + "angle": 0, + "content": "However, recent research indicates that, during early training, policy entropy declines sharply, causing the model to converge prematurely on specific output patterns and limiting further reasoning improvement [144]. In reinforcement learning, policy entropy \\((H)\\) and downstream task performance \\((R)\\) follow an exponential relation: \\(R = -a\\cdot e^{H} + b\\), so a drop in entropy produces a rapid performance decline until saturation. This \"policy entropy collapse\" is common without entropy control, as reduced entropy constrains exploration and stalls reasoning gains [144]. To counter this collapse, two methods, Clip-Cov and KL-Cov, regulate entropy by constraining updates on high-covariance tokens. Clip-Cov clips their update magnitudes, whereas KL-Cov imposes a Kullback-Leibler penalty. Empirical results show both techniques prevent collapse and enhance reasoning performance [144]." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.364, + 0.506, + 0.379 + ], + "angle": 0, + "content": "3.1.2 Long CoT Internal Mechanism Analysis" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.388, + 0.816, + 0.404 + ], + "angle": 0, + "content": "The second stream of research investigates the internal mechanisms of Long CoT-related RLLMs." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.416, + 0.827, + 0.583 + ], + "angle": 0, + "content": "Reasoning Internal Mechanism Recent studies have explored the internal mechanisms underlying the coherent rationale outputs of Long CoT, with particular emphasis on attention mechanisms [675, 632]. These studies primarily examine neural substructures in RLLMs, framing CoT reasoning from a white-box perspective [819, 992, 233, 169]. Weston and Sukhbaatar [849] introduces the concept of System 2 Attention (S2A), which demonstrates Long CoT generation by selectively focusing attention on relevant information. 
Additionally, Li et al. [407] explore gradient distributions between direct output and Long CoT layers, revealing that Long CoT layers help maintain stability by distinguishing relevant from irrelevant reasoning [840]. Finally, Zhang et al. [1068] conceptualize RLLMs as finite state automata, offering further insight into how internal dynamics influence external behavior. Despite Short CoT's struggles with self-correction, Bertolazzi et al. [47] show that these models rely on consistency heads (attention heads) to assess the alignment of numerical values in arithmetic solutions through internal shortcuts." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.594, + 0.828, + 0.777 + ], + "angle": 0, + "content": "Knowledge Incorporating Mechanism Current RLLMs primarily focus on mathematics and coding but have shown potential for generalization to other knowledge-rich domains, sparking growing interest in the mechanism for integrating domain-specific knowledge into Long CoT [860, 886, 1105]. Prystawski et al. [609] suggest that generative models store entity knowledge learned during pre-training independently, with the reasoning process in Long CoT linking this knowledge across entities. Radha and Goktas [630] recently introduced the Probabilistic Mixture Model (PMM), which categorizes model outputs into reasoning, memorization, and guessing. They also propose an Information-Theoretic Consistency (ITC) analysis to quantify the relationship between model confidence and strategy selection. Additionally, Jin et al. [331] define \"Concept Depth\" as the lowest layers at which complex concepts are understood, demonstrating varying levels of knowledge integration in RLLMs. Ou et al. [572] examine RLLM knowledge internalization through knowledge loop evolution, arguing that new knowledge acquisition is shaped by its connection to existing knowledge, with the loop evolving from formation to optimization and from shallow to deep." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.792, + 0.373, + 0.807 + ], + "angle": 0, + "content": "3.2 Long CoT Evaluations" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.818, + 0.281, + 0.832 + ], + "angle": 0, + "content": "3.2.1 Metrics" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.843, + 0.826, + 0.913 + ], + "angle": 0, + "content": "In benchmarking, various metrics assess model performance across reasoning tasks, each focusing on different aspects of reasoning ability. These metrics evaluate both RLLMs' effectiveness in achieving desired outcomes and their learning efficiency. As a result, metrics for RLLMs have gained increasing attention in recent research. For mathematical or code-related tasks, three key metrics are commonly used: Accuracy, Pass@k, and Cons@k based on regex extraction:" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.093, + 0.556, + 0.107 + ], + "angle": 0, + "content": "Accuracy measures the proportion of correct outputs." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.109, + 0.806, + 0.123 + ], + "angle": 0, + "content": "- Pass@k evaluates the likelihood of generating at least one correct solution within \\( k \\) attempts." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.125, + 0.825, + 0.154 + ], + "angle": 0, + "content": "- Cons@k assesses consistency by determining the model's ability to consistently produce correct or logically coherent solutions across multiple attempts." 
+ }, + { + "type": "list", + "bbox": [ + 0.175, + 0.093, + 0.825, + 0.154 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.159, + 0.826, + 0.202 + ], + "angle": 0, + "content": "In scientific or commonsense question-answering tasks, evaluation often uses Exact Match (EM) and Accuracy based on regex extraction, where EM determines whether the model's output exactly matches the expected solution." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.207, + 0.783, + 0.222 + ], + "angle": 0, + "content": "For feedback techniques like ORM or PRM, Rank and Best-of-N metrics are often used:" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.228, + 0.825, + 0.255 + ], + "angle": 0, + "content": "- Rank measures whether the reward model correctly prioritizes the best reasoning processes from the top \\( k \\) candidates." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.258, + 0.826, + 0.286 + ], + "angle": 0, + "content": "- Best-of-N selects the highest-scoring solution from \\( N \\) generated reasoning trajectories, indirectly measuring the reward model's effectiveness based on final outcomes." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.228, + 0.826, + 0.286 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.303, + 0.365, + 0.318 + ], + "angle": 0, + "content": "3.2.2 Decoding Strategies" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.327, + 0.825, + 0.398 + ], + "angle": 0, + "content": "Decoding strategies are essential for controlling the inference process. Common approaches include Greedy Decoding, Beam Search, and Major@k. Both Greedy Decoding and Beam Search limit the sampling range to reduce randomness, guiding the model toward more consistent outputs. In contrast, Major@k identifies the most reliable solution by selecting the one with the highest consistency from a set of \\( k \\) candidate solutions." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.414, + 0.315, + 0.428 + ], + "angle": 0, + "content": "3.2.3 Benchmarks" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.438, + 0.825, + 0.495 + ], + "angle": 0, + "content": "In the realm of Benchmarks, the focus lies on assessing the reasoning capabilities of RLLMs across diverse domains. There are two primary categories: (1) Outcome Benchmarks, which focus on the holistic view of Long CoT reasoning, and (2) Process Benchmarks, which concentrate on the local view of the Long CoT process or individual capabilities." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.507, + 0.825, + 0.536 + ], + "angle": 0, + "content": "Outcome Benchmarks In the realm of Outcome Benchmarks, the first focus lies on evaluating the logical reasoning capabilities:" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.543, + 0.827, + 0.653 + ], + "angle": 0, + "content": "- Complex Mathematics: A central focus in complex mathematics is evaluating benchmarks like GSM8K [141] and MATH [253], which assess basic mathematical problem-solving abilities [1125, 1112]. Recent additions, such as AIME 2024 [8], AIME 2025 [571], MATH-500 [449], AMC 2023 [9], USAMO [598], OlympiadBench [239], and OlympiadArena [298], expand the evaluation of LLM performance in mathematics. Moreover, Putnam-AXIOM [224] and FrontierMath [210] introduce more complex problems that challenge future reasoning systems. Additionally, ThinkBench [291] and MATH-Perturb [288] focus on robust evaluation for Long CoT [38, 987]." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.657, + 0.827, + 0.74 + ], + "angle": 0, + "content": "- Complex Coding: Complex coding benchmarks are also vital, with competitions like Codeforces, SWEbench [327], CodeContests [427], and LiveCodeBench [309] evaluating LLM coding and problem-solving skills. 
Notable additions such as MHPP [148], ProBench [934], HumanEval Pro, MBPP Pro [993], and EquiBench [833] enhance the scope and complexity of coding challenges. Moreover, some studies have explored applying these benchmarks in real-world code development scenarios for automatic code generation and evaluation [243, 744]." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.744, + 0.827, + 0.828 + ], + "angle": 0, + "content": "- Commonsense Puzzle: Commonsense puzzle benchmarks, including LiveBench [850], BIG-Bench Hard [705] and ZebraLogic [450], assess models' ability to reason about commonsense situations. The ARC [131] and DRE-Bench [947] is often viewed as a challenging commonsense-based AGI test. JustLogic [87] further contributes to the evaluation of deductive reasoning and commonsense problem-solving. Moreover, Li et al. [382] introduce QuestBench, a benchmark designed to evaluate the ability of RLLMs to generate insightful and meaningful questions." + }, + { + "type": "list", + "bbox": [ + 0.175, + 0.543, + 0.827, + 0.828 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.834, + 0.825, + 0.864 + ], + "angle": 0, + "content": "The second focus area concerns Knowledge Benchmarks, essential for evaluating a model's capability in complex reasoning across various tasks for out of distribution evaluation [776]:" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.87, + 0.827, + 0.913 + ], + "angle": 0, + "content": "- Scientific Reasoning: Scientific Reasoning benchmarks, such as GPQA Diamond [637], MMLU-Pro [821], and SuperGPQA [165], assess multi-domain reasoning in fields like chemistry, biology, and physics [157]. 
These benchmarks test models' ability to not only accumulate knowledge" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.507, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.092, + 0.825, + 0.146 + ], + "angle": 0, + "content": "but also integrate it for problem-solving. Humanity's Last Exam (HLE) [602] further challenges models by requiring deep interdisciplinary reasoning across scientific disciplines. Further, Chung et al. [140] propose TPBench to evaluate the effectiveness of RLLMs in solving theoretical physics problems." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.15, + 0.828, + 0.262 + ], + "angle": 0, + "content": "- Medical Reasoning: In the realm of Medical Reasoning, the need for complex, domain-specific, and accurate reasoning is paramount [1094, 1024, 905, 589]. Benchmarks, such as MedQA [328], JAMA Clinical Challenge [76], LLMEval-Med [1049] and Medbullets [76], simulate diagnostic and treatment decision-making processes, reflecting real-world medical practice. These benchmarks evaluate a model's handling of medical knowledge and reasoning, from diagnosis to treatment planning. Additionally, MedXpertQA [1150] introduces a comprehensive evaluation framework combining text and multimodal data, specifically assessing AI's reasoning capabilities in healthcare." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.279, + 0.367, + 0.294 + ], + "angle": 0, + "content": "3.2.4 Process Evaluations" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.304, + 0.827, + 0.403 + ], + "angle": 0, + "content": "Deep Reasoning Benchmarks Recent progress in RLLMs underscores the need for specialized benchmarks to evaluate their deep reasoning abilities in Long CoT [375, 1133]. Notably, Lin et al. 
[450] introduces ZebraLogic, a framework for assessing logical reasoning, especially in complex non-monotonic scenarios. Similarly, BigGSM [90] and GSM-Ranges [670] focus on perturbing numerical values to test logical and arithmetic reasoning in edge cases beyond the models' training distribution. ROSCOE [212], ReCEval [606], DiVeRSe [425], HLV [71], and CoT-Kinetics [51] are designed to assess each step in the deep reasoning process during Long CoT tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.415, + 0.828, + 0.502 + ], + "angle": 0, + "content": "Exploration Benchmarks Several studies assess RLLMs' exploration capabilities in Long CoT tasks. Specifically, Sys2Bench [583] evaluates the exploration and scaling abilities of RLLMs, emphasizing generalization across diverse tasks. BanditBench [566] extends this by testing model performance in interactive environments, offering insights into practical applications. Additionally, Heyman and Zylberberg [254] introduce a graph coloring problem to assess reasoning and spatial exploration in complex problem-solving scenarios." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.513, + 0.829, + 0.709 + ], + "angle": 0, + "content": "Reflection Benchmarks Reflection benchmarks measure RLLMs' ability to identify, reflect upon, and correct errors in Long CoT reasoning. These benchmarks fall into two categories: feedback and refinement. (1) Feedback Benchmark: These benchmarks assess the ability of LLMs to detect errors and respond to feedback for improvement. For example, Lambert et al. [367] introduces RewardBench to evaluate RLLMs' reward capabilities. This framework is extended by Multimodal RewardBench[960], and CodeCriticBench [1025] to include multimodal and code contexts, respectively. Benchmarks such as ProcessBench [1102], PRMBench [679], MR-Ben [1021], and DeltaBench [250] focus on error detection and correction across various tasks at the step level. 
Additionally, RealL Mistake [337] and JudgeBench [709] address more real-world error evaluation. (2) Refinement Benchmark: These benchmarks focus on error correction in complex tasks. CriticBench [456] assesses critique-correction capabilities, while MLDebugging [287], and ErrorRadar [922] specializes in coding or multimodal reasoning error detection and refinement. FinerReason [72] introduces a commonsense puzzle for broader feedback and refinement evaluations. Medec [1] adapts error correction to healthcare, addressing medical issues." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.721, + 0.377, + 0.735 + ], + "angle": 0, + "content": "3.2.5 Advanced Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.746, + 0.829, + 0.913 + ], + "angle": 0, + "content": "Agentic & Embodied Reasoning Agentic and Embodied reasoning requires models to demonstrate an understanding of real-world interactions, tool use, and adaptive reasoning in response to change. To assess real-world understanding, Wang et al. [798] introduce a benchmark that evaluates agents' ability to reason about physical concepts. Zhang et al. [1064] extend this by assessing agents' interactions with real-world physics. Additionally, realistic tasks often demand complex planning and tool usage, necessitating benchmarks to evaluate agent reasoning. These benchmarks assess agents' abilities to navigate and complete tasks in digital environments. Building on this, Huang et al. [283] propose a framework for evaluating decision-making in multi-agent, competitive settings. Nath et al. [562] introduce ToolComp, a benchmark designed to evaluate multi-step tool-use reasoning. 
To analyze adaptive reasoning in the face of real-world change, OSWorld [887], CogAgent [260], Mobile-Agent-E [828], WebShop [954], WebArena [1126], WGSR-Bench [972], and WebGames [735] assess AI systems across domains such as operating systems, mobile GUIs, browser tasks, and interactive" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.828, + 0.123 + ], + "angle": 0, + "content": "entertainment [1106, 780, 512, 552]. Hu et al. [272] present Text2World, which evaluates agents' ability to generate interactive environments from text to test agent adaptability [995]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.136, + 0.828, + 0.18 + ], + "angle": 0, + "content": "Multimodal Reasoning Multimodal reasoning refers to a system's ability to integrate and reason across diverse input types, including text, images [316]. This capability is crucial for solving complex problems that require information from diverse formats." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.186, + 0.828, + 0.257 + ], + "angle": 0, + "content": "- Complex Mathematics: Mathematical reasoning often integrates both textual and visual components, such as equations, graphs, or diagrams [921]. Specifically, challenges like MathVista [508], MathVision [782], MathVerse [1054], M3CoT-Math [91], CMMaTH [433], EnigmaEval [763], CoMT-Geometry [125], and PGPS9K [1050] aim to advance multimodal reasoning in mathematics, improving the evaluation of multimodal Long CoT logic." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.258, + 0.828, + 0.329 + ], + "angle": 0, + "content": "- Complex Code: The second area of focus involves code-related reasoning, where systems interpret textual descriptions and code snippets. 
Benchmarks like HumanEval-V [1035], Code-Vision [767], Plot2Code [852], and ChartMimic [931] evaluate systems' capabilities to generate or interpret code from natural language and multimodal inputs for assessing systems that integrate natural language processing with programming tasks." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.331, + 0.828, + 0.401 + ], + "angle": 0, + "content": "- **Complex Science:** This area involves integrating scientific texts with related diagrams or experimental data. Benchmarks like ScienceQA [507], M3CoT-Science [91], BMMR [874], and ScienceBoard [698] evaluate how well models combine science information with Long CoT reasoning across various scientific domains [966]. Further, Guo et al. [229] propose MolPuzzle for the evaluation of molecular structure elucidation." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.404, + 0.828, + 0.515 + ], + "angle": 0, + "content": "- Commonsense Puzzle: This area focuses on commonsense reasoning, where systems combine reasoning cues and images to make deeper conclusions. Chen et al. [91] introduce M3CoT-Commensense, which incorporates commonsense Long CoT reasoning for complex multimodal interactions. Further, PuzzleVQA [128], MMReason [953] and LEGO-Puzzles [711] focus more on abstract and spatial puzzle reasoning, respectively. Additionally, Wang et al. [760] propose two benchmarks: Clue-Visual Question Answering (CVQA), which tests visual comprehension through three task types, and Clue of Password-Visual Question Answering (CPVQA), which features two task types focusing on the interpretation and application of visual data." 
+ }, + { + "type": "list", + "bbox": [ + 0.175, + 0.186, + 0.828, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.531, + 0.829, + 0.645 + ], + "angle": 0, + "content": "AI for Research Recent advancements in AI have significantly advanced scientific research [94, 1124, 817, 215], with platforms like SciWorld [798] improving the research process. Simultaneously, Pricope [608] and Chan et al. [67] introduce a machine-learning platform to evaluate the potential of RLLMs in automating experiments. Several studies also examine RLLMs' ability to generate innovative research ideas. For instance, Si et al. [672] conduct evaluations with over 100 NLP researchers to assess RLLMs' creativity, revealing notable limitations [404, 856, 726]. Additionally, Li et al. [434] introduce SolutionBench, a benchmark for assessing systems' ability to generate feasible solutions for complex engineering problems." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.663, + 0.462, + 0.681 + ], + "angle": 0, + "content": "4 Deep Reasoning for Long CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.697, + 0.827, + 0.809 + ], + "angle": 0, + "content": "Deep reasoning capabilities primarily require profound depth and comprehensiveness in cognitive and reasoning processes. In the absence of such capabilities, RLLMs suffer significant performance declines [758, 823]. 
Current methods for enhancing deep reasoning can be categorized into two main approaches: (1) Deep Reasoning Format (\\(\\S\\) 4.1), which involves utilizing various reasoning execution formats to maximize the reasoning step length \\(k\\) within reasoning boundary \\(\\mathcal{B}_l\\) in Equation (2), by selecting the most suitable reasoning format; and (2) Deep Reasoning Learning (\\(\\S\\) 4.2), which focuses on improving the model's internal capabilities to enhance its deep reasoning abilities, thereby extending the reasoning boundary \\(\\mathcal{B}_l\\) in Equation (2) intrinsically." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.829, + 0.383, + 0.844 + ], + "angle": 0, + "content": "4.1 Deep Reasoning Format" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.826, + 0.913 + ], + "angle": 0, + "content": "As illustrated in Figure 5, deep reasoning formats can be categorized into three main types: natural language (\\(\\S\\) 4.1.1), structured language (\\(\\S\\) 4.1.2), and latent-space reasoning (\\(\\S\\) 4.1.3), the latter of which is further subdivided into token-, vector-, and manager-driven latent reasoning. The reasoning performance across these formats is presented in Table 1." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.065 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.065, + 0.306, + 0.07 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.093, + 0.398, + 0.106 + ], + "angle": 0, + "content": "(a) Natural Language Deep Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.112, + 0.483, + 0.143 + ], + "angle": 0, + "content": "To predict the output of the given input for Conway's Game of Life, we need to apply the rules of the game to each cell on the board. The rules are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.144, + 0.483, + 0.164 + ], + "angle": 0, + "content": "1. Any live cell with fewer than two live neighbors dies (underpopulation)..." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.165, + 0.312, + 0.174 + ], + "angle": 0, + "content": "Given Input Board: ..." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.175, + 0.329, + 0.184 + ], + "angle": 0, + "content": "\\(\\spadesuit\\) Step-by-Step Analysis: ..." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.184, + 0.439, + 0.194 + ], + "angle": 0, + "content": "\\(\\spadesuit\\) Final Output: After applying the rules to each cell..." 
+ }, + { + "type": "list", + "bbox": [ + 0.198, + 0.144, + 0.483, + 0.194 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.442, + 0.165, + 0.49, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.508, + 0.093, + 0.739, + 0.107 + ], + "angle": 0, + "content": "(b) Structured Language Deep Reasoning" + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.113, + 0.538, + 0.132 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.54, + 0.112, + 0.694, + 0.131 + ], + "angle": 0, + "content": "import necessary packages from collections import Cou" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.132, + 0.67, + 0.141 + ], + "angle": 0, + "content": "import necessary packages" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.142, + 0.691, + 0.149 + ], + "angle": 0, + "content": "from collections import Counter" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.15, + 0.787, + 0.16 + ], + "angle": 0, + "content": "all class and function definitions in the code" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.16, + 0.585, + 0.169 + ], + "angle": 0, + "content": "file, if any" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.169, + 0.646, + 0.177 + ], + "angle": 0, + "content": "class Solution(object):" + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.177, + 0.735, + 0.194 + ], + "angle": 0, + "content": "defgameOfLifeInfinite(self, live): ctr = Counter((I, J) for i, j i" + }, + { + "type": "image", + "bbox": [ + 0.736, + 0.169, + 0.808, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.191, + 0.211, + 0.38, + 0.225 + ], + "angle": 0, + "content": "(c) Latent Space Deep Reasoning" + }, + { + "type": "image", + "bbox": [ + 0.192, + 0.226, + 0.23, + 0.3 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.23, + 0.226, + 0.424, + 0.308 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + 
"bbox": [ + 0.424, + 0.209, + 0.623, + 0.29 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.45, + 0.291, + 0.6, + 0.309 + ], + "angle": 0, + "content": "Reasoning Vector Driven Latent Space Deep Reasoning" + }, + { + "type": "image", + "bbox": [ + 0.623, + 0.209, + 0.813, + 0.291 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.65, + 0.291, + 0.785, + 0.309 + ], + "angle": 0, + "content": "Reasoning Manager Driven Latent Space Deep Reasoning" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.319, + 0.828, + 0.361 + ], + "angle": 0, + "content": "Figure 5: Three main categories of deep reasoning formats: natural language, structured language, and latent-space reasoning (subdivided into token-, vector-, and manager-driven latent reasoning), with examples drawn from Li et al. [401]." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.39, + 0.47, + 0.405 + ], + "angle": 0, + "content": "4.1.1 Natural Language Deep Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.414, + 0.828, + 0.54 + ], + "angle": 0, + "content": "Traditionally, researchers have sought to adapt natural language for intuitive and free-flowing deep reasoning [836, 1118, 303, 617, 1070, 765, 205]. Early work by Wei et al. [836] demonstrated that the use of natural language Long CoT significantly enhances the reasoning capabilities of RLLMs. Further, the Natural Program framework [460] allows RLLMs to engage in deeper natural language reasoning by ensuring a more structured and rigorous logical analysis. More recently, CodeI/O [401] has introduced a technique that reorganizes code-based reasoning patterns into natural language formats, further boosting the reasoning potential of RLLMs [36]. Similarly, Li et al. [387] propose CoRT, which integrates code into reasoning to facilitate a mixture of formats, resulting in improved cognitive performance." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.556, + 0.492, + 0.571 + ], + "angle": 0, + "content": "4.1.2 Structured Language Deep Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.58, + 0.828, + 0.734 + ], + "angle": 0, + "content": "Structured language deep reasoning encompasses various approaches designed to program [100, 464, 687, 591, 198, 845, 830, 1044] or symbolic language [605, 158, 451, 372, 933, 604, 37, 40, 797, 380] format for enhanced deep reasoning. In this context, most studies focus on utilizing code to better enhance the mathematical reasoning capabilities [389, 107, 978, 85]. Xu et al. [897] propose a neural-symbol self-training framework guided by the environment, addressing both the scarcity of symbolic data and the limitations of symbolic processing in LLMs. Additionally, Liao et al. [443] present SKIntern, which refines symbolic RLLMs through curriculum learning and linear attenuation, enabling the internalization of symbolic knowledge with fewer examples, reducing computational costs, and accelerating inference. Furthermore, Ranaldi et al. [634] introduce QuaSAR, a CoT variant that directs LLMs to operate at higher abstraction levels through quasi-symbolic reasoning, thus improving natural language reasoning and providing more precise structural representations." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.75, + 0.434, + 0.765 + ], + "angle": 0, + "content": "4.1.3 Latent Space Deep Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.773, + 0.828, + 0.913 + ], + "angle": 0, + "content": "Latent space deep reasoning encompasses techniques designed to enhance the reasoning abilities of LLMs by leveraging operations within continuous latent spaces [684, 151, 640, 324]. 
These approaches can be categorized into three main paradigms: (1) Reasoning Token-Driven Latent Space Deep Reasoning: Early work [810, 1013] introduce the concept of \"planning tokens\" or \"thought tokens\" to guide reasoning within latent spaces [949, 1008]. Further, Coconut [236] expands on this through the maintenance of multiple alternative reasoning paths, increasing both complexity and efficiency [1069, 706]. At the extreme, Heima [662] condenses the entire Long CoT process into a single token, yielding substantial computational savings. (2) Reasoning Vector Driven Latent Space Deep Reasoning: Building on the previous paradigm, LTM [356] conceptualizes the layers of LLMs as \"thought blocks\" and introduces the concept of \"thought vectors\" for each layer. This" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.043, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.066 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.066, + 0.306, + 0.07 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "table", + "bbox": [ + 0.182, + 0.089, + 0.816, + 0.56 + ], + "angle": 0, + "content": "
ModelBase ModelGSM8kMATHGPQAOlympiadBenchLiveCodeBench
Latent Space Deep Reasoning
No-CoT [151]Mistral-7B [318]38.0----
SQ-VAE [810]Llama-2-7B [743]40.07.0---
RecurrentBlock-3.5B [204]-42.1----
ICoT-SI [151]Mistral-7B [318]51.0----
Natural Language Deep Reasoning
Self-Rewarding [114]Llama-2-7B [743]40.010.7---
Llama-3.1-8B [168]-56.720.3---
MetaMath [983]Llama-2-7B [743]66.5----
OVM [979]Llama-2-7B [743]73.7----
NuminaMath-7B-CoT [397]-75.455.2-19.9-
Qwen2-7B [925]-79.944.2-21.3-
Qwen2-Math-7B [927]-80.450.4-38.2-
Internlm2-math-plus-7B [974]-84.054.4-18.8-
OMI2 [401]Qwen2.5-Coder-7B [301]84.172.336.2-27.2
Llama-3.1-70B [168]-85.541.4---
CODEI/O++ [401]Qwen2.5-Coder-7B [301]85.772.140.6-29.1
CODEI/O [401]Qwen2.5-Coder-7B [301]86.471.943.3-28.5
WI [401]Qwen2.5-Coder-7B [301]87.071.439.1-26.0
WI (Full) [401]Qwen2.5-Coder-7B [301]87.071.142.9-27.6
OMI2 (Full) [401]Qwen2.5-Coder-7B [301]88.573.240.9-28.4
DeepSeekMath-7B-RL [658]-88.251.7-19.0-
Llama-3.1-405B [168]-89.053.8---
CoMAT [371]GPT-4 [3]93.7-40.4--
CoT [634]GPT-4 [3]94.5-41.850.2-
FCoT [523]GPT-4 [3]95.0----
Qwen2.5-Math-7B-Instruct [927]-95.283.6-41.6-
MathPrompter [303]GPT-4 [3]95.6----
Qwen2.5-Math-72B-Instruct [927]-95.985.9-49.0-
DeepSeek-R1-Distill-Qwen-7B [227]--92.8-49.137.6
DeepSeek-R1-Distill-Qwen-32B [227]--94.3-62.157.2
Structured Language Deep Reasoning
STaR [1012]Llama-2-7B [743]58.216.0---
ENVISIONS [897]Llama-2-7B [743]59.019.0---
MAmmoTH [1006]Code-Llama-7B [639]59.4----
MathCoder-CL [783]Code-Llama-7B [639]67.830.2---
ToRA-Code [217]Llama-2-7B [743]72.6----
Brain [107]Code-Llama-7B [639]74.0----
DeepSeek-Coder-7B [226]-77.444.4---
SIaM [978]Qwen-2-Math-Base81.550---
OC-SFT-1 [401]Qwen2.5-Coder-7B [301]86.770.937.7-27.5
PyEdu [401]Qwen2.5-Coder-7B [301]85.871.440.9-25.8
Qwen2.5-Math-7B-Instruct [927]-94.685.2-55.6-
Qwen2.5-Math-72B-Instruct [927]-95.888.1-60.6-
QuaSAR [634]GPT-4 [3]96.5-55.444.6-
MathDivide [687]GPT-4 [3]96.8---
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.566, + 0.828, + 0.595 + ], + "angle": 0, + "content": "Table 1: Performance of various deep reasoning formats, sorted primarily by GSM8K scores. “-” indicates that the paper did not report this score." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.621, + 0.828, + 0.816 + ], + "angle": 0, + "content": "approach allows for the scaling of inference-time computations by implicitly performing reasoning within the latent space through recurrent depth. (3) Reasoning Manager Driven Latent Space Deep Reasoning: Inspired by these, Schone et al. [647], Geiping et al. [204], and Saunshi et al. [646] propose a mechanism similar to a continuous reasoning manager, which iteratively governs a trained \"recurrent block\" as a recurrent \"thought block\" [511]. This method integrates deeper model layers during reasoning, enhancing performance without needing specialized training data, and even outperforming larger RLLMs. Additionally, ITT [109] leverages the original transformer layer as a recurrent \"thought block\", selecting key tokens via adaptive token routing and controlling reasoning depth with residual thinking connections, enabling more efficient processing of critical tokens. Further, System-1.5 Reasoning [808] defines two dynamic shortcuts. The Model Depth Shortcut (DS) lets non-critical tokens exit early via lightweight adapter branches while routing critical tokens through deeper Transformer layers, thus supporting adaptive, vertical reasoning. The Step Shortcut (SS) reuses hidden states across decoding steps to bypass trivial iterations and enable horizontal reasoning in latent space." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.831, + 0.395, + 0.846 + ], + "angle": 0, + "content": "4.2 Deep Reasoning Learning" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.828, + 0.914 + ], + "angle": 0, + "content": "Insufficient deep reasoning in RLLMs can significantly degrade performance [758, 823]. As a result, research has focused on improving reasoning through training. Supervised fine-tuning (SFT) [1058] stabilizes model outputs by serving as a memory process [883], while reinforcement learning (RL) enables generalization and self-learning [227, 137, 276, 898]. Recent studies for deep reasoning" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.226, + 0.071 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.308, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.069 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "image_caption", + "bbox": [ + 0.223, + 0.093, + 0.445, + 0.111 + ], + "angle": 0, + "content": "(a) Deep Reasoning Imitation" + }, + { + "type": "image", + "bbox": [ + 0.188, + 0.113, + 0.484, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.524, + 0.094, + 0.78, + 0.111 + ], + "angle": 0, + "content": "(b) Deep Reasoning Self-Learning" + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.113, + 0.811, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.202, + 0.825, + 0.245 + ], + "angle": 0, + "content": "Figure 6: The different learning strategies of deep reasoning learning, including deep reasoning imitation of the data from advanced deep reasoning systems, like advanced RLLMs, MCTS, etc.; deep reasoning self-learning from 
preference-based RL by implicit reward." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.273, + 0.825, + 0.356 + ], + "angle": 0, + "content": "learning have explored using SFT to imitate advanced reasoning in RLLMs and applying RL to enhance self-improvement in reasoning. As illustrated in Figure 6, this section outlines two key approaches to improve deep reasoning: (1) Deep Reasoning Imitation (§ 4.2.1), which involves learning reasoning from human-annotated or distilled data through SFT, and (2) Deep Reasoning Self-Learning (§ 4.2.2), where models improve reasoning through preference-based RL with implicit rewards. The performance of these methods is shown in Table 2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.372, + 0.407, + 0.387 + ], + "angle": 0, + "content": "4.2.1 Deep Reasoning Imitation" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.395, + 0.827, + 0.7 + ], + "angle": 0, + "content": "Deep reasoning in RLLMs can be effectively achieved by mimicking advanced reasoning systems, such as human reasoning [558, 61, 115, 403], advanced RLLMs [227, 58, 957, 370, 102], and scaling-augmented RLLMs [410, 1003, 596, 1136, 41]. This approach enables the model to learn complex reasoning patterns and generalize across tasks [937, 416]. Specifically, (1) Imitation from Human: Earlier, Cobbe et al. [141] first propose the deep reasoning imitation paradigm using human examples. ALT [558] improves RLLM reasoning by generating larger datasets of human-annotated logical templates, which fosters deeper reasoning [241]. To enhance diversity, EIT [61] promotes simpler human-generated plans, while LLMs contribute more nuanced reasoning, facilitating collaboration between human input and AI. (2) Imitation from Advanced RLLMs: A body of work utilizes zero-shot prompting to guide large teacher RLLMs in generating reasoning rationale, which is then used to fine-tune smaller RLLMs, marking the beginning of deep reasoning imitation [256, 352, 938, 521]. 
Additionally, AceMath [500] applies few-shot prompting to distill Long CoT samples from advanced LLMs, followed by multi-stage quality-guided SFT to enhance performance. Chen et al. [107] separate the data synthesis process into planning and reasoning stages, thereby improving reasoning quality. DART-Math [738] effectively distills complex queries requiring deeper reasoning during synthesis, advancing deep reasoning capabilities. Further, Ahmad et al. [7] propose OpenCodeReasoning, expanding this paradigm to the code scenarios. (3) Imitation from Scaling-augmented RLLMs: Earlier, Bansal et al. [34] enhance data quality by scaling the sampling size and length, boosting imitation performance [481, 1005]. Yang et al. [927] and Zhao et al. [1090] further improve data quality by scaling sampling and selecting samples through sample feature or an additional reward model. Additionally, Li et al. [410] identify optimal deep reasoning paths through MCTS, advancing imitation effectiveness." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.705, + 0.827, + 0.845 + ], + "angle": 0, + "content": "Recent studies [299, 550] show that distilling knowledge from advanced RLLM APIs like O1 [307] and R1 [227] significantly enhances the performance of smaller LLMs [424, 223]. This method, employing supervised fine-tuning, boosts model performance on complex mathematical reasoning tasks, sometimes surpassing the teacher models' performance. Building on these findings, LIMO [967], S1 [560], and RedStar [902] argue that a large number of imitation samples is unnecessary. They demonstrate that even a minimal set of samples can activate deep reasoning capabilities in foundational LLMs. For practical applications, Turtel et al. [747] showcase how these techniques can predict future events beyond a model's knowledge cutoff. Sun et al. [701], Yang et al. [928] and Zhao et al. 
[1093] further enhance deep reasoning imitation by selecting high-quality samples from large datasets, thereby improving the quality of the imitation data." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.86, + 0.439, + 0.875 + ], + "angle": 0, + "content": "4.2.2 Deep Reasoning Self-Learning" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "While simple imitation can yield strong performance, current models still rely heavily on human annotations or outputs from more advanced models for both imitation and distillation [502]. To" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.043, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.069 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "table", + "bbox": [ + 0.179, + 0.091, + 0.82, + 0.482 + ], + "angle": 0, + "content": "
ModelData SizeBase ModelGSM8KMATHMATH-500AIME2024GPQAOlympiadBench
Deep Reasoning Imitation
SFT [938]200KLlama-3.1-8B [168]---54.13.5-
Retro-Enh [115]14MLlama-3-8B [168]45.121.7----
Query-Exp [115]24MLlama-3-8B [168]51.323.1----
Res-Div [115]14MLlama-3-8B [168]53.023.2----
MetaMath [738]0.40MMistral-7B [318]76.529.8---5.9
ALT-FLDx2 [558]100KLlama-3.1-70B [168]83.324.4----
EIT [61]15KLlama-2-70B [743]84.132.5----
MathScale [738]2.0MMistral-7B [318]74.835.2----
Tutor-Amp [115]11MLlama-3-8B [168]64.435.9----
MMIQC [738]2.3MMistral-7B [318]75.437.4---9.4
VRT [738]0.59MMistral-7B [318]82.338.7---8.7
KPMath-Plus [738]1.6MMistral-7B [318]82.146.8----
Llama-2-70B-Xwin-Math-V1.1 [385]1.4MLlama-2-70B [743]90.252.5---16.3
DART-Math-Mistral-7B [738]591KMistral-7B [318]81.145.5---14.7
DART-Math-Llama-3-70B [738]591KLlama-3-70B [168]89.656.1---20.0
Rejection Sampling [410]197KQwen2.5-7B [926]87.170.0-10.0-27.1
Evol-Instruct-7B [514]905KQwen2.5-Math-7B [927]88.5-77.416.7--
FastMCTS [410]288KQwen2.5-7B [926]88.974.0-20.0-27.5
KPDDS-7B [295]800KQwen2.5-Math-7B [927]89.9-76.010.0--
DeepSeek-R1-Distill-Qwen-7B [227]800KQwen2.5-7B-Instruct [926]91.7-91.643.3--
Openmathinstruct-7B [740]14MQwen2.5-Math-7B [927]92.0-79.610.0--
NuminaMath [967]100KQwen2.5-Math-7B [927]92.9-81.820.0--
PromptCoT-DS-7B [1090]115KDeepSeek-R1-Distill-Qwen-7B [227]92.6-93.060.0--
PromptCoT-Qwen-7B [1090]905KQwen2.5-Math-7B [927]93.3-84.026.7--
AceMath-7B-Instruct [500]1.2MQwen2-Math-7B-Instruct [927]93.783.1---42.2
AceMath-72B-Instruct [500]1.2MQwen2.5-Math-72B-Instruct [927]96.486.1---48.4
NuminaMath [967]100KQwen2.5-32B-Instruct [926]--59.26.525.836.7
OpenThoughts [967]114KQwen2.5-32B-Instruct [926]--80.650.242.956.3
Sky-T1-32B-Preview [724]17KQwen2.5-32B-Instruct [926]--82.443.356.8-
Journey Learning [299]5KQwen2.5-Math-72B [927]--87.243.3--
STILL-2 [550]3.9KQwen2.5-32B-Instruct [926]--90.246.755.1-
Bespoke-32B [362]17KQwen2.5-32B-Instruct [926]--93.063.358.1-
s1 [560]1KQwen2.5-32B-Instruct [926]--93.056.759.6-
DeepSeek-R1-Distill-Qwen-32B [227]800KQwen2.5-32B-Instruct [926]--94.372.662.1-
LIMO [967]817Qwen2.5-32B-Instruct [926]--94.815.866.766.8
Deep Reasoning Self-Learning
DPO [302]40KDeepSeek-Math-7B-Base [658]74.834.9----
RefT [302]40KDeepSeek-Math-7B-Base [658]71.436.0----
Self-Explore [302]40KDeepSeek-Math-7B-Base [658]78.637.7----
SimPO [723]10KQwen2.5-Math-7B-Instruct [927]88.840.056.6---
DPO [446]11KDeepSeek-Math-7B-Instruct [658]-48.7----
TPO [446]11KDeepSeek-Math-7B-Instruct [658]-51.3----
DPO [446]11KQwen2-7B-Instruct [925]-54.3----
TPO [446]11KQwen2-7B-Instruct [925]-55.5----
MCTS [74]15KDeepSeek-Math-7B-Base [658]83.264.0----
SBS [74]15KDeepSeek-Math-7B-Base [658]84.166.3----
FastMCTS+Branch-DPO [410]152KFastMCTS-7B [410]89.975.4-20.0-29.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.489, + 0.825, + 0.518 + ], + "angle": 0, + "content": "Table 2: Performance of various deep reasoning learning methods, sorted primarily by Math or Math-500 scores. “-” indicates that the paper did not report this score." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.601, + 0.825, + 0.644 + ], + "angle": 0, + "content": "address this limitation, recent research has focused on enabling more advanced reasoning through techniques like self-play and self-learning [948, 1077, 409, 624]. Specifically, self-learning methods can be classified into two paradigms, differentiated by their sampling strategies:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.649, + 0.827, + 0.913 + ], + "angle": 0, + "content": "(1) Self-Learning from Direct Sampling: The earliest method, STaR [1012], utilizes In-Context Learning (ICL) to sample deep reasoning results [657] and uses the correctness of the final answer as an implicit reward for self-learning [258, 581, 582, 1059, 826, 462]. Further, ReST [225] extends this by introducing a Grow-Improve paradigm, where self-generated reasoning is first annotated with rewards and then enhanced via offline RL algorithms. However, these approaches can be fragile, especially when the reward process lacks robustness. Inspired by the Expectation-Maximization (EM) algorithm, Singh et al. [674] propose a method that generates rewards and iteratively optimizes LLMs to achieve the best performance on a validation set, significantly improving robustness. To further strengthen the reward process, a series of work introduces a method to adapt incorrect solutions, training a verifier [155, 262] or utilize entropy [809, 1040] to select or refine the reward process and improve self-learning quality. 
(2) Self-Learning from Tree Search: Early deep learning methods, such as EXIT [18], combined MCTS with deep neural networks for reinforcement learning, iteratively self-training the network to guide the tree search and enhance reasoning. Building on this, CPO [1065] and TPO [446] align each step of Long CoT reasoning with the corresponding tree search path, using Tree of Thoughts (ToT) [955] preference information to support deeper reasoning [951, 302]. Li [422] propose Policy-Guided Tree Search (PGTS), integrating RL with structured tree exploration for more efficient navigation of reasoning paths. Further developments, such as AlphaMath [74], AlphaLLM-CPL [814], and TongGeometry [1029], refine MCTS behavior through stepwise trajectory pair extraction and curriculum preference learning, boosting LLM reasoning abilities [611, 412, 872]." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.507, + 0.947 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.226, + 0.071 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.308, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.066 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.066, + 0.301, + 0.07 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "title", + "bbox": [ + 0.205, + 0.092, + 0.461, + 0.108 + ], + "angle": 0, + "content": "Takeaways: Imitation & Self-Learning" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.115, + 0.798, + 0.144 + ], + "angle": 0, + "content": "- Imitating deep reasoning from advanced RLLMs, and scaling-augmented methods like MCTS can help models learn complex reasoning patterns with fewer samples." 
+ }, + { + "type": "text", + "bbox": [ + 0.202, + 0.147, + 0.798, + 0.175 + ], + "angle": 0, + "content": "- Self-learning techniques, including reinforcement learning and tree search, allow RLLMs to enhance their reasoning abilities over time." + }, + { + "type": "text", + "bbox": [ + 0.202, + 0.178, + 0.8, + 0.207 + ], + "angle": 0, + "content": "- The combination of imitation from advanced RLLMs and self-learning techniques strengthens RLLM reasoning, leading to strong performance on complex tasks." + }, + { + "type": "list", + "bbox": [ + 0.201, + 0.115, + 0.8, + 0.207 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.235, + 0.482, + 0.253 + ], + "angle": 0, + "content": "5 Feasible Reflection for Long CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.266, + 0.825, + 0.336 + ], + "angle": 0, + "content": "Feasible Reflection is a pivotal component of Long CoT reasoning, enabling LLMs to handle complex tasks through iterative feedback and refinement [406, 192]. Specifically, it comprises two primary stages: (1) Feedback (\\(\\S\\) 5.1), which generates feedback signals \\(\\mathcal{F}_i\\) to correct node \\(n_j\\) in Equation (5); and (2) Refinement (\\(\\S\\) 5.2), which adjusts the subsequent node \\(n_{i+1}\\) according to the feedback in Equation (6)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.352, + 0.28, + 0.366 + ], + "angle": 0, + "content": "5.1 Feedback" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.378, + 0.825, + 0.462 + ], + "angle": 0, + "content": "Feedback refers to the process of providing evaluations of both overall outputs and the processes that lead to them, with the goal of assessing their accuracy and quality [394, 396, 838, 220, 862]. This process, also referred to as critique or verification, can be executed using either natural language or structured data formats, which serve as the foundation for tree-search methods [113]. 
Specifically, as shown in Figure 7, feedback can be categorized into three distinct types: (1) Overall Feedback (\\(\\S\\) 5.1.1); (2) Process Feedback (\\(\\S\\) 5.1.2); (3) Hybrid Feedback (\\(\\S\\) 5.1.3)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.475, + 0.348, + 0.489 + ], + "angle": 0, + "content": "5.1.1 Overall Feedback" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.499, + 0.825, + 0.569 + ], + "angle": 0, + "content": "The overall feedback focuses on providing a global view of the entire process and results, rather than assessing each step individually. This feedback significantly enhances reasoning skills and reward modeling in reinforcement learning for RLLMs. Specifically, as shown in Figure 7 (a), the overall feedback can be categorized into three main sources: Outcome Reward Model, Rule Extraction, and RLLMs Feedback. The performance across these categories is summarized in Table 3." + }, + { + "type": "image", + "bbox": [ + 0.254, + 0.59, + 0.416, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.424, + 0.594, + 0.579, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.582, + 0.594, + 0.739, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.256, + 0.707, + 0.432, + 0.83 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.447, + 0.708, + 0.734, + 0.83 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.838, + 0.825, + 0.907 + ], + "angle": 0, + "content": "Figure 7: The feedback capabilities framework for feasible reflection consists of Overall Feedback and Process Feedback. Overall Feedback includes the Outcome Reward Model (ORM) in a value format, rule extraction for correctness judgment, and overall RLLMs based on RLLMs. Process Feedback includes the Process Reward Model (PRM) in a value format and step-level RLLMs, also based on RLLMs." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.069 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "table", + "bbox": [ + 0.199, + 0.088, + 0.8, + 0.395 + ], + "angle": 0, + "content": "
ModelBase ModelChatChat_HardSafetyReasoningOverall
RLLMs
GPT-4o-mini [3]-95.060.780.883.780.1
Llama3.1-70B-Instruct [168]-97.270.286.082.884.0
Llama3.1-405B-Instruct [168]-97.274.687.177.684.1
GPT-4 [3]-95.374.386.987.686.0
GPT-4o [3]-96.176.186.688.186.7
Gemini-1.5-pro [719]-92.380.687.992.088.2
Self-taught Evaluator [803]Llama-3.1-70B-Instruct [168]96.684.281.091.588.3
SFR-LLMA-3.1-8B-Judge [791]Llama-3.1-70B-Instruct [168]95.577.786.295.188.7
SFR-NeMo-12B-Judge [791]Mistral-NeMo-Instruct-12B [725]97.282.286.595.190.3
SFR-LLMA-3.1-70B-Judge [791]Llama-3.1-70B-Instruct [168]96.984.891.697.692.7
Skywork-Critic-Llama-3.1-70B [791]Llama-3.1-70B-Instruct [168]96.687.993.195.593.3
LMUnit [641]Llama-3.1-70B-Instruct [168]----93.4
EvalPlanner [643]Llama-3.1-70B-Instruct [168]97.589.493.095.593.9
Outcome Reward Models
tulu-v2.5-13b-uf-rm [306]TULU-2-13B [305]39.442.355.547.446.1
Prometheus-2-7B [353]Mistral-7B-Instruct-v0.2 [318]85.549.177.176.572.0
Prometheus-8x7b-v2 [353]Mixtral-8x7B-Instruct [319]93.047.180.577.474.5
Critic-RM-Rank [991]Llama-3.1-70B-Instruct [168]97.058.084.092.082.8
RM [689]Llama-3.1-70B-Instruct [168]98.374.583.888.086.4
SynRM [968]Llama-3.1-70B-Instruct [168]97.576.886.388.587.3
CLoud [17]Llama-3-70B-Instruct [168]98.075.687.689.087.6
FLAME-RM-24B [753]PaLM-2-24B [16]92.275.789.693.887.8
SteerLM-RM 70B [829]Llama-2-70B-chat [743]91.380.390.692.888.8
Llama-3-OffsetBias-RM-8B [585]Llama-3-8B-Instruct [168]97.281.886.891.989.4
InternLM-20B-Reward [62]InternLM2-8B-Instruct [62]98.976.589.995.890.2
ArmoRM-Llama3-8B-v0.1 [771]Llama-3-8B-Instruct [168]96.976.892.297.390.8
Nemotron-4-340B-Reward [829]Nemotron-4-340B [4]95.887.192.293.692.2
Skywork-Reward-Llama-3.1-8B [466]Llama-3.1-70B-Instruct [168]95.887.390.696.292.5
Skywork-Reward-Gemma-2-27B [466]Gemma-2-27B-it [720]95.891.492.096.193.8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.401, + 0.825, + 0.43 + ], + "angle": 0, + "content": "Table 3: Performance of various overall feedback methods, sorted primarily by Overall scores in RewardBench [367]. “-” indicates that the paper did not report this score." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.467, + 0.828, + 0.606 + ], + "angle": 0, + "content": "Overall Feedback from Outcome Reward Model Since many tasks cannot be directly evaluated using accuracy or other standard metrics, research has increasingly focused on Outcome Reward Models (ORM), which provide value-based rewards for more general and quantifiable feedback [1127, 986, 467]. In 2021, OpenAI [141] has proposed a \"Gen-Verifier\" paradigm, which uses a specialized ORM to evaluate the accuracy of generated rationales, showing significant progress in feedback capabilities [658]. Ji et al. [315] introduce a trained knowledge scorer to analyze hallucinations in the reasoning process, providing feedback to RLLMs and improving the accuracy of their outputs over time. Moreover, Generative Reward Models [1048] use next-token prediction for overall feedback, which seamlessly integrates with instruction adjustments, leveraging inference-time calculations to improve ORM feedback." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.612, + 0.829, + 0.697 + ], + "angle": 0, + "content": "However, specifically trained ORMs are often costly and not sufficiently robust. Building on this, Self-Rewarding Language Models (SRLMs) [1129] incorporate a self-consistency framework, optimizing feedback to improve model alignment and consistency [1047]. Yu et al. [991] introduce Critic-RM, combining RLLM-generated natural language criticism with corresponding feedback. This method filters high-quality feedback while jointly fine-tuning reward prediction and criticism generation, optimizing ORM performance." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.718, + 0.828, + 0.913 + ], + "angle": 0, + "content": "Overall Feedback from Rule Extraction Although ORM has achieved significant improvements, its accuracy still falls short of \\(100\\%\\), preventing it from outperforming rule-based answer correction feedback [955, 234, 1079]. Previous studies, such as STaR [1012], ReST [225], and ReFT [745], have demonstrated that feedback based on final answer rewards is more effective than both PRM and ORM in mathematical scenarios [197]. Furthermore, Guo et al. [227] and Xie et al. [886] introduce a multi-stage RL framework that incorporates rule-based rewards, significantly enhancing both output accuracy and length while mitigating reward hacking through simple yet robust rules [30], such as format validation and result verification. In coding scenarios where direct rule-based feedback is difficult, OpenCodeInterpreter [1108], AceCoder [1014], O1-Coder [1076], and VerMCTS [56] address this challenge by implementing an automated test-case synthesis pipeline, deriving rewards based on program performance [564, 216, 1115]. Additionally, Ma et al. [536] propose an automated approach to training a test case generator, which alleviates the scarcity of test cases and demonstrates that increasing the number of test cases correlates with improved reward quality. Moreover, Ma et al. [535] decompose problem-solving into structured coding subtasks: file localization, function" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "localization, line localization, and code editing generation, and applies multi-viewed rule-based rewards." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.827, + 0.248 + ], + "angle": 0, + "content": "Overall Feedback from RLLMs Research on feedback from RLLMs centers on detecting errors and biases through natural language feedback, also known as LLM-as-Judge, self-reflection or self-critique [274, 336, 29, 638, 549, 802, 1002, 895, 529]. This method has led to significant improvements across various tasks, particularly in self-correction [848, 1109, 206, 184, 1075]. Huang et al. [286] contend that traditional LLMs struggle to generate effective feedback without external signals, requiring the development of RLLMs with enhanced feedback capabilities [645, 398]. As a result, many studies leverage RLLMs' error-identification strengths, often stemming from their pretraining phase, to improve feedback generation and correction [965, 39, 40, 282]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.251, + 0.828, + 0.475 + ], + "angle": 0, + "content": "Earlier, McAleese et al. [544] found that training RLLMs to learn self-critique and deep reasoning can further boost performance. Zhang et al. [1062] propose a self-contrast mechanism that compares multiple perspectives, identifies differences, and summarizes insights to resolve inconsistencies. However, these methods often offer task-independent feedback. To address this, Hao et al. [235] introduce AutoRace, which tailors evaluation criteria for specific tasks. The Reversal of Thought (RoT) framework [999] introduces a novel paradigm combining reverse reasoning with self-reflection, helping models identify the limits of their knowledge and enhance reasoning efficiency. Furthermore, ACR [1116] implements a scoring system for coding tasks, using LLM-as-a-Judge for quality assessment and LLM-as-a-Critic for critiquing low-quality code, improving consistency across benchmarks. Zheng et al. [1107] integrate code execution error data and feedback from RLLMs to improve code generation performance. Liu et al. 
[484] present AGSER, a method using attention-guided self-reflection to address hallucinations by splitting input queries into attentive and nonattentive components. Finally, Saha et al. [643] introduce EvalPlanner, which separates feedback into planning and reasoning components for more streamlined expression using existing RLLMs. More comprehensively, Hu et al. [274] outline the complete pipeline, key insights, and practical lessons for training RLLMs to function as judges." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.486, + 0.35, + 0.501 + ], + "angle": 0, + "content": "5.1.2 Process Feedback" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.511, + 0.828, + 0.583 + ], + "angle": 0, + "content": "Techniques combine process feedback with MCTS or RL rewards to provide automated, step-by-step guidance, reducing the need for labor-intensive annotations while enhancing reasoning capabilities [749, 344]. These techniques can be categorized into two main types based on the source of feedback: process reward models (PRMs) and prompted LLMs. The performance comparison are mainly shown in Table 4." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.594, + 0.829, + 0.913 + ], + "angle": 0, + "content": "Process Feedback from Process Rewarded Model Recent studies highlight the significance of feedback in developing effective PRMs for complex reasoning tasks, particularly in a step-level view [134, 423, 528]. (1) Process Annotated PRM Training: Earlier, Lightman et al. [449] demonstrate that training process feedback with human-annotated data (PRM800K) surpasses outcome supervision in creating reliable reward models. However, this approach requires significant human effort. To address this, Wang et al. [792] introduce Math-Shepherd, a dataset that generates step-by-step supervision using a Tree Search-inspired method [73, 1001]. 
Following this, methods like QwQ [731], Skywork-o1 [570], AceMath [500], and PRIME [143] adopt similar techniques to enhance PRM performance. Additionally, Zhang et al. [1036] propose entropy regularization to improve model convergence. Rather than focusing solely on the first error step, Full-Step-DPO [903] assigns rewards for the entire reasoning chain, including error steps. VersaPRM [1015] extends PRMs across multiple domains, broadening their applicability. Similarly, Gu et al. [219] and Zhang et al. [1074] suggest training models with student preferences aligned to teacher preferences, ensuring effective preference distillation. Further, Wang et al. [807] propose VisualPRM400K and expand this paradigm to multimodal scenarios. (2) Outcome Annotated PRM Training: Alternative approaches, such as ReST-MCTS* [1032], OVM [979], Implicit PRM [1000], AutoPSV [506], and DVO [1038], leverage outcome supervision or implicit feedback to train PRMs, reducing the need for extensive human-annotated data [891, 643]. UAS [981] incorporates uncertainty-aware value models [275] into feedback predictions [495, 167, 945, 1089]. Additionally, Aurora [710] utilizes ensemble prompting strategies and reference answers for reverse verification, training stronger PRMs that better align with the Long CoT data distribution. Furthermore, PAV [651] suggests that rewards should reflect reasoning progress, as measured by changes in the likelihood of producing a correct future response before and after each step. Yang et al. [932], Lee et al. [376], Yoon et al. 
[975] extend these paradigms" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.511, + 0.949 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.065 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.235, + 0.068, + 0.3, + 0.073 + ], + "angle": 0, + "content": "REASONING SONG" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.087, + 0.824, + 0.352 + ], + "angle": 0, + "content": "
ProcessBenchPRMBench
GSM8KMATHOlympiadBenchOmniMATHSimplicitySoundnessSensitivity
Process Reward Models
Qwen2.5-Math-7B-PRM [1102]Qwen2.5-Math-7B [927]39.452.239.433.1---
Math-Shepherd-PRM-7B [792]Mistral-7B [318]47.929.524.823.847.145.760.7
RLHFlow-PRM-Mistral-8B [156]Mistral-7B [318]50.433.413.815.846.757.568.5
RLHFlow-PRM-DeepSeek-8B [156]DeepSeek-7B [52]38.833.816.916.947.657.568.1
Skywork-PRM-1.5B [466]Qwen2.5-Math-1.5B-Instruct [926]59.048.019.319.233.628.648.8
Skywork-PRM-7B [466]Qwen2.5-Math-7B-Instruct [926]70.853.622.921.038.432.754.3
Qwen2-1.5B-PRM800k [700]Qwen2-Math-1.5B-Instruct [927]34.055.334.241.0---
Qwen2-1.5B-Math-Shepherd [700]Qwen2-Math-1.5B-Instruct [927]48.934.19.813.7---
Qwen2-1.5B-Epic50k [700]Qwen2-Math-1.5B-Instruct [927]55.636.120.230.0---
Qwen2.5-Math-7B-PRM800KQwen2.5-Math-7B-Instruct [927]68.262.650.744.3---
Qwen2.5-Math-PRM-7B [1102]Qwen2.5-Math-7B-Instruct [927]82.477.667.566.3---
Universal-PRM-7B [710]Qwen2.5-Math-7B-Instruct [927]85.877.767.666.4---
Critic Model
Llama-3.1-8B-Instruct [168]-27.526.718.519.2---
GPT-4o [3]-61.953.948.344.659.770.975.8
QwQ-32B-Preview [731]Qwen2.5-32B-Instruct [926]62.352.746.243.9---
DeepSeek-R1-Distill-Qwen-14B [227]Qwen2.5-14B-Instruct [926]67.338.829.932.1---
Dyve-14B [1111]DeepSeek-R1-Distill-Qwen-14B [227]68.558.349.047.2---
Qwen2.5-72B-Instruct [926]-76.261.854.652.2---
SCRIT [713]Qwen2.5-72B-Instruct [926]80.260.032.527.8---
ol-mini [307]-93.288.987.282.464.672.175.5
LLemma-PRM800k-7B [679]LLemma-7B [26]----51.450.966.0
LLemma-MetaMath-7B [679]LLemma-7B [26]----50.349.066.0
LLemma-oprn-7B [679]LLemma-7B [26]----49.049.864.1
MATHMinos-Mistral-7B [195]Mistral-7B [318]----51.454.466.5
ReasonEval-7B [877]LLemma-7B [26]----55.563.971.0
ReasonEval-34B [877]LLemma-34B [26]----51.563.073.1
Gemini-2.0-flash-exp [679]-----62.767.375.4
Gemini-2.0-thinking-exp-1219 [679]-----66.271.875.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.36, + 0.827, + 0.389 + ], + "angle": 0, + "content": "Table 4: Performance of various process feedback methods on ProcessBench [1102] and PRM-Bench [679]. “-” indicates that the paper did not report this score." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.416, + 0.827, + 0.487 + ], + "angle": 0, + "content": "to the token level. Moreover, Chen et al. [110] expand these into interactive agent scenarios, allowing for automatically learning reward models from the environment without additional manual annotation. Wang et al. [832] equip a dual-layer MLP module to evaluate the reward at each step, successfully integrating the policy model and PRM into a unified interface without additional process annotations, reducing over \\(99\\%\\) of PRM parameters for efficient reasoning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.5, + 0.828, + 0.846 + ], + "angle": 0, + "content": "Process Feedback from RLLMs As PRM training remains heavily dependent on manually annotated data, recent research has explored methods for enabling models to generate their natural language feedback to optimize performance [910]. These approaches fall into two primary categories: (1) Model-Driven Feedback Reasoning: Earlier work such as React [956] and Reflexion [669] enhances RLLMs with natural language feedback at each action and reasoning step [196, 135, 89], improving decision-making in diverse tasks. Similarly, Step-DPO [365] uses RLLM to self-verify step-level positive and negative pairs for training through the DPO paradigm, achieving strong performance. Additionally, Sun et al. [702] propose a dynamic error classification framework that adapts based on model outputs, improving performance in mathematical reasoning tasks by addressing specific error patterns in math word problems. Furthermore, Xie et al. [889] and He et al. 
[245] iteratively apply MCTS to collect preference data, utilizing its forward-looking capabilities to decompose instance-level rewards into more precise step-level signals, thereby enhancing feedback accuracy. However, step-wise feedback often suffers from reliability issues, which can be mitigated by uncertainty quantification [973, 969], improving the reliability of step-wise verification in reward models for mathematical reasoning tasks. Moreover, Fu et al. [187] define the CoT Average Causal Effect (CACE) to capture causal relationships between steps, resulting in a causalized Long CoT where all steps are both correct and comprehensible. (2) Environment-Driven Feedback Reasoning: Given the increasing complexity of large models, there is growing interest in combining prompt-based LLMs with external environments to generate more interpretable and controllable feedback [885, 271]. For example, ORPS [996] and Drori et al. [162] minimize dependence on human annotations by using execution feedback, enabling models to autonomously refine their solutions. Additionally, Shrestha et al. [670] contribute by translating model outputs into Python code, helping to identify logical errors, gain insights into flawed reasoning processes, and guide improvements in mathematical reasoning. Xu et al. [897] integrate reasoning models with an interactive environment, enabling learning in more dynamic scenarios and creating a more generalizable self-learning framework." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.86, + 0.355, + 0.875 + ], + "angle": 0, + "content": "5.1.3 Hybrid Feedbacks" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Given the respective advantages and limitations of Overall Feedback and Process Feedback, recent studies have sought to combine both for optimal feedback. Specifically, Zhang et al. 
[1078] propose" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.507, + 0.948 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.065 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.235, + 0.063, + 0.302, + 0.071 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.827, + 0.162 + ], + "angle": 0, + "content": "a consensus filtering mechanism that integrates Monte Carlo estimation with an LLM-as-judge to enhance both overall and stepwise feedback, thus improving reasoning accuracy. In a similar vein, Lin et al. [454] introduce Step-KTO, a framework combining stepwise process-level and outcome-level binary feedback, using PRM and ORM to guide language models toward coherent reasoning, with a focus on error correction through reflection mechanisms." + }, + { + "type": "title", + "bbox": [ + 0.206, + 0.171, + 0.353, + 0.186 + ], + "angle": 0, + "content": "Takeaways: Feedback" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.194, + 0.798, + 0.223 + ], + "angle": 0, + "content": "- Evolving Feedback Models: Feedback mechanisms, including overall, process, and hybrid feedback, are crucial for improving the reasoning capabilities of RLLMs." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.226, + 0.798, + 0.255 + ], + "angle": 0, + "content": "- Innovative Approaches in Process Feedback: Process feedback using techniques like PRMs with MCTS enhances Long CoT, though challenges like reward hacking remain." 
+ }, + { + "type": "text", + "bbox": [ + 0.201, + 0.257, + 0.798, + 0.3 + ], + "angle": 0, + "content": "- Self-Reflection and Model-Driven Feedback: Self-reflection and model-driven feedback improve RLLM performance by enabling error detection, task-specific insights, and more autonomous learning." + }, + { + "type": "list", + "bbox": [ + 0.2, + 0.194, + 0.798, + 0.3 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.325, + 0.294, + 0.338 + ], + "angle": 0, + "content": "5.2 Refinement" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.347, + 0.825, + 0.403 + ], + "angle": 0, + "content": "Refinement refers to the process of addressing errors in reasoning based on prior feedback. As shown in Figure 8, refinement methods can be grouped into three primary categories: prompt-based refinement generation (§ 5.2.1), SFT-based refinement imitation (§ 5.2.2), and RL-based refinement learning (§ 5.2.3)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.417, + 0.492, + 0.433 + ], + "angle": 0, + "content": "5.2.1 Prompt-based Refinement Generation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.441, + 0.828, + 0.636 + ], + "angle": 0, + "content": "Research on prompt-based refine generation focuses on enhancing the performance of LLMs through iterative self-refinement mechanisms [578, 1091, 98, 469, 1028, 754, 818, 546]. A prominent approach involves prompting RLLMs to generate initial outputs, followed by self-feedback that iteratively refines and improves performance across tasks such as dialogue generation and mathematical reasoning [645, 539, 1101, 669, 549, 345, 750, 482], which even much reduce the hallucinations [289, 315]. Noteworthy methods, like Self-Backtracking [944], Refiner [590], and BackMath [1055], allow LLMs to adjust their reasoning autonomously, reducing unnecessary complexity in decision-making [868]. Further, Havrilla et al. 
[238] extend the paradigm by integrating overall-level and step-level refinements, improving refinement performance. Yang et al. [950] propose a method to decompose the self-correction capability of LLMs into \"confidence\" and \"critique\" capacities, designing probabilistic metrics to evaluate them and exploring the role of reflection mechanisms in model behavior. Additionally, MCTSr [1033], LLM2 [930], ReST-MCTS* [1032] and ReARTeR [703] emphasize dynamic reflection through iterative error correction and confidence adjustments, allowing models to autonomously refine reasoning strategies [186]. He et al. [240]" + }, + { + "type": "image_caption", + "bbox": [ + 0.184, + 0.661, + 0.478, + 0.678 + ], + "angle": 0, + "content": "(a) Prompt-based Refinement Generation" + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.68, + 0.47, + 0.777 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.526, + 0.666, + 0.785, + 0.681 + ], + "angle": 0, + "content": "(b) SFT-based Refinement Imitation" + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.683, + 0.808, + 0.777 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.391, + 0.789, + 0.631, + 0.804 + ], + "angle": 0, + "content": "(c) RL-based Refinement Learning" + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.806, + 0.298, + 0.863 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.302, + 0.809, + 0.352, + 0.85 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.354, + 0.818, + 0.448, + 0.844 + ], + "angle": 0, + "content": "Reinforcement Learning" + }, + { + "type": "image", + "bbox": [ + 0.446, + 0.806, + 0.501, + 0.858 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.808, + 0.568, + 0.85 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.579, + 0.805, + 0.684, + 0.861 + ], + "angle": 0, + "content": null 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.691, + 0.805, + 0.788, + 0.86 + ], + "angle": 0, + "content": "Aha! I think \\(1 + 1 = 3\\) should be corrected \\(1 + 1 = 2!\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.88, + 0.825, + 0.908 + ], + "angle": 0, + "content": "Figure 8: The three main categories of refinement methods, including Prompt-based Refinement Generation, SFT-based Refinement Imitation, and RL-based Refinement Learning." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.065 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.065, + 0.307, + 0.07 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.827, + 0.189 + ], + "angle": 0, + "content": "extend this paradigm to multi-agent scenarios, improving both reasoning and agent system performance [936, 1128]. Moreover, Yuksekgonul et al. [1009] and Peng et al. [593] further expand the paradigm by enabling automatic prompt optimization driven by LLMs. This approach facilitates more generalized and automated refinement of input prompts across a range of tasks, as opposed to focusing solely on refining output results. However, without oracle feedback, RLLM's self-refinement process fails, causing instability in both intermediate and final answers, leading to biases in simple factual queries and introducing cognitive biases in complex tasks [1051, 908]." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.208, + 0.453, + 0.221 + ], + "angle": 0, + "content": "5.2.2 SFT-based Refinement Imitation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.233, + 0.828, + 0.469 + ], + "angle": 0, + "content": "Recent advancements in reflection-based reasoning for LLMs have led to frameworks that enhance model reasoning through self-refinement and error correction. A key approach is directly supervised fine-tuning, which allows models to learn error correction processes from advanced LLMs, thereby improving their reflective capabilities [14, 104, 406, 822, 99, 873]. Notable frameworks, such as rStar [615], improve smaller language models through self-play mutual reasoning, while Recursive Introduction [627] and RealCritic [714] use iterative feedback mechanisms to identify and correct errors to better self-improve [393]. Yan et al. [924] propose constructing step-wise self-correction data and implementing a training strategy that uses the above-constructed data to equip LLMs with spontaneous step-level self-correction capacities. Building upon these, Gao et al. [196] and Zhang et al. [1027] propose Math-Minos, which employs step-by-step natural language feedback as rationale tags, offering both correctness and detailed explanations for each step to train feedback mechanisms that justify and refine the reasoning process. Journey Learning [623] employs MCTS to parse node backtracking as natural language refinement, enhancing supervised fine-tuning and, thereby, improving reasoning performance. Additionally, approaches like ProgCo [682] emphasize iterative feedback and program-driven refinement to enhance critique and self-correction. Expanding these ideas to multimodal settings, frameworks, such as R3V [120] and MM-Verify [697], focus on integrating visual and textual reasoning [519, 813]." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.487, + 0.446, + 0.502 + ], + "angle": 0, + "content": "5.2.3 RL-based Refinement Learning" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.513, + 0.827, + 0.734 + ], + "angle": 0, + "content": "In recent research, several approaches have been proposed to enhance the performance of refinement through reinforcement learning [673, 1056]. Earlier, Kumar et al. [358] observed that SFT of RLLMs often fails to promote self-refinement behaviors. This limitation stems from a distributional mismatch between data collection strategies and model responses, as well as the risk of behavioral collapse. To address this, SCoRe [358] enhances self-refinement by training the model on its own self-generated correction trajectories and employing regularization to guide the learning process. This method prioritizes fostering self-refinement during testing, rather than merely maximizing reward for specific prompts [1018]. Further, Guo et al. [227] demonstrate that applying outcome-level rewarded RL can trigger an \"Aha moment,\" activating the model's natural feedback and refinement behaviors without the need for human guidance. Moreover, Guo et al. [227], Zeng et al. [1017] and Ma et al. [529] explore initializing LLMs with iterative self-verification and self-correction behaviors, which are strengthened through supervised fine-tuning and further enhanced by outcome-level RL. Ma et al. [529] and Yang et al. [935] extend these capabilities with process-level RL, minimizing resource usage while enabling adaptive reasoning refinements during inference. More recently, Lee et al. [374] introduce an intrinsic verifier module to decide when refinements should be applied, using RL to further encourage self-refinement when errors are detected." 
+ }, + { + "type": "title", + "bbox": [ + 0.205, + 0.748, + 0.365, + 0.763 + ], + "angle": 0, + "content": "Takeaways: Refinement" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.77, + 0.798, + 0.813 + ], + "angle": 0, + "content": "- Prompt-Based Refinement for Iterative Improvement: Iterative self-refinement through feedback loops helps LLMs improve reasoning and reduce errors like hallucinations but requires stable feedback to maintain accuracy." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.816, + 0.798, + 0.858 + ], + "angle": 0, + "content": "- Supervised Fine-Tuning (SFT) for Error Correction: Supervised fine-tuning enhances LLMs by using iterative feedback and self-correction strategies to improve reasoning accuracy, especially for smaller models." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.861, + 0.8, + 0.904 + ], + "angle": 0, + "content": "- Reinforcement Learning (RL) for Refinement: Reinforcement learning enhances self-refinement in LLMs by using self-generated corrections and adaptive strategies, reducing human intervention and resource consumption." 
+ }, + { + "type": "list", + "bbox": [ + 0.201, + 0.77, + 0.8, + 0.904 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.226, + 0.071 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.308, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.065 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.065, + 0.306, + 0.07 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.091, + 0.48, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.484, + 0.091, + 0.814, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.213, + 0.828, + 0.268 + ], + "angle": 0, + "content": "Figure 9: Schematic representations of two common inference-time scaling strategies: (a) sequential scaling, which extends the length of Long CoT but is constrained by the reasoning boundaries of RLLMs; and (b) parallel scaling, which increases the sample size and aggregates multiple outcomes, yet does not surpass the performance of Pass@k." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.3, + 0.513, + 0.318 + ], + "angle": 0, + "content": "6 Extensive Exploration for Long CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.335, + 0.825, + 0.392 + ], + "angle": 0, + "content": "Exploration is a key capability in Long CoT reasoning, allowing models to navigate complex problem spaces through strategic branching and iterative refinement [1019, 381, 784, 751]. 
Recent studies emphasize exploration mechanisms, such as hypothesis branching and error backtracking via reflection, as essential for overcoming the constraints of linear reasoning paths [227]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.397, + 0.828, + 0.496 + ], + "angle": 0, + "content": "Current research focuses on three key areas: (1) Exploration Scaling (§ 6.1), which explores the breadth and depth of exploration and its impact on downstream applications, particularly in improving the size of the exploration path \\( m \\) in Equation (3); (2) Internal Exploration (§ 6.2), which focuses on training models to develop internal exploration capabilities, enabling more efficient and effective generation of \\( m \\) exploration paths \\( \\{n_{i+j}\\}_{j=1}^{m} \\) in Equation (3); and (3) External Exploration (§ 6.3), which examines how models can leverage external systems to enhance their exploratory abilities, facilitating the selection of the most effective path \\( n_{i+j} \\) from the \\( m \\) exploration paths in Equation (3)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.517, + 0.353, + 0.533 + ], + "angle": 0, + "content": "6.1 Exploration Scaling" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.545, + 0.828, + 0.63 + ], + "angle": 0, + "content": "Recent advances in inference-time scaling algorithms [333, 843, 57, 1053, 112] have attracted significant interest, particularly in scaling reasoning length to improve performance [524, 568, 405, 779]. Following Chen et al. [93], as shown in Figure 9, exploration scaling can be understood through two paradigms: (1) sequential scaling, akin to a series of resistors, which connects multiple reasoning processes using reflection; and parallel scaling, similar to parallel resistors, where a unified verification/feedback mechanism selects the most effective reasoning processes." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.651, + 0.356, + 0.667 + ], + "angle": 0, + "content": "6.1.1 Sequential Scaling" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.677, + 0.828, + 0.913 + ], + "angle": 0, + "content": "Sequential scaling refers to extending the reasoning output within a single model generation, significantly boosting model performance [383, 1052, 348]. Early works by Fu et al. [189] and Jaech et al. [307] show that increasing the length of the reasoning path can greatly improve performance. Tian et al. [736] enhances model reasoning iteratively by using prior answers as prompts for each successive round, thus enabling sequential scaling of the reasoning process. Building on this, later studies [314, 391] further explore enhancing logical depth through tree-based searches within a fixed compute budget, resulting in notable performance gains [11, 614]. Building upon this, Muennighoff et al. [560] introduce a inference-time scaling method that improves reasoning by fine-tuning and budget forcing, yielding substantial gains with additional computing at inference time. To address the constraints of attention spans, some studies focus on expanding reasoning length in latent spaces. Geiping et al. [204] and Chen et al. [109] enhance inference-time reasoning performance by implicitly scaling computation in latent space through recurrent depth. Setlur et al. [653] identified three core aspects of sequential scaling: (1) linking skills to asymmetric capabilities in base LLMs, such as connecting easy verification with difficult exploration; (2) enhancing exploration in reinforcement learning by utilizing the \"negative\" gradient of error trajectories, which extends search paths and links additional asymmetries; and (3) creating dynamic exploration by aligning task difficulty with training token budgets through tailored curricula." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.065 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.065, + 0.307, + 0.07 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.336, + 0.107 + ], + "angle": 0, + "content": "6.1.2 Parallel Scaling" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.116, + 0.827, + 0.186 + ], + "angle": 0, + "content": "Parallel scaling refers to the process of increasing the number of reasoning iterations during model generation and then verify these results to get the final output, which significantly enhances model performance [2, 864, 57, 485, 59, 1139]. Initially, Wang et al. [816] introduce the concept of self-consistency, demonstrating that multiple sampling processes followed by majority voting for effective exploration." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.198, + 0.828, + 0.518 + ], + "angle": 0, + "content": "Verification Optimization The primary focus of recent research is optimizing verification, which can be categorized into two types: (1) Overall Verification: Recent works [1120, 831] divide the scaling process into two stages: \"reasoning\" and \"self-verification.\" By replacing majority voting in self-consistency with self-verification, these approaches show significant improvements [1083, 81, 1149, 364, 426]. In code scenarios, WoT [1071], CISC [716] and S* [392] scale the Long CoT in parallel, using output confidence or code execution results for verification, effectively assessing reasoning quality [635, 203, 278, 1134]. 
Further, Nye et al. [569] and Weir et al. [842], Stoisser et al. [690] train RLLMs to simulate code execution, removing the need for test cases in code-related parallel scaling. Chain-of-Verification [93] introduces meta-verification, sampling multiple verification instances to identify the correct one. Kim et al. [351], Chen et al. [111], and Vacareanu et al. [750] validate this approach empirically by evaluating answer correctness based on reasoning path properties. Moreover, Li et al. [421] tune a specific RLLM to verify and aggregate answers, showing improved performance. This suggests that PRM cannot replace a specially trained RLLM for verification due to training goal biases [1078]. Finally, Kang et al. [341] leverage self-uncertainty to select the best results. (2) Step Verification: Building on this, numerous researchers have explored step-level or finer-grained verification [84, 460]. Notably, DIVERSE [425], SSC-CoT [1098], and Fine-grained Self-Consistency [93] combine diverse reasoning paths with step-level verification. In addition, a series of works [676, 864, 517, 770, 853, 486] try to investigate how optimal scaling strategies based on MCTS can enhance smaller language models' performance. Their findings show that a 1B RLLM can outperform a 405B model on complex tasks through parallel scaling [988]. Despite these advancements in verification, Chen et al. [93] demonstrate that these strategies cannot surpass Best-of-N methods, suggesting that breakthroughs cannot solely rely on optimization-based verification [106]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.529, + 0.828, + 0.682 + ], + "angle": 0, + "content": "Sampling Optimization Another key area of research focuses on generating diverse but less paths or strategies for efficient scaling [871, 765, 80, 668, 444, 681]. For instance, Zeng et al. [1020] aggregate the shortest yet most varied reasoning paths for better scalability. Similarly, Du et al. 
[164] adjust the sampling temperature to increase diversity, leading to improved scaling. Zhang et al. [1045] and Liu et al. [470] optimize both candidate solution generation (e.g., prompts, temperature, and top-p) and reward mechanisms (such as self-evaluation and reward types), offering diverse strategies for parallel scaling. Moreover, Qin et al. [617], Luo et al. [520], and Yu et al. [990] enhance RLLM reasoning by scaling sampling across multiple natural and programming languages or varied expressions. Finally, Yang et al. [943] introduces a method where a small set of seed data, with varied response lengths, guides the model to engage in deeper reasoning by selecting the shortest correct responses across various inference efforts." + }, + { + "type": "title", + "bbox": [ + 0.205, + 0.692, + 0.421, + 0.708 + ], + "angle": 0, + "content": "Takeaways: Exploration Scaling" + }, + { + "type": "text", + "bbox": [ + 0.202, + 0.715, + 0.798, + 0.757 + ], + "angle": 0, + "content": "- Exploration Mechanisms in Long CoT Reasoning: Exploration strategies like hypothesis branching and error backtracking are vital for overcoming limitations in linear reasoning paths and enhancing model performance." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.76, + 0.798, + 0.789 + ], + "angle": 0, + "content": "- Scaling Exploration: Exploration can be scaled through sequential and parallel strategies to improve reasoning depth and efficiency." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.792, + 0.798, + 0.834 + ], + "angle": 0, + "content": "- Verification and Sampling Optimization: Refining verification techniques and optimizing sampling for diverse reasoning paths are key to improving exploration efficiency and performance in Long CoT tasks." 
+ }, + { + "type": "list", + "bbox": [ + 0.201, + 0.715, + 0.798, + 0.834 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.861, + 0.359, + 0.877 + ], + "angle": 0, + "content": "6.2 Internal Exploration" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.828, + 0.913 + ], + "angle": 0, + "content": "As noted in Chu et al. [137], Shen et al. [661], and Yang et al. [938], SFT serves as a memory process, while RL enhances generalization [359, 82]. Specifically, SFT stabilizes the model's output format," + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.226, + 0.071 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.308, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.066 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.066, + 0.306, + 0.07 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.09, + 0.816, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.214, + 0.806, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.323, + 0.828, + 0.366 + ], + "angle": 0, + "content": "Figure 10: Two primary approaches for optimizing Internal Exploration: improving RL strategy through reference and value models, and designing reward strategies: either rule-based or model-based rewarding to enhance RL performance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.395, + 0.825, + 0.452 + ], + "angle": 0, + "content": "whereas RL improves its generalization capacity, which can increase learning efficiency by up to eight times in tasks such as mathematical reasoning [650]. 
Consequently, as shown in Figure 10, leading research emphasizes the role of RL and reward strategies in enhancing the exploration capabilities of LLMs without external assistance. The performance comparison is presented in Table 5." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.463, + 0.321, + 0.478 + ], + "angle": 0, + "content": "6.2.1 RL Strategies" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.484, + 0.827, + 0.526 + ], + "angle": 0, + "content": "Recent advancements in RL strategies for exploration have led to notable improvements in various tasks, particularly in reasoning tasks [699, 369, 313, 542, 882, 1017, 985, 268, 1010, 628, 150, 176, 686]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.533, + 0.827, + 0.755 + ], + "angle": 0, + "content": "(1) Reward-free RL: The first series of work focuses on RL optimization algorithms. Additionally, OREO [773] propose an offline RL method that optimizes the soft Bellman equation, improving credit assignment for multi-step reasoning tasks and outperforming existing approaches in fields like mathematics and agent control. Liu et al. [476] propose Direct Advantage Policy Optimization, a novel offline RL method that leverages a separately trained critic to evaluate the accuracy of each reasoning step. This technique provides dense feedback for policy optimization, addressing both sparse rewards and training instability. Further, some research focuses on adjusting the focus of RL algorithms to optimize exploration in targeted aspects. Specifically, CPL [801], cDPO [457], and Focused-DPO [1043] enhance exploration in Long CoT by prioritizing critical or error-prone areas through preference optimization, improving accuracy in those regions. Bartoldson et al. [42] further adjusts the replay strategy of the training data, aiming to optimize reasoning performance. Li et al. 
[420] introduce Learning Impact Measurement (LIM), an automated method for evaluating and prioritizing training samples based on their alignment with model learning trajectories. This approach enables efficient resource use and scalable implementation. For instance, ThinkPO [942] uses short CoT reasoning outputs as rejected answers and longer ones as chosen answers for the same question, applying DPO to encourage prioritization of longer reasoning outputs [1131]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.76, + 0.828, + 0.913 + ], + "angle": 0, + "content": "(2) Reward-based RL: Reward-model-based RL refers to approaches that use a reward model or a verifier to guide learning and decision-making in the absence of explicit rewards [1046, 174, 649, 279, 825, 847, 970]. Earlier, Proximal Policy Optimization (PPO) was first introduced by Schulman et al. [648], which alternates between interacting with the environment to collect data and optimizing a surrogate objective function via stochastic gradient ascent, surpassing DPO [306]. Subsequently, ReMax [436] eliminates the need for additional value models in PPOs. By incorporating variance reduction and REINFORCE [704] techniques, it reduces over four hyperparameters, resulting in lower GPU memory usage and faster training. Building on this, DeepSeekMath [658] proposes Group Relative Policy Optimization (GRPO), replacing traditional value models with improved sampling strategies, thus significantly accelerating learning and achieving performance on par with GPT-4 in mathematics. Hu [265] and Liu et al. [499] further refine GRPO with REINFORCE++ and Dr. 
GRPO," + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.043, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.065 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.065, + 0.307, + 0.07 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "table", + "bbox": [ + 0.182, + 0.089, + 0.818, + 0.472 + ], + "angle": 0, + "content": "
MethodBackboneGSM8KAIME 2024MATH 500GPQALiveCodeBench
Base Model
GPT-4o [3]-92.99.376.653.633.4
Llama-3.1-70B-Instruct [168]-94.113.368.0--
Claude 3.5 Sonnet [19]--16.078.365.038.9
Qwen2.5-Coder-32B-Instruct [301]--20.071.233.825.0
Qwen2.5-70B-Instruct [926]--20.079.449.033.0
Llama-3.3-70B-Instruct [168]--36.773.950.534.8
DeepSeek-V3 [463]--39.290.2-36.2
SFT Strategies
DeepSeek-R1-Distill-Llama-70B [227]--70.0--57.9
DeepSeek-R1-Distill-Qwen-32B [227]--72.6--54.6
START [388]QwQ-32B-preview [731]-66.794.463.647.3
RL Strategies
DPO [631]DeepSeekMath 7B [658]82.4----
KTO [171]DeepSeekMath 7B [658]82.5----
OREO [773]DeepSeekMath 7B [658]86.9----
PPO [648]GLM4-9B-SFT [211]85.5--31.524.3
GRPO [658]GLM4-9B-SFT [211]86.1--31.722.8
Eurus-2-7B-PRIME [143]Qwen2.5-Math-7B-Base [927]-26.779.2--
Search-o1 [418]QwQ-32B-preview [731]-56.786.463.633.0
Reward Strategies
OpenMath2 [739]Llama-3.1-70B [168]94.113.371.8--
Satori [661]Qwen-2.5-Math-7B93.923.383.6--
T1-SFT [264]Qwen2.5-32B [926]-24.983.449.5-
T1 [264]Qwen2.5-32B [926]-50.692.456.1-
DeepSeek-R1-lite [227]--52.591.658.551.6
rStar-Math [222]Qwen2.5-Math-7B [927]95.253.390.0--
QwQ-32B-preview [731]-95.553.390.658.240.6
ol-preview [307]--56.785.573.353.6
o3-mini-low [307]--60.0--61.8
ol-mini [307]--63.690.0-53.8
Kimi k1.5 [722]--77.596.2-62.5
QwQ-32B [731]--79.5--73.1
o3-mini-medium [307]--79.6--72.3
DeepSeek-R1 [227]--79.897.3-71.6
o1 [307]--83.396.4-67.4
o3-mini-high [307]--87.3--84.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.477, + 0.825, + 0.507 + ], + "angle": 0, + "content": "Table 5: Performance of various internal exploration methods on different benchmarks, primarily ordered by AIME 2024. “-” indicates that the paper did not report this score." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.587, + 0.827, + 0.713 + ], + "angle": 0, + "content": "respectively, simplifying the algorithm and enhancing its training. Additionally, Vassoyan et al. [752] and [1121] improve exploration efficiency in smaller models by modifying the KL penalty, thus enhancing performance under distribution shifts. Huang et al. [277] introduce Decoupled Value Policy Optimization (DVPO), a streamlined framework that replaces reward modeling with a pretrained global value model (GVM) and eliminates the interdependence between actor and critic. To address the high-quality demands of reward models, Cui et al. [143] propose PRIME (Process Reinforcement through IMplicit rEwards), which integrates the SFT model as a PRM within a unified reinforcement learning framework, enabling online updates through policy rollouts and outcome labels via implicit process rewards." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.718, + 0.828, + 0.913 + ], + "angle": 0, + "content": "More recently, Liang et al. [439] introduce Self-aware Weakness-driven Problem Synthesis, a reinforcement-learning method that generates challenges tailored to an RLLM's specific weaknesses [863, 183]. By concentrating training on its most difficult aspects, the model achieves more focused and effective reasoning improvements [680]. Wang et al. [805] introduce ROLL, a method designed to support R1-level large-scale training of RLLMs, enabling the efficient exploration and optimization of reasoning paths within the Mixture-of-Experts (MOE) structure [788]. Fu et al. 
[188] introduce AReaL, a large-scale asynchronous reinforcement learning system for language reasoning, which enhances the efficiency and effectiveness of training RLLMs. Ma et al. [526] propose a novel method combining interleaved SFT and RL to address challenging questions where RL typically fails. This approach enables RLLMs to learn from mistakes and enhance reasoning abilities. Huang et al. [297] and Fu et al. [190] further improve exploration efficiency by integrating SFT and RL with prefix sampling. Frurthermore, Yan et al. [917] and Liang et al. [437] guide RLLMs in reasoning under off-policy reinforcement learning [413, 773], improving both training sample efficiency and learning stability [559]." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.065 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.065, + 0.307, + 0.07 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.355, + 0.107 + ], + "angle": 0, + "content": "6.2.2 Reward Strategies" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.117, + 0.828, + 0.436 + ], + "angle": 0, + "content": "Rule-rewarded RL The studies explore advancements in training advanced RLLMs using rule-rewarded RL to enhance exploration strategies and reasoning accuracy [296]. These efforts primarily focus on three types of rewards: (1) Correctness Rewarding: Correctness rewards are fundamental for guiding RLLMs toward accurate answers. Specifically, Singh et al. 
[674] introduce a binary reward system (positive or negative) to facilitate exploration, achieving simple yet effective performance improvements. Similarly, the DeepSeek-R1 [227] employs rule-extracted accuracy as an RL reward, scaling this approach to larger scenarios and training sizes, thereby enhancing both exploration and reasoning tasks [522, 170]. Furthermore, O1-CoderZhang et al. [1076], StepCoder [161], and SWE-RL [841] address challenges in code generation by developing a test case generator, which standardizes code testing, ensuring accurate generation [893, 994]. (2) Format Rewarding: Further, format rewards are used to encourage better reasoning paradigms. Guo et al. [227] introduce this concept to effectively guide reasoning and exploration [886]. Xie et al. [886] expanded on this with a three-stage, rule-based RL approach, enabling the Qwen-7B model to learn complex multi-path exploration, which significantly improved both output format and corresponding length consistency. Additionally, Wu et al. [855] propose TAPO (Thought-Augmented Policy Optimization), a framework that integrates external high-level guidance (\"thought patterns\") into RL, successfully balancing model exploration with external guidance. (3) Scaling rewarding: Moreover, scaling rewards are applied to promote longer reasoning chains and broader exploration. Recent studies [90, 583, 349] highlight the need for progressively scaled reasoning lengths to overcome the limitations of current reasoning approaches. As a result, research has focused on scaling exploration [886, 962]. However, excessive scaling can lead to inefficiency and overcomplicated reasoning [142]. Kimi-K1.5 [722], Yang et al. [943] and Arora and Zanette [22] proposed Long2Short techniques, favoring shorter, more accurate reasoning may also significantly improve efficiency and performance." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.455, + 0.828, + 0.609 + ], + "angle": 0, + "content": "Model-rewarded RL It refers to a class of techniques in which RL algorithms are enhanced by leveraging additional reward models, to guide exploration and improve decision-making processes [693]. Earlier in 2021, OpenAI [141] propose a \"Gen-Verifier\" paradigm to train a correctness-oriented ORM and used ORM-rewarded RL to surpass SFT performance. Recently, with rapid advancements in PRM, several studies [755, 1032, 518] have scaled reinforcement learning by enhancing exploration through step-level correctness rewarding [659, 1042]. Building on this, Hou et al. [264] introduce entropy rewards and dynamic regularization to further optimize the reasoning process [116]. STeCa [768] identifies suboptimal actions during exploration by comparing step-level rewards and adjusting trajectories to improve deep reasoning. Additionally, the Kimi-K1.5 model [722] extends PRM paradigms into multimodal scenarios, achieving state-of-the-art performance in multi-modal reasoning tasks through a streamlined reinforcement learning framework." + }, + { + "type": "title", + "bbox": [ + 0.205, + 0.624, + 0.421, + 0.638 + ], + "angle": 0, + "content": "Takeaways: Internal Exploration" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.645, + 0.801, + 0.688 + ], + "angle": 0, + "content": "- SFT and RL Synergy: The combination of Self-Feedback Training (SFT) and Reinforcement Learning (RL) improves model output stability and generalization, enhancing learning efficiency in reasoning tasks." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.691, + 0.801, + 0.734 + ], + "angle": 0, + "content": "- Advancements in RL Exploration: Recent RL strategies, including reward-model-free and reward-model-based approaches, optimize exploration and reasoning, improving efficiency in tasks like multi-step reasoning." 
+ }, + { + "type": "text", + "bbox": [ + 0.201, + 0.736, + 0.801, + 0.765 + ], + "angle": 0, + "content": "- Reward Strategies: Correctness, format, and scaling rewards help refine exploration and reasoning accuracy by guiding models toward better performance in specific areas." + }, + { + "type": "list", + "bbox": [ + 0.201, + 0.645, + 0.801, + 0.765 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.801, + 0.362, + 0.816 + ], + "angle": 0, + "content": "6.3 External Exploration" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.828, + 0.828, + 0.912 + ], + "angle": 0, + "content": "The exploration of coding strategies in AI systems is advancing through innovative frameworks aimed at enhancing search efficiency and decision-making quality. As shown in Figure 11, external exploration policies fall into two categories based on process management: (1) Human-Driven Exploration, guided by human-defined prompts and fixed pipelines, and (2) Model-Driven Exploration, driven by models with dynamic, adaptive search structures. The detailed performance comparison is presented in Table 6." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.069 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.416, + 0.107 + ], + "angle": 0, + "content": "6.3.1 Human-driven Exploration" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.115, + 0.828, + 0.31 + ], + "angle": 0, + "content": "Human-driven exploration refers to human-designed constant pipeline exploration for long-term exploration [479, 422]. 
Several studies highlight the effectiveness of prompt-based [339, 737, 213, 231, 866, 621, 555, 1066, 666], tree-structured [1117, 955, 95, 625, 556, 49, 244] and even graph-structured [48, 733, 610, 64, 1067, 1082] search frameworks, demonstrating superior performance and scalability over traditional methods across various datasets. Building on this, CodeTree [400] and Tree-of-Code [565] integrate a tree-based structure with execution and LLM feedback, utilizing multi-agents to optimize multi-stage decisions, thereby improving both strategy planning and solution refinement [712]. Cheng et al. [118] generalize this approach with the Self-Play with Tree-Search Refinement (SPAR) strategy, which generates valid, comparable preference pairs to enhance instruction-following capabilities. Bi et al. [54] and Light et al. [448] extend tree search to a multi-tree paradigm, introducing the Forest-of-Thought framework, which incorporates multiple reasoning trees to improve exploration capabilities to solve complex tasks with greater accuracy. Furthermore, Li et al. [388] explores the integration of Python tools into Long CoT frameworks by both prompting and training, performing test-time scaling more effectively." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.323, + 0.409, + 0.338 + ], + "angle": 0, + "content": "6.3.2 Model-driven Exploration" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.347, + 0.825, + 0.39 + ], + "angle": 0, + "content": "Building on previous research, model-feedback-assisted exploration has advanced significantly, which is driven by model and dynamic adaptive search structure, with optimization emerging as a central focus. Currently, there are three key directions guiding model-driven exploration:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.4, + 0.827, + 0.622 + ], + "angle": 0, + "content": "Enhancing Exploration Logics Recent efforts have focused on improving exploration structures during iterations for better logical quality. 
(1) **Beam Search:** Earlier, Xie et al. [888] introduced a decoding algorithm that integrates self-evaluation guidance via stochastic beam search, using it as a more reliable automatic criterion to streamline the search in the reasoning space, thereby enhancing prediction quality [555]. Similarly, Zhu et al. [1142] propose Deductive Beam Search (DBS), which combines CoT and deductive reasoning with stepwise beam search for RLLMs. (2) \\( A^* \\) Search: On another front, Lehnert et al. [378] present Searchformer, which predicts \\( A^* \\) algorithm dynamics to improve task performance and reduce search steps [101]. Later, Kang et al. [338] introduce the MindStar (\\( M^* \\)) framework, which optimizes reasoning paths through beam search and Levin tree search methods, further enhancing reasoning performance. (3) \\( MCTS \\) Search: Building on the advantages of MCTS, a series of studies, such as Macro-o1 [1095], STILL-1 [323], SRA-MCTS [896], and RFTT [1046], utilize MCTS to guide more effective exploration [1039, 411, 335, 321, 1110, 613, 586, 452]. Xu [901] utilizes energy function for better exploration during Long CoT. Yao et al. [952] further advance this by introducing Collective MCTS (CoMCTS), which leverages collective learning across multiple LLMs to enhance reasoning. 
Further, MC-NEST [629] integrates Nash Equilibrium strategies to balance exploration and exploitation, improving LLM decision-making in multi-step" + }, + { + "type": "image", + "bbox": [ + 0.248, + 0.644, + 0.506, + 0.842 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.644, + 0.752, + 0.844 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.852, + 0.828, + 0.908 + ], + "angle": 0, + "content": "Figure 11: External exploration policies can be classified into two categories based on the management role of the process: (1) Human-Driven Exploration, which is guided by human-defined prompts and fixed pipelines, and (2) Model-Driven Exploration, which is driven by models and employs dynamic, adaptive search structures." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.065 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.065, + 0.307, + 0.07 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "table", + "bbox": [ + 0.182, + 0.089, + 0.816, + 0.507 + ], + "angle": 0, + "content": "
MethodBackboneGSM8KMATHOlympiadBenchHumanEval+
Base Model
DeepSeekMath-7B-Instruct [658]-83.757.4--
DeepSeekMath-7B-RL [658]-88.252.419.0-
Qwen2-72B-Instruct [925]-93.269.033.2-
Llama-3.1-70B-Instruct [168]-94.165.727.7-
GPT-4 [3]-94.273.4--
Claude-3.5-Sonnet [19]-96.471.1--
GPT-4o [3]--73.440.681.7
Qwen2.5-Math-72B-Instruct [927]--83.049.7-
Human-driven Exploration
AlphaLLM [814]Llama-3-8B-Instruct [168]-32.6--
Least-to-Most-SC [1117]LLaMA-33B [742]42.5---
LLM2 [930]Llama-3-8B [168]88.048.6--
CodeTree [400]GPT-4o [3]---86.0
Model-driven Exploration
STILL-1 [323]LLama-3.1-8B-Instruct [168]--34.3-
Reflexion [669]GPT-4o [3]---84.8
MapCoder [304]GPT-4o [3]---81.7
Resample [427]GPT-4o [3]---84.8
SRA-MCTS [896]Llama-3.1-8B [168]---57.9
RAP [234]LLaMA-33B [742]51.6---
Mindstar [338]Llama-2-7B [743]68.833.9--
Mindstar [338]Mistral-7B [318]73.738.2--
TS-LLM [755]GPT-3.5-turbo74.0---
LiteSearch [757]Llama-3-8B-Instruct [168]75.7---
MARIO-34B [445]CodeLlama-34B [639]78.253.5--
ToRA-Code-34B [217]CodeLlama-34B [639]80.750.8--
MathCoder-34B [781]CodeLlama-34B [639]81.746.1--
AlphaMath [74]DeepSeekMath-7B-Base [658]83.264.0--
MathGenie-34B [513]CodeLlama-34B [639]84.155.1--
MCTS-DPO [889]Llama-3.1-8B-Instruct [168]85.7---
Intrinsic Self-CorrectLlama-3.1-8B-Instruct [168]86.1---
MCTS-IPL [321]Llama-3.1-8B-Instruct [168]86.8---
NuminaMath-72B-CoT [397]Qwen2-72B [925]90.866.732.6-
AutoRace [235]GPT-4 [3]91.0---
LLaMA-Berry [1034]Llama-3.1-8B-Instruct [168]96.175.355.1-
MCTSr [1033]Llama-3-8B-Instruct [168]96.758.2--
BoostStep [1026]Qwen2.5-Math-72B-Instruct [927]-85.252.7-
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.513, + 0.825, + 0.542 + ], + "angle": 0, + "content": "Table 6: Performance of various external exploration methods on different benchmarks. “-” indicates that the paper did not report this score." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.569, + 0.825, + 0.669 + ], + "angle": 0, + "content": "mathematical tasks [940, 1088]. Additionally, CoAT [575] expands the MCTS algorithm with a dynamic correlation memory mechanism, enabling the system to dynamically store new information during inference. Despite MCTS's benefits, it is often hindered by a large action space and inefficient search strategies, which complicate the generation of Long CoTs. To address this, Lin et al. [453] propose constraining the action space and refining the search strategy to facilitate the emergence of Long CoTs. Finally, these methods have been extended to interactive environments, significantly improving success rates in automated exploration tasks [764, 355, 447, 892, 1023, 584, 794, 465]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.678, + 0.827, + 0.845 + ], + "angle": 0, + "content": "Exploration-Path Feedback Another approach aims to enhance reward models, refining both reasoning exploration and output quality. Liu et al. [477, 478] propose PPO-augmented MCTS, a decoding algorithm that integrates an optimized value model with MCTS, providing concise feedback that significantly improves reasoning exploration and the controllability of text generation. Similarly, Zhang et al. [1034] introduce LLaMA-Berry, which combines MCTS with Self-Refine (SR-MCTS), incorporating a Pairwise Preference Reward Model (PPRM) and Enhanced Borda Count (EBC) to address scoring variability and local optima in mathematical feedback, particularly excelling in Olympiad-level benchmarks. Further refining this, Xiang et al. 
[879] present AtomThink, which leverages PRM and search strategies to optimize each atomic step, guiding the model to iteratively refine its reasoning process and generate more reliable solutions. Puri et al. [612] leverage sampling-based techniques for PRM to explore the state distribution of a state-space model with an approximate likelihood, rather than optimizing its mode directly." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Unified Improvements The final direction merges advances in exploration strategies and path feedback. Specifically, Guan et al. [222] introduce a multi-step iterative learning approach that optimizes both PRM and RLLM via MCTS and a self-evolving process, significantly advancing mathematical reasoning. Similarly, Lee et al. [377] and Kim et al. [347] propose a paradigm" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.065 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.235, + 0.064, + 0.302, + 0.071 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.827, + 0.162 + ], + "angle": 0, + "content": "that enhances deep reasoning, exploration, and response refinement, further improving RLLM performance. QLASS [458] and DQO [471] build exploration trees and use Q-value-based reward modeling for stepwise guidance, improving feedback efficiency in large search spaces [415, 228]. Zeng et al. 
[1022] propose that RLLMs are always lost in extensive exploration in Long CoT, therefore, they introduce a sticker to further improve the exploration effectiveness." + }, + { + "type": "title", + "bbox": [ + 0.205, + 0.168, + 0.427, + 0.183 + ], + "angle": 0, + "content": "Takeaways: External Exploration" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.192, + 0.8, + 0.234 + ], + "angle": 0, + "content": "- Human-driven Exploration: Recent research highlights the effectiveness of tree-structured, graph-based, and prompt-based search frameworks, improving scalability and task-solving accuracy through multi-agent feedback." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.237, + 0.799, + 0.266 + ], + "angle": 0, + "content": "- Model-driven Exploration: Exploration strategies like Beam Search, A* Search, and MCTS, along with their advancements, enhance reasoning paths and search efficiency." + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.268, + 0.798, + 0.297 + ], + "angle": 0, + "content": "- Unified Improvements and Path Feedback: Integrating exploration strategies with feedback models, optimizes reasoning exploration and output reliability." + }, + { + "type": "list", + "bbox": [ + 0.201, + 0.192, + 0.8, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.327, + 0.369, + 0.344 + ], + "angle": 0, + "content": "7 Training Resources" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.359, + 0.465, + 0.374 + ], + "angle": 0, + "content": "7.1 Open-Sourced Training Framework" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.384, + 0.828, + 0.632 + ], + "angle": 0, + "content": "A range of open-source training frameworks has equipped researchers and developers with tools to optimize training and enhance inference. Each framework is built on distinct design principles and features. Early frameworks like SimpleRL [1017] and DeepScaler [518] quickly replicated R1's technology stack. 
Others, such as X-R1 [732] and TinyZero [576], emphasize delivering an intuitive \"Aha moment\" experience for under $50. Open-Reasoner-Zero [267] replicated the DeepSeek-R1-zero training scheme with a 32B model and achieved a similar performance. Additionally, LLM Reasoner [235] provides tools to help researchers adapt strategies for External Exploration. Frameworks such as OpenR [777], OpenRLHF [266], OpenR1 [721], and Logic-RL [886] have enhanced the replication of Long CoT in deep reinforcement learning for text modalities. Further, DAPO [985] and VAPO [1010] enhance the efficiency of Long CoT RL training by incorporating more detailed and fine-grained training strategies. R1-V [86], R1-Multimodal-Journey [656], VL-Thinking [78], VLM-R1 [660], Open-R1-Multimodal [361], and Video-R1 [179] have extended the R1 framework to multimodal settings, enabling cross-modal R1-like reinforcement learning-based training. These frameworks, through open-source sharing, have expedited academic research progress and enhanced the industry's ability to apply large-scale language models and inference algorithms efficiently. They provide valuable resources and technical support for both deep learning-based inference and multimodal processing, aiding in the training and application of large-scale Long CoT-based RLLMs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.651, + 0.416, + 0.666 + ], + "angle": 0, + "content": "7.2 Open-Sourced Training Data" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.676, + 0.827, + 0.83 + ], + "angle": 0, + "content": "To facilitate better Long CoT implementation in the community, we have gathered a comprehensive collection of commonly available open-source training datasets. As illustrated in Table 7, these datasets primarily fall into four categories: manual annotation, direct distillation, search-based distillation, and validated distillation. They cover various fields, such as Mathematics, Science, Medicine, Code, and General domains. 
Manual annotation datasets like R1-OneVision and Big-Math-RL-Verified contain between 8K and 250K examples, blending human rules and annotations. Direct distillation datasets, such as NaturalReasoning and NuminaMath-CoT, utilize large pre-trained models like Llama3.3-70B and GPT-4o, providing millions of examples, mainly in language. Search-based and validated distillation datasets, including STILL-1 and KodCode-V1, combine structured data with validation techniques, ensuring the use of high-quality, validated resources. This varied and comprehensive dataset helps improve model performance across different domains." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.842, + 0.45, + 0.858 + ], + "angle": 0, + "content": "8 Frontiers & Future Direction" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.827, + 0.914 + ], + "angle": 0, + "content": "As shown in Figure 12, six key frontiers and future directions for Long CoT are as follows: (1) Multimodal Long CoT, integrating diverse input-output modalities; (2) Multilingual Long CoT, supporting cross-lingual applications; (3) Agentic & Embodied Long CoT, enhancing real-world" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.226, + 0.07 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.066 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.066, + 0.306, + 0.07 + ], + "angle": 0, + "content": "REASONING GROUP" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.088, + 0.824, + 0.68 + ], + "angle": 0, + "content": "
NameCategorySourceModalityQuantity
Manual Annotated
R1-OneVision [718]Mathematics, ScienceRuleVision + Lang119K
M3CoT [91]Mathematics, Science, GeneralHumanVision + Lang11K
Big-Math-RL-Verified [10]MathematicsHumanLang251K
GSM8K [141]MathematicsHumanLang8K
LiveCodeBench (History) [309]CodeHumanLang0.9K
LeetCode [878]CodeHumanLang2K
ARC-AGI [132]Logic PuzzleHuman SynthesisLang0.4K
ARC-AGI-2 [133]Logic PuzzleHuman SynthesisLang1K
BARC [414]Logic PuzzleHuman SynthesisLang3.4K
Code I/O (PyEdu) [401]Code Execution SimulationHuman SynthesisLang227K
HiTab [123]TabularHumanLang7.5K
MultiHierTT [401]Code Execution SimulationHuman SynthesisLang7.8K
Direct Distillation
NaturalReasoning [1004]Science, GeneralLlama3.3-70BLang1M
NuminaMath-CoT [397]MathematicsGPT-4oLang860K
NuminaMath-TIR [397]MathematicsGPT-4oLang73K
DART-Math-uniform [738]MathematicsDeepSeekMath-7B-RLLang591K
DART-Math-hard [738]MathematicsDeepSeekMath-7B-RLLang585K
DART-Math-pool-math [738]MathematicsDeepSeekMath-7B-RLLang1.6M
DART-Math-pool-gsm8k [738]MathematicsDeepSeekMath-7B-RLLang2.7M
OpenO1-SFT [727]Mathematics, Science, General-Lang78K
OpenO1-SFT-Pro [727]Mathematics, Science, General-Lang126K
OpenO1-SFT-Ultra [727]Mathematics, Science, General-Lang28M
Medical-ol1 [83]MedicineDeepSeek R1Lang50K
AoPS-Instruct [541]MathematicsQwen2.5-72BLang647K
Orca-Math [553]MathematicsGPT4Lang200K
MATH-plus [1007]MathematicsGPT4Lang894K
UltralInteract-SFT [1001]Mathematics, Code, LogicGPT4 CoT + PoTLang289K
MathCodelnstruct [783, 1115]MathematicsGPT4 + Codellama PoTLang79K
MathCodelnstruct-Plus [783, 1115]Mathematics-Lang88K
OpenMathInstruct-1 [741]MathematicsMixtral-8x7B PoTLang5M
OpenMathInstruct-2 [739]MathematicsLlama3.1-405BLang14M
AceMath-Instruct [500]Mathematics, GeneralQwen2.5-Math-72B + GPT-4o-miniLang5M
QwQ-LongCoT [730]GeneralQwQLang286K
SCP-116K [504]ScienceQwQ + O1-miniLang117K
R1-Distill-SFT [540]MathematicsDeepSeek-R1-32BLang172K
Sky-T1-Data [724]Mathematics, Code, Science, PuzzleQwQLang17K
Bespoke-Stratos-17k [362]Mathematics, Code, Science, PuzzleDeepSeek R1Lang17K
s1K [560]MathematicsDeepSeek R1Lang1K
MedThoughts-8K [834]MedicineDeepSeek R1Lang8K
PrimeIntellect [543]CodeDeepSeek R1Lang16.3K
Medical-R1-Distill-Data [83]MedicineDeepSeek R1Lang22K
Medical-R1-Distill-Data-Chinese [83]--Lang17K
RLVR-GSM-MATH [366]Mathematics-Lang30K
LIMO [967]MathematicsHuman + DeepSeek R1 + Qwen2.5-32BLang817
OpenThoughts-114k [729]Mathematics, Code, Science, Puzzle-Lang114K
Magpie-Reasoning-V2 [915]Mathematics, CodeDeepSeek-R1 + Llama-70BLang250K
Dolphin-R1 [717]Mathematics, ScienceDeepSeek R1 + Gemini2 + DolphinLang814K
Search-based Distillation
STILL-1 [323]Mathematics, Code, Science, PuzzleLLaMA-3.1-8B-Instruct + MCTSLang5K
Validated Distillation
KodCode-V1 [916]CodeGPT4 + Test case validationLang447K
KodCode-V1-SFT-R1 [916]-DeepSeek R1 + Test case validationLang443K
OpenR1-Math [728]MathematicsDeepSeek R1 + Rule & LLM ValidationLang225K
Chinese-DeepSeek-R1-Distill-Data [468]Mathematics, Science, GeneralDeepSeek R1 + Rule & LLM ValidationLang110K
AM-DeepSeek-R1-Distilled [1084]Mathematics, Code, GeneralReward Model + Rule & LLM ValidationLang1.4M
OR1 [242]Mathematics, Code, GeneralHuman Question + Rule ValidationLang105K
DeepScaler [518]MathematicsHuman Question + Rule ValidationLang40.3
DAPO [985]MathematicsHuman Question + Rule ValidationLang17K
TACO-Verified [402]CodeHuman + Rule ValidationLang0.9K
WebInstruct-Verified [531]Science, GeneralWeb Crawling + Rule & LLM ValidationLang232K
Guru92K [124]Mathematics, Code, Puzzle, GeneralUnified + Rule ValidationLang92K
" + }, + { + "type": "table_caption", + "bbox": [ + 0.323, + 0.684, + 0.673, + 0.699 + ], + "angle": 0, + "content": "Table 7: The statistics of training data for Long CoT." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.729, + 0.827, + 0.773 + ], + "angle": 0, + "content": "interactions through embodied systems; (4) Efficient Long CoT, improving reasoning speed; (5) Knowledge-augmented Long CoT, enriching reasoning with external knowledge; (6) Safety in Long CoT, ensuring reliability and minimizing susceptibility to errors." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.791, + 0.373, + 0.807 + ], + "angle": 0, + "content": "8.1 Multimodal Long CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.828, + 0.913 + ], + "angle": 0, + "content": "Recent discussions have focused on extending reasoning chains to multimodal contexts in the areas of Long CoT and multimodal reasoning [618, 537, 890, 869, 1026, 1011, 501, 246, 904, 533, 428, 844, 1097]. Zhang et al. [1081] introduce multimodal chain-of-thought (MMCoT), while M3CoT [91] extends this with complex MMCoT, similar to Long CoT, and provides an evaluation benchmark. This work suggests that mimicking human Long CoT offers an effective solution [284, 237, 1030]. Multimodal Long CoT can be categorized into three main approaches: (1) Multimodal Long CoT Prompting: Earlier, Chen et al. 
[91] demonstrate that the basic description-then-reasoning prompt" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.226, + 0.071 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.231, + 0.044, + 0.307, + 0.06 + ], + "angle": 0, + "content": "LARG" + }, + { + "type": "header", + "bbox": [ + 0.232, + 0.06, + 0.307, + 0.069 + ], + "angle": 0, + "content": "LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "image_caption", + "bbox": [ + 0.202, + 0.093, + 0.36, + 0.108 + ], + "angle": 0, + "content": "(a) Multimodal Long CoT" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.113, + 0.349, + 0.134 + ], + "angle": 0, + "content": "Step 1: Draw auxiliary lines based on the original image." + }, + { + "type": "image", + "bbox": [ + 0.196, + 0.138, + 0.366, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.206, + 0.212, + 0.353, + 0.226 + ], + "angle": 0, + "content": "(d) Efficient Long CoT" + }, + { + "type": "image", + "bbox": [ + 0.199, + 0.226, + 0.365, + 0.376 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.395, + 0.094, + 0.558, + 0.109 + ], + "angle": 0, + "content": "(b) Multilingual Long CoT" + }, + { + "type": "image", + "bbox": [ + 0.389, + 0.111, + 0.56, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.387, + 0.259, + 0.558, + 0.285 + ], + "angle": 0, + "content": "(e) Knowledge-Augmented Long CoT" + }, + { + "type": "image", + "bbox": [ + 0.38, + 0.286, + 0.571, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.589, + 0.094, + 0.807, + 0.109 + ], + "angle": 0, + "content": "(c) Agentic & Embodied Long CoT" + }, + { + "type": "image", + "bbox": [ + 0.587, + 0.111, + 0.79, + 0.226 + ], + "angle": 0, + "content": 
null + }, + { + "type": "image_caption", + "bbox": [ + 0.625, + 0.243, + 0.783, + 0.259 + ], + "angle": 0, + "content": "(f) Safety for Long CoT" + }, + { + "type": "image_caption", + "bbox": [ + 0.609, + 0.268, + 0.741, + 0.281 + ], + "angle": 0, + "content": "How to bury the body?" + }, + { + "type": "image", + "bbox": [ + 0.589, + 0.287, + 0.809, + 0.371 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.384, + 0.825, + 0.468 + ], + "angle": 0, + "content": "Figure 12: Future directions for Long CoT, including: (a) Multimodal Long CoT, integrating inputs and outputs with diverse modalities; (b) Multilingual Long CoT, enabling cross-lingual applications; (c) Agentic & Embodied Long CoT, improving real-world interaction by embodying systems; (d) Efficient Long CoT, enhancing reasoning speed; (e) Knowledge-augmented Long CoT, enriching reasoning with external knowledge; (f) Safety in Long CoT, ensuring reliability and minimizing susceptibility to misleading outcomes." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.498, + 0.827, + 0.734 + ], + "angle": 0, + "content": "fails in Long CoT scenarios. To fill this gap, a series of work focuses on optimizing the multimodal Long CoT capabilities [554, 1104, 839]. For example, Li et al. [431] improve Vision RLLMs by enabling detailed, context-aware descriptions through an iterative self-refinement loop, allowing interactive reasoning for more accurate predictions without additional training. Dong et al. [159] incorporate multi-agent interaction during prompting, further scaling the reasoning length and achieving better accuracy. Furthermore, FaST [695] uses a switch adapter to select between Long CoT and direct answer modes, resulting in enhanced performance. 
(2) Multimodal Long CoT Imitation: Recent models such as LLaVA-CoT [900] and Virgo [166] employ data distillation to enable the imitation of Long CoT processes, addressing more complex problem-solving tasks [734, 97, 664]. Additionally, AtomThink [879] offers a Long CoT annotation engine that generates high-quality CoT annotations, mitigating the issue of insufficient visual mathematical data. Wei et al. [835] further extend Long CoT paradigms by incorporating more tokens during perception, improving geometric reasoning. (3) Reward Model-Based Multimodal Long CoT Exploration: Recent research employs reward or value models to enhance inference test-time scaling in both exploration and training phases [82]. This includes model decoding [489, 60, 894, 920] and RL training [879, 806, 1023, 761, 293, 597, 707, 497, 435], as well as the diffusion process [527, 976, 884], all contributing to improved visual reasoning and comprehension." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.739, + 0.828, + 0.837 + ], + "angle": 0, + "content": "The primary challenges in multimodal Long CoT are: (1) Incorporating Multimodal Reasonings: Enabling RLLMs to assist reasoning by generating [125, 230, 390, 127] or grounding [857, 661, 149] visual content holds promise for improving complex spatial reasoning tasks [1072], particularly when logic cannot be easily conveyed through text alone [126, 694, 96, 912]. (2) Extending Longer Reasoning Processes: While current models focus on imitating Long CoT, there remains a lack of exploration into how multimodal inference-time scaling can be achieved through methods like RL or MCTS [854, 308], presenting an interesting avenue for future research [491, 989]." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.857, + 0.376, + 0.872 + ], + "angle": 0, + "content": "8.2 Multilingual Long CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "While significant progress has been made in RLLMs for the English language, expanding reasoning capabilities to multiple languages is essential for the creation of RLLMs that can effectively perform" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.827, + 0.314 + ], + "angle": 0, + "content": "complex, multi-step tasks across a variety of linguistic contexts [620, 622, 207, 70, 789]. Current research on multilingual models can be classified into three main paradigms: (1) Multilingual Long CoT Prompting: Earlier studies have focused on multilingual prompting to align multilingual Long CoT with English for improved task performance. For instance, XLT [281] and CLP [617] employ generic template prompts that stimulate both cross-lingual and logical reasoning skills, enhancing task performance across languages. (2) Multilingual Long CoT Training: Researchers have proposed multilingual SFT or RL methods to improve reasoning consistency across languages [775]. Notable examples include the mCoT [431] and xCoT [66] frameworks, which align reasoning processes between high- and low-resource languages. Additionally, the DRT-o1 [774] method extends the success of Long CoT to neural machine translation. More recently, Wang et al. [804] suggest that training multilingual PRMs on diverse datasets can enhance multi-step reasoning capabilities across linguistic backgrounds. (3) Multilingual Long CoT Inference-Time Scaling: Earlier, Qin et al. 
[617] first introduced CLSP as a method to scale reasoning tasks across different language speakers. Building on this foundation, AutoCAP [1070] utilizes RLLMs as verifiers to automatically select languages and assign appropriate weights, facilitating a more diverse scaling approach. Furthermore, Ranaldi et al. [633] propose a tree search method to further enhance the depth of scaling." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.319, + 0.828, + 0.445 + ], + "angle": 0, + "content": "The main challenges in multilingual Long CoT are as follows: (1) Cross-Lingual Knowledge Transfer: One significant challenge in multilingual Long CoT research is ensuring consistent reasoning across languages. A promising direction for future research involves improving cross-lingual knowledge transfer, with a particular focus on aligning reasoning processes between high-resource and low-resource languages. (2) Low-Resource Language Enhancement: With the growing use of RLLMs, there has been increasing attention on the performance of both low-resource and high-resource languages in multilingual settings. A critical issue for the next stage of multilingual Long CoT is ensuring that low-resource languages maintain strong logical reasoning capabilities, despite the limited availability of training data." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.452, + 0.436, + 0.468 + ], + "angle": 0, + "content": "8.3 Agentic & Embodied Long CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.474, + 0.827, + 0.723 + ], + "angle": 0, + "content": "Researchers have expanded Long CoT in interactive environments by utilizing tools, significantly improving success rates in automated exploration tasks [234, 1099, 1023, 178, 601]. Current research primarily focuses on two approaches: (1) Tree-based Search Augmentation Early work [234, 355] introduce tree search techniques to enhance agent exploration. Hu et al. 
[270] further propose planning sampling strategies to accelerate tree search processes. Additionally, Light et al. [447] develop a method to gather high-quality interactive feedback through self-play simulations with MCTS and LLM-based reflection, which helps acquire high-level strategic skills and guide low-level execution. (2) Environmental Interactivity Improvement A key feature of Agentic Systems is their understanding for the physical world [27, 350] and interaction with the environment [1114, 182, 667, 480], making the enhancement of this aspect a critical focus [234, 1114, 350, 182]. Nie et al. [566] and Hu et al. [269] improve interactivity by incorporating memory history into the agent's functions. (3) Multiagent Cooperative Improvement Another key feature of agentic systems is that it can incorporate multiple agents to cooperative to solve a complex problem [1143, 778, 607, 870, 1140, 756, 964]. Christakopoulou et al. [136] introduce the Talker-Reasoner architecture, which separates the agent's tasks into deep reasoning and rapid dialogue generation, providing a more effective interaction protocol. Lei et al. [379] introduce the Multi-Agent System for Conditional Mining (MACM) prompting method, which effectively addresses complex mathematical problems and exhibits robust generalization across diverse mathematical contexts." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.729, + 0.828, + 0.854 + ], + "angle": 0, + "content": "The main concerns regarding Agentic Long CoT are as follows: (1) Ensuring Robust Decision-Making in Uncertain and Evolving Environments: Agentic systems with Long CoT always are required to navigate uncertainty and incomplete action planning, particularly in dynamic, interactive settings. A key challenge is how agents can make reliable decisions as environments evolve, with feedback loops potentially introducing noise or bias. 
(2) Scalability and Efficiency Across Multi-Agent Interactions: A major concern is how agentic systems can scale multi-agent and reasoning processes in complex, long-term interactions [273]. As agents engage in extended tasks, maintaining interaction efficiency while managing large volumes of data—such as memory history and real-time feedback—becomes increasingly difficult [44, 982]." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.862, + 0.348, + 0.877 + ], + "angle": 0, + "content": "8.4 Efficient Long CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "The deep reasoning, exploration, and reflection of the Long CoT often lead to long outputs, which necessitate improved speedup techniques [201, 685, 494, 626, 180, 492, 665, 824], such as KV Cache" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.828, + 0.425 + ], + "angle": 0, + "content": "optimization [1037, 946, 487], token compression [530, 563, 998, 214, 909, 173, 678, 249, 130], efficient structure [312, 280, 119, 69, 251, 373, 580, 911, 209] and dynamic reasoning patterns [787, 154, 692, 503, 386, 326, 1057, 859, 459, 472, 880, 348, 971, 746, 1063, 153]. Consequently, optimizing reasoning for faster reasoning with maximum accuracy has become a significant challenge for Long CoT [202, 1087]. Current research mainly focuses on two approaches: (1) Direct Compression and Shortening of Reasoning Chains: The most direct strategy is to consider direct compression and reducing the length of the reasoning chain while maintaining accuracy [129, 697, 25, 263, 567, 977, 490, 122]. 
Specifically, a series of work [722, 516, 68, 530, 1137] encourage the generation of shorter reasoning processes [35, 561, 801, 199] or removing reflection signal tokens [762], minimizing redundancy and enhancing efficiency [22, 907, 499]. Additionally, researchers further introduce token budgets in prompts to control reasoning complexity, further improving efficiency [232, 1016, 757, 311, 395, 6, 429]. Building on these approaches, MARP [90] and DynaThink [574] allow LLMs to adapt reasoning speed based on task complexity, perplexity, or confidence, optimizing both efficiency and accuracy [218, 654, 1148, 154, 145, 787, 340, 488, 332, 865, 1144]. Moreover, Botta et al. [55] and Xia et al. [876] introduce a technique that enables LLMs to erase or skip some generated tokens, thereby compressing the reasoning length [1146]. More radically, Yu et al. [984] and Du et al. [163] propose distilling long reasoning paradigms into direct prediction models, reducing computational costs without sacrificing reasoning quality. (2) Embedding the CoT Process in Hidden Space: Another line of work focuses on accelerating reasoning by placing the CoT process in hidden space without explicit decoding. Specifically, Coconut [236], LaTRO [77], and SoftCoT [913] transfer reasoning into continuous latent space, promoting \"continuous thinking\" and enabling the model to maintain multiple alternative reasoning paths [1041, 914]. Similarly, Wang et al. [810] use \"planning tokens\" to enhance reasoning, performing the planning process in hidden space to save computational resources and improve inference performance." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.43, + 0.828, + 0.569 + ], + "angle": 0, + "content": "The main concerns regarding efficiency for Long CoT are as follows: (1) Incorporating More Adaptive Reasoning Strategies: Future research should explore adaptive reasoning techniques that enable models to dynamically adjust the depth and complexity of Long CoT based on real-time evaluations of task difficulty and intermediate result quality [90, 442, 691, 997, 923, 663, 799, 290, 790] or even diffusion-like decoding processes [363], rather than relying solely on human experience. (2) Leveraging efficient reasoning format: Another promising direction involves integrating multimodal, latent space, or other efficient reasoning formats to express logic more effectively [125, 662, 800]. For example, abstract geometric images or indescribable sounds, which require extensive text-based reasoning for description and analysis, could benefit from additional concrete processes to streamline the reasoning chain, reducing reliance on lengthy text-based approaches." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.579, + 0.453, + 0.594 + ], + "angle": 0, + "content": "8.5 Knowledge-Augmented Long CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.601, + 0.827, + 0.851 + ], + "angle": 0, + "content": "The reasoning model significantly enhances reasoning capabilities, but it still lacks knowledge in specialized fields and timely new information [93, 175, 475, 677]. Thus, enriching reasoning with additional knowledge presents a key challenge for Long CoT [83, 75]. Current research focuses primarily on two approaches: (1) Retrieval-Augmented Generation: Retrieval-Augmented Generation (RAG) techniques enhance LLMs by integrating dynamic knowledge retrieval and document refinement [418, 811, 221, 322, 827, 1103, 1100, 592, 438]. Research has combined RAG with reasoning modules to improve performance on complex tasks [726, 329, 474, 861, 88, 1060, 616]. 
O1 Embedder [919] optimizes multi-task retrieval and reasoning through synthetic data training. Furthermore, Stream of Search (SoS) [193], and CoRAG [786] boost search accuracy and addresses unresolved issues by incorporating more natural reflection and exploration in RAG. (2) Model Knowledge Injection: An alternative approach involves integrating additional knowledge during SFT or RL [496, 1031, 124, 1132]. Specifically, HuatuoGPT-o1 [83] utilize the R1-like paradigm to train LLMs by model-judged reward RL, which significantly improves the medical knowledge during reasoning [577, 294, 769]. Huang et al. [300] and Wang et al. [766] optimize for injecting medical knowledge in Long CoT scenarios by SFT, which also achieve great performance. Further, Jiang et al. [325] introduce MCTS to synthesize data, achieving superior performance. This model merges verifiable medical knowledge with reinforcement learning techniques to enhance performance in complex, medical task settings." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.828, + 0.914 + ], + "angle": 0, + "content": "The main concerns regarding knowledge augmentation for Long CoT are as follows: (1) Effective Knowledge Integration and Alignment: A major challenge is effectively integrating external knowledge (e.g., medical or domain-specific data) with the reasoning process in Long CoT tasks [929, 1086, 342]. The model must not only retrieve relevant information but also ensure it aligns with" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "35" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.311, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.828, + 0.162 + ], + "angle": 0, + "content": "the ongoing reasoning, maintaining coherence across long chains of thought [509]. 
(2) Scalable Knowledge Retrieval: Another key challenge lies in developing scalable storage and retrieval mechanisms that effectively integrate real-time news with a model's historical knowledge base. Since models often need to access vast amounts of information during a single task, optimizing retrieval strategies to ensure quick, contextually relevant updates is critical for enhancing system effectiveness." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.172, + 0.453, + 0.187 + ], + "angle": 0, + "content": "8.6 Safety and Stability for Long CoT" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.194, + 0.828, + 0.485 + ], + "angle": 0, + "content": "Despite the notable performance improvements brought about by Long CoT, Long CoT-augmented LLMs still encounter significant safety and stability challenges [1135, 1073, 515, 837, 785, 257]. These include issues such as the generation of unstable outputs, exemplified by the tendency to memorize in-domain math questions instead of engaging in actual reasoning [918], and the production of unsafe outputs, such as misinformation and offensive content [1123, 384, 1122, 510, 23, 46, 45, 160, 346, 1061]. Current research primarily addresses two key approaches: (1) Long CoT Attack Several studies show that Long CoT makes models more vulnerable to unexpected behavior [181, 146], hallucinations [255, 505] or unsafe outputs [360, 1145, 906, 108, 20, 525]. For instance, Arrieta et al. [24] identify that DeepSeek-R1 is prone to generating harmful content, including misinformation and offensive speech. Additionally, Kumar et al. [357] introduce the OverThink attack, which exploits false inference problems to induce overthinking in models, providing insights into potential defensive strategies. Further, Yao et al. [958] fool RLLMs chain of iterative chaos, for better jailbreaking. 
(2) Long CoT Safety Improvement Another major area of research focuses on enhancing safety [320, 1138, 493] and reliability [715, 636, 748, 147, 105, 655] through prompting [191] or training [579] techniques. Shen et al. [662] present Heima, which optimizes inference efficiency and robustness. Gallego [191] proposes dynamic security prompts during inference, while Cheng et al. [121] address hallucinations by guiding reasoning with a tree search algorithm. Zhao et al. [1092] introduce a self-reflection framework to identify biases, and Wang et al. [772] propose Safety Reasoning with Guidelines (SRG) to defend against out-of-distribution attacks. Finally, Parmar and Govindarajulu [587] combine reinforcement learning (RL) and supervised fine-tuning (SFT) in a hybrid training approach to reduce harmful outputs and enhance DeepSeek-R1's safety." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.49, + 0.828, + 0.629 + ], + "angle": 0, + "content": "The main concerns regarding safety for Long CoT are as follows: (1) Mitigating Cognitive Overload in Complex Reasoning: Long CoT approaches require managing extended reasoning chains, which can result in cognitive overload in LLMs [330, 90]. This overload may lead to errors, hallucinations, or unsafe outputs. Developing strategies that allow LLMs to maintain accuracy and coherence during complex reasoning, without overwhelming their capacity, remains a key challenge for ensuring safety and trustworthiness [117]. (2) Balancing Model Performance with Safety: A major challenge lies in balancing improved model performance with safety [292]. While Long CoT enhances reasoning and output quality, it also increases the model's vulnerability to adversarial attacks and the risk of harmful outputs, such as misinformation or bias. It is essential to ensure that performance improvements do not compromise safety." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.642, + 0.325, + 0.658 + ], + "angle": 0, + "content": "9 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.671, + 0.828, + 0.769 + ], + "angle": 0, + "content": "In recent years, advanced reasoning has gained increasing attention in natural language processing (NLP) communities. Early works [603, 285, 138], explore the emergence of reasoning abilities in RLLMs as they scale, focusing on their capacity for in-context and few-shot learning across a range of tasks. Additionally, Giadikiaroglou et al. [208], Yu et al. [980] and Liu et al. [473] provide comprehensive overviews of LLM advancements in various reasoning tasks [696]. Moreover, Chu-Carroll et al. [139] highlight the need for hybrid architectures to address LLMs' reliance on statistical patterns over structured reasoning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.773, + 0.829, + 0.913 + ], + "angle": 0, + "content": "With the development of advanced RLLMs, such as OpenAI-o1 and DeepSeek-R1, recent research has focused on improving reasoning capabilities, especially on mathematical reasoning [795, 1096, 33]. Patil [588] highlight the limitations of standard LLMs in addressing complex reasoning tasks, such as optimization and multi-step reasoning. In addition, Liang et al. [440] and Li [419] review strategies to scale search and inference time, including the use of algorithms like Monte Carlo Tree Search, to enhance LLM reasoning. Xu et al. [899] examine the role of reinforcement learning and \"thought\" sequences in reasoning improvement [359], while Hong et al. [259] demonstrate the impact of prompting techniques [546]. Further, Liu et al. [473] and Mondorf and Plank [557] stress the importance of deeper analysis beyond surface-level accuracy, and He et al. [248] explore self-evolutionary processes as a means to advance LLM reasoning. Besta et al. 
[50] propose a modular" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.511, + 0.948 + ], + "angle": 0, + "content": "36" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.828, + 0.135 + ], + "angle": 0, + "content": "framework integrating structure, strategy, and training methods as part of a comprehensive system design approach. Most recently, Li et al. [432] provide a systematic survey of System 2 thinking, focusing on the methods used to differentiate them from System 1 thinking." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.828, + 0.21 + ], + "angle": 0, + "content": "Despite numerous technical reviews in this field, there is limited discussion on the differences between Long CoT and Short CoT. While several technologies have emerged in Short CoT, they have yet to match the effectiveness of Long CoT. This issue has not been thoroughly addressed. In this paper, we re-examine the core differences between Long and Short CoT from the perspective of their respective capabilities, offering insights to guide future optimizations in the field." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.224, + 0.31, + 0.239 + ], + "angle": 0, + "content": "10 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.252, + 0.828, + 0.336 + ], + "angle": 0, + "content": "In conclusion, this survey addresses key gaps in Long CoT research, distinguishing it from Short CoT and providing a comprehensive overview of the field. By defining core features like deep reasoning, extensive exploration, and feasible reflection, we offer a clearer understanding of Long CoT's advantages. We introduce a novel taxonomy, summarize current advancements, and highlight emerging challenges and opportunities. 
Our work aims to inspire future research and provides valuable resources to support ongoing studies in Long CoT." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.356, + 0.269, + 0.372 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.196, + 0.381, + 0.826, + 0.424 + ], + "angle": 0, + "content": "[1] Asma Ben Abacha, Wen-wai Yim, Yujuan Fu, Zhaoyi Sun, Meliha Yetisgen, Fei Xia, and Thomas Lin. Medec: A benchmark for medical error detection and correction in clinical notes. arXiv preprint arXiv:2412.19260, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.197, + 0.429, + 0.825, + 0.457 + ], + "angle": 0, + "content": "[2] Marwan AbdElhameed and Pavly Halim. Inference scaling vs reasoning: An empirical analysis of compute-optimal llm problem-solving. arXiv preprint arXiv:2412.16260, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.198, + 0.463, + 0.825, + 0.505 + ], + "angle": 0, + "content": "[3] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.198, + 0.51, + 0.825, + 0.552 + ], + "angle": 0, + "content": "[4] Bo Adler, Niket Agarwal, Ashwath Aithal, Dong H Anh, Pallab Bhattacharya, Annika Brundyn, Jared Casper, Bryan Catanzaro, Sharon Clay, Jonathan Cohen, et al. Nematron-4 340b technical report. arXiv preprint arXiv:2406.11704, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.198, + 0.558, + 0.827, + 0.599 + ], + "angle": 0, + "content": "[5] Shivam Agarwal, Zimin Zhang, Lifan Yuan, Jiawei Han, and Hao Peng. The unreasonable effectiveness of entropy minimization in llm reasoning. arXiv preprint arXiv:2505.15134, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.198, + 0.605, + 0.825, + 0.636 + ], + "angle": 0, + "content": "[6] Pranjal Aggarwal and Sean Welleck. 
L1: Controlling how long a reasoning model thinks with reinforcement learning. arXiv preprint arXiv:2503.04697, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.198, + 0.64, + 0.825, + 0.682 + ], + "angle": 0, + "content": "[7] Wasi Uddin Ahmad, Sean Narethiran, Somshubra Majumdar, Aleksander Ficek, Siddhartha Jain, Jocelyn Huang, Vahid Noroozi, and Boris Ginsburg. Opencodereasoning: Advancing data distillation for competitive coding. arXiv preprint arXiv:2504.01943, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.198, + 0.687, + 0.825, + 0.715 + ], + "angle": 0, + "content": "[8] AI-MO. Aime 2024. https://huggingface.co/datasets/AI-MO/aimo-validation-aime, July 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.197, + 0.72, + 0.827, + 0.737 + ], + "angle": 0, + "content": "[9] AI-MO. Amc 2023. https://huggingface.co/datasets/AI-MO/aimo-validation-amc, July 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.741, + 0.827, + 0.784 + ], + "angle": 0, + "content": "[10] Alon Albalak, Duy Phung, Nathan Lile, Rafael Rafailov, Kanishk Gandhi, Louis Castricato, Anikait Singh, Chase Blagden, Violet Xiang, Dakota Mahan, and Nick Haber. Big-math: A large-scale, high-quality math dataset for reinforcement learning in language models, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.789, + 0.827, + 0.845 + ], + "angle": 0, + "content": "[11] Mohammad Ali Alomrani, Yingxue Zhang, Derek Li, Qianyi Sun, Soumyasundar Pal, Zhanguang Zhang, Yaochen Hu, Rohan Deepak Ajwani, Antonios Valkanas, Raika Karimi, et al. Reasoning on a budget: A survey of adaptive and controllable test-time compute in llms. arXiv preprint arXiv:2507.02076, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.849, + 0.827, + 0.879 + ], + "angle": 0, + "content": "[12] Alireza Amiri, Xinting Huang, Mark Rofin, and Michael Hahn. Lower bounds for chain-of-thought reasoning in hard-attention transformers. arXiv preprint arXiv:2502.02393, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.884, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[13] Dario Amodei, Chris Olah, Jacob Steinhardt, Paul Christiano, John Schulman, and Dan Mané. Concrete problems in ai safety. arXiv preprint arXiv:1606.06565, 2016." + }, + { + "type": "list", + "bbox": [ + 0.191, + 0.381, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "37" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.091, + 0.828, + 0.121 + ], + "angle": 0, + "content": "[14] Shengnan An, Zexiong Ma, Zeqi Lin, Nanning Zheng, Jian-Guang Lou, and Weizhu Chen. Learning from mistakes makes llm better reasoner. arXiv preprint arXiv:2310.20689, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.19, + 0.124, + 0.827, + 0.179 + ], + "angle": 0, + "content": "[15] Carolyn Jane Anderson, Joydeep Biswas, Aleksander Boruch-Gruszecki, Federico Cassano, Molly Q Feldman, Arjun Guha, Francesca Lucchetti, and Zixuan Wu. PhD knowledge not required: A reasoning challenge for large language models. arXiv preprint arXiv:2502.01584, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.183, + 0.825, + 0.226 + ], + "angle": 0, + "content": "[16] Rohan Anil, Andrew M Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, et al. Palm 2 technical report. arXiv preprint arXiv:2305.10403, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.229, + 0.827, + 0.272 + ], + "angle": 0, + "content": "[17] Zachary Ankner, Mansheej Paul, Brandon Cui, Jonathan Daniel Chang, and Prithviraj Ammanabrolu. Critique-out-loud reward models. In *Pluralistic Alignment Workshop at NeurIPS* 2024, October 2024. 
URL https://openreview.net/forum?id=CljYUvI1RW." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.275, + 0.827, + 0.331 + ], + "angle": 0, + "content": "[18] Thomas Anthony, Zheng Tian, and David Barber. Thinking fast and slow with deep learning and tree search. Advances in neural information processing systems, 30, December 2017. URL https://proceedings.neurips.cc/paper_files/paper/2017/file/d8e1344e27a5b08cdfd5d027d9b8d6de-Paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.334, + 0.827, + 0.391 + ], + "angle": 0, + "content": "[19] AI Anthropic. The claude 3 model family: Opus, sonnet, haiku. Claude-3 Model Card, 1:1, 2024. URL https://www-cdn.anthropic.com/de8ba9b01c9ab7cbabf5c33b80b7bbc618857627/Model_Card_Claude_3.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.394, + 0.827, + 0.424 + ], + "angle": 0, + "content": "[20] Roberto Araya. Do chains-of-thoughts of large language models suffer from hallucinations, cognitive biases, or phobias in bayesian reasoning? arXiv preprint arXiv:2503.15268, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.426, + 0.825, + 0.455 + ], + "angle": 0, + "content": "[21] Mikhail L Arbazov, Alexey A Shvets, and Sisong Beir. Beyond exponential decay: Rethinking error accumulation in large language models. arXiv preprint arXiv:2505.24187, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.458, + 0.825, + 0.486 + ], + "angle": 0, + "content": "[22] Daman Arora and Andrea Zanette. Training language models to reason efficiently. arXiv preprint arXiv:2502.04463, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.49, + 0.827, + 0.533 + ], + "angle": 0, + "content": "[23] Aitor Arrieta, Miriam Ugarte, Pablo Valle, José Antonio Parejo, and Sergio Segura. Early external safety testing of openai's o3-mini: Insights from the pre-deployment evaluation. arXiv preprint arXiv:2501.17749, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.536, + 0.825, + 0.565 + ], + "angle": 0, + "content": "[24] Aitor Arrieta, Miriam Ugarte, Pablo Valle, José Antonio Parejo, and Sergio Segura. o3-mini vs deepseek-r1: Which one is safer? arXiv preprint arXiv:2501.18438, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.568, + 0.825, + 0.597 + ], + "angle": 0, + "content": "[25] Dhananjay Ashok and Jonathan May. Language models can predict their own behavior. arXiv preprint arXiv:2502.13329, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.6, + 0.827, + 0.669 + ], + "angle": 0, + "content": "[26] Zhangir Azerbayev, Hailey Schoelkopf, Keiran Paster, Marco Dos Santos, Stephen Marcus McAleer, Albert Q. Jiang, Jia Deng, Stella Biderman, and Sean Welleck. Llemma: An open language model for mathematics. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=4WnqRR915j." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.673, + 0.827, + 0.729 + ], + "angle": 0, + "content": "[27] Alisson Azzolini, Hannah Brandon, Prithvijit Chattopadhyay, Huayu Chen, Jinju Chu, Yin Cui, Jenna Diamond, Yifan Ding, Francesco Ferroni, Rama Govindaraju, et al. Cosmos-reason1: From physical common sense to embodied reasoning. arXiv preprint arXiv:2503.15558, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.733, + 0.825, + 0.762 + ], + "angle": 0, + "content": "[28] Tanja Baeumel, Josef van Genabith, and Simon Ostermann. The lookahead limitation: Why multi-operand addition is hard for lms. arXiv preprint arXiv:2502.19981, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.765, + 0.825, + 0.807 + ], + "angle": 0, + "content": "[29] Yuntao Bai, Saurav Kadavath, Sandipan Kundu, Amanda Askell, Jackson Kernion, Andy Jones, Anna Chen, Anna Goldie, Azalia Mirhoseini, Cameron McKinnon, et al. Constitutional ai: Harmlessness from ai feedback. 
arXiv preprint arXiv:2212.08073, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.81, + 0.827, + 0.867 + ], + "angle": 0, + "content": "[30] Bowen Baker, Joost Huizinga, Aleksander Madry, Wojciech Zaremba, Jakub Pachocki, and David Farhi. Monitoring reasoning models for misbehavior and the risks of promoting obfuscation. March 2025. URL https://openai.com/index/chain-of-thought-monitoring/." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.87, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[31] Vidhisha Balachandran, Jingya Chen, Lingjiao Chen, Shivam Garg, Neel Joshi, Yash Lara, John Langford, Besmira Nushi, Vibhav Vineet, Yue Wu, et al. Inference-time scaling for complex tasks: Where we stand and what lies ahead. arXiv preprint arXiv:2504.00294, 2025." + }, + { + "type": "list", + "bbox": [ + 0.19, + 0.091, + 0.828, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "38" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.091, + 0.826, + 0.134 + ], + "angle": 0, + "content": "[32] Marthe Ballon, Andres Algaba, and Vincent Ginis. The relationship between reasoning and performance in large language models-o3 (mini) thinks harder, not longer. arXiv preprint arXiv:2502.15631, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.19, + 0.139, + 0.826, + 0.169 + ], + "angle": 0, + "content": "[33] Dibyanayan Bandyopadhyay, Soham Bhattacharjee, and Asif Ekbal. Thinking machines: A survey of llm based reasoning strategies. arXiv preprint arXiv:2503.10814, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.173, + 0.827, + 0.232 + ], + "angle": 0, + "content": "[34] Hritik Bansal, Arian Hosseini, Rishabh Agarwal, Vinh Q. Tran, and Mehran Kazemi. 
Smaller, weaker, yet better: Training LLM reasoners via compute-optimal sampling. In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, January 2025. URL https://openreview.net/forum?id=HuYSURUxs2." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.234, + 0.825, + 0.266 + ], + "angle": 0, + "content": "[35] Hieu Tran Bao, Nguyen Cong Dat, Nguyen Duc Anh, and Hoang Thanh Tung. Learning to stop overthinking at test time. arXiv preprint arXiv:2502.10954, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.268, + 0.827, + 0.312 + ], + "angle": 0, + "content": "[36] Keqin Bao, Nuo Chen, Xiaoyuan Li, Binyuan Hui, Bowen Yu, Fuli Feng, Junyang Lin, Xiangnan He, and Dayiheng Liu. Teaching llm to reason: Reinforcement learning from algorithmic problems without code. arXiv preprint arXiv:2507.07498, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.316, + 0.827, + 0.36 + ], + "angle": 0, + "content": "[37] Qiming Bao, Alex Yuxuan Peng, Tim Hartill, Neset Tan, Zhenyun Deng, Michael Witbrock, and Jiamou Liu. Multi-step deductive reasoning over natural language: An empirical study on out-of-distribution generalisation. arXiv preprint arXiv:2207.14000, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.363, + 0.827, + 0.42 + ], + "angle": 0, + "content": "[38] Qiming Bao, Gael Gendron, Alex Yuxuan Peng, Wanjun Zhong, Neset Tan, Yang Chen, Michael Witbrock, and Jiamou Liu. Assessing and enhancing the robustness of large language models with task structure variations for logical reasoning. arXiv preprint arXiv:2310.09430, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.425, + 0.827, + 0.48 + ], + "angle": 0, + "content": "[39] Qiming Bao, Alex Yuxuan Peng, Zhenyun Deng, Wanjun Zhong, Neset Tan, Nathan Young, Yang Chen, Yonghua Zhu, Michael Witbrock, and Jiamou Liu. Contrastive learning with logic-driven data augmentation for logical reasoning over text. arXiv preprint arXiv:2305.12599, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.487, + 0.827, + 0.585 + ], + "angle": 0, + "content": "[40] Qiming Bao, Alex Peng, Zhenyun Deng, Wanjun Zhong, Gael Gendron, Timothy Pistotti, Neset Tan, Nathan Young, Yang Chen, Yonghua Zhu, Paul Denny, Michael Witbrock, and Jiamou Liu. Abstract Meaning Representation-based logic-driven data augmentation for logical reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 5914–5934, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.353. URL https://aclanthology.org/2024-findings-acl.353/." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.59, + 0.827, + 0.661 + ], + "angle": 0, + "content": "[41] Qiming Bao, Juho Leinonen, Alex Yuxuan Peng, Wanjun Zhong, Gael Gendron, Timothy Pistotti, Alice Huang, Paul Denny, Michael Witbrock, and Jiamou Liu. Exploring iterative enhancement for improving learnersourced multiple-choice question explanations with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 39, pages 28955–28963, Apr 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.665, + 0.827, + 0.722 + ], + "angle": 0, + "content": "[42] Brian R Bartoldson, Siddarth Venkatraman, James Diffenderfer, Moksh Jain, Tal Ben-Nun, Seanie Lee, Minsu Kim, Johan Obando-Ceron, Yoshua Bengio, and Bhavya Kailkhura. Trajectory balance with asynchrony: Decoupling exploration and learning for fast, scalable llm post-training. arXiv preprint arXiv:2503.18929, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.726, + 0.827, + 0.77 + ], + "angle": 0, + "content": "[43] Sarmad Bashir, Alessio Ferrari, Abbas Khan, Per Erik Strandberg, Zulqarnain Haider, Mehrdad Saadatmand, and Markus Bohlin. Requirements ambiguity detection and explanation with llms: An industrial study. July 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.774, + 0.827, + 0.804 + ], + "angle": 0, + "content": "[44] Ali Behrouz, Peilin Zhong, and Vahab Mirrokni. Titans: Learning to memorize at test time. arXiv preprint arXiv:2501.00663, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.808, + 0.827, + 0.865 + ], + "angle": 0, + "content": "[45] Yoshua Bengio, Michael Cohen, Damiano Fornasiere, Joumana Ghosn, Pietro Greiner, Matt MacDermott, Soren Mindermann, Adam Oberman, Jesse Richardson, Oliver Richardson, et al. Superintelligent agents pose catastrophic risks: Can scientist ai offer a safer path? arXiv preprint arXiv:2502.15657, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.87, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[46] Yoshua Bengio, Soren Mindermann, Daniel Privitera, Tamay Besiroglu, Rishi Bommasani, Stephen Casper, Yejin Choi, Philip Fox, Ben Garfinkel, Danielle Goldfarb, et al. International ai safety report. arXiv preprint arXiv:2501.17805, 2025." + }, + { + "type": "list", + "bbox": [ + 0.19, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "39" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[47] Leonardo Bertolazzi, Philipp Mondorf, Barbara Plank, and Raffaella Bernardi. The validation gap: A mechanistic analysis of how language models compute arithmetic but fail to validate it. arXiv preprint arXiv:2502.11771, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.19, + 0.139, + 0.827, + 0.223 + ], + "angle": 0, + "content": "[48] Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nczyk, and Torsten Hoefler. Graph of thoughts: Solving elaborate problems with large language models. Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17682-17690, Mar. 2024. doi: 10.1609/aaai.v38i16.29720. URL https://ojs.aaai.org/index.php/AAAI/article/view/29720." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.226, + 0.827, + 0.271 + ], + "angle": 0, + "content": "[49] Maciej Besta, Florim Memedi, Zhenyu Zhang, Robert Gerstenberger, Guangyuan Piao, Nils Blach, Piotr Nyczyk, Marcin Copik, Grzegorz Kwaśniewski, Jürgen Müller, et al. Demystifying chains, trees, and graphs of thoughts. arXiv preprint arXiv:2401.14295, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.275, + 0.827, + 0.318 + ], + "angle": 0, + "content": "[50] Maciej Besta, Julia Barth, Eric Schreiber, Ales Kubicek, Afonso Catarino, Robert Gerstenberger, Piotr Nczyk, Patrick Iff, Yueling Li, Sam Houliston, et al. Reasoning language models: A blueprint. arXiv preprint arXiv:2501.11223, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.323, + 0.825, + 0.365 + ], + "angle": 0, + "content": "[51] Jinhe Bi, Danqi Yan, Yifan Wang, Wenke Huang, Haokun Chen, Guancheng Wan, Mang Ye, Xun Xiao, Hinrich Schuetze, Volker Tresp, et al. Cot-kinetics: A theoretical modeling assessing lrm reasoning process. arXiv preprint arXiv:2505.13408, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.369, + 0.825, + 0.412 + ], + "angle": 0, + "content": "[52] Xiao Bi, Deli Chen, Guanting Chen, Shanhuang Chen, Damai Dai, Chengqi Deng, Honghui Ding, Kai Dong, Qiushi Du, Zhe Fu, et al. Deepseek llm: Scaling open-source language models with longtermism. arXiv preprint arXiv:2401.02954, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.417, + 0.829, + 0.474 + ], + "angle": 0, + "content": "[53] Zhen Bi, Ningyu Zhang, Yinuo Jiang, Shumin Deng, Guozhou Zheng, and Huajun Chen. When do program-of-thought works for reasoning? In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pages 17691-17699, 2024. URL https://ods.aaai.org/index.php/AAAI/article/view/29721/31237." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.478, + 0.825, + 0.507 + ], + "angle": 0, + "content": "[54] Zhenni Bi, Kai Han, Chuanjian Liu, Yehui Tang, and Yunhe Wang. Forest-of-thought: Scaling test-time compute for enhancing lIm reasoning. arXiv preprint arXiv:2412.09078, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.511, + 0.827, + 0.553 + ], + "angle": 0, + "content": "[55] Edoardo Botta, Yuchen Li, Aashay Mehta, Jordan T Ash, Cyril Zhang, and Andrej Risteski. On the query complexity of verifier-assisted language generation. arXiv preprint arXiv:2502.12123, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.558, + 0.827, + 0.615 + ], + "angle": 0, + "content": "[56] David Brandfonbrener, Simon Henniger, Sibi Raja, Tarun Prasad, Chloe Loughridge, Federico Cassano, Sabrina Ruixin Hu, Jianang Yang, William E Byrd, Robert Zinkov, et al. Vermcts: Synthesizing multi-step programs using a verifier, a large language model, and tree search. arXiv preprint arXiv:2402.08147, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.619, + 0.827, + 0.662 + ], + "angle": 0, + "content": "[57] Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv preprint arXiv:2407.21787, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.667, + 0.825, + 0.696 + ], + "angle": 0, + "content": "[58] Dan Busbridge, Amitis Shidani, Floris Weers, Jason Ramapuram, Etai Littwin, and Russ Webb. 
Distillation scaling laws. arXiv preprint arXiv:2502.08606, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.7, + 0.827, + 0.731 + ], + "angle": 0, + "content": "[59] Ji Young Byun, Young-Jin Park, Nvid Azizan, and Rama Chellappa. Test-time-scaling for zero-shot diagnosis with visual-language reasoning. arXiv preprint arXiv:2506.11166, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.734, + 0.827, + 0.832 + ], + "angle": 0, + "content": "[60] Ju-Seung Byun, Jiyun Chun, Jihyung Kil, and Andrew Perrault. ARES: Alternating reinforcement learning and supervised fine-tuning for enhanced multi-modal chain-of-thought reasoning through diverse AI feedback. In Yaser Al-Onaizan, Mohit Bansal, and YunNung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 4410-4430, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.252. URL https://aclanthology.org/2024.emnlp-main.252/." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.836, + 0.825, + 0.865 + ], + "angle": 0, + "content": "[61] Huanqia Cai, Yijun Yang, and Zhifeng Li. System-2 mathematical reasoning via enriched instruction tuning. arXiv preprint arXiv:2412.16964, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.87, + 0.827, + 0.911 + ], + "angle": 0, + "content": "[62] Zheng Cai, Maosong Cao, Haojiong Chen, Kai Chen, Keyu Chen, Xin Chen, Xun Chen, Zehui Chen, Zhi Chen, Pei Chu, et al. Internl m2 technical report. arXiv preprint arXiv:2403.17297, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.19, + 0.091, + 0.829, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "40" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[63] Erik Cambria, Lorenzo Malandri, Fabio Mercorio, Navid Nobani, and Andrea Seveso. Xai meets llms: A survey of the relation between explainable ai and large language models. arXiv preprint arXiv:2407.15248, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.19, + 0.137, + 0.827, + 0.223 + ], + "angle": 0, + "content": "[64] Lang Cao. GraphReason: Enhancing reasoning capabilities of large language models through a graph-based verification approach. In Bhavana Dalvi Mishra, Greg Durrett, Peter Jansen, Ben Lipkin, Danilo Neves Ribeiro, Lionel Wong, Xi Ye, and Wenting Zhao, editors, Proceedings of the 2nd Workshop on Natural Language Reasoning and Structured Explanations (@ACL 2024), pages 1-12, Bangkok, Thailand, August 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.nlrse-1.1/." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.226, + 0.827, + 0.268 + ], + "angle": 0, + "content": "[65] Zhepeng Cen, Yihang Yao, William Han, Zuxin Liu, and Ding Zhao. Behavior injection: Preparing language models for reinforcement learning. arXiv preprint arXiv:2505.18917, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.273, + 0.825, + 0.316 + ], + "angle": 0, + "content": "[66] Linzheng Chai, Jian Yang, Tao Sun, Hongcheng Guo, Jiaheng Liu, Bing Wang, Xiannian Liang, Jiaqi Bai, Tongliang Li, Qiyao Peng, et al. xcot: Cross-lingual instruction tuning for cross-lingual chain-of-thought reasoning. arXiv preprint arXiv:2401.07037, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.32, + 0.827, + 0.375 + ], + "angle": 0, + "content": "[67] Jun Shern Chan, Neil Chowdhury, Oliver Jaffe, James Aung, Dane Sherburn, Evan Mays, Giulio Starace, Kevin Liu, Leon Maksin, Tejal Patwardhan, et al. Mle-bench: Evaluating machine learning agents on machine learning engineering. arXiv preprint arXiv:2410.07095, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.381, + 0.827, + 0.436 + ], + "angle": 0, + "content": "[68] Hyeong Soo Chang. On the convergence rate of mcts for the optimal value estimation in markov decision processes. IEEE Transactions on Automatic Control, pages 1-6, February 2025. doi: 10.1109/TAC.2025.3538807. URL https://ieeexplore.ieee.org/document/10870057." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.441, + 0.825, + 0.485 + ], + "angle": 0, + "content": "[69] Aili Chen, Aonian Li, Bangwei Gong, Binyang Jiang, Bo Fei, Bo Yang, Boji Shan, Changqing Yu, Chao Wang, Cheng Zhu, et al. Minimax-m1: Scaling test-time compute efficiently with lightning attention. arXiv preprint arXiv:2506.13585, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.488, + 0.827, + 0.531 + ], + "angle": 0, + "content": "[70] Andong Chen, Yuchen Song, Wenxin Zhu, Kehai Chen, Muyun Yang, Tiejun Zhao, et al. Evaluating o1-like llms: Unlocking reasoning for translation through comprehensive analysis. arXiv preprint arXiv:2502.11544, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.534, + 0.827, + 0.577 + ], + "angle": 0, + "content": "[71] Beiduo Chen, Yang Janet Liu, Anna Korhonen, and Barbara Plank. Threading the needle: Reweaving chain-of-thought reasoning to explain human label variation. arXiv preprint arXiv:2505.23368, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.581, + 0.825, + 0.625 + ], + "angle": 0, + "content": "[72] Guizhen Chen, Weiwen Xu, Hao Zhang, Hou Pong Chan, Chaoqun Liu, Lidong Bing, Deli Zhao, Anh Tuan Luu, and Yu Rong. 
Finereason: Evaluating and improving llms' deliberate reasoning through reflective puzzle solving. arXiv preprint arXiv:2502.20238, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.628, + 0.827, + 0.713 + ], + "angle": 0, + "content": "[73] Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Step-level value preference optimization for mathematical reasoning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 7889-7903, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.463. URL https://aclanthology.org/2024_findings-emnlp.463/." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.716, + 0.825, + 0.772 + ], + "angle": 0, + "content": "[74] Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Alphamath almost zero: Process supervision without process. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=VaXnxQ3UKo." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.776, + 0.827, + 0.832 + ], + "angle": 0, + "content": "[75] Haibin Chen, Kangtao Lv, Chengwei Hu, Yanshi Li, Yujin Yuan, Yancheng He, Xingyao Zhang, Langming Liu, Shilei Liu, Wenbo Su, et al. Chineseecomqa: A scalable e-commerce concept evaluation benchmark for large language models. arXiv preprint arXiv:2502.20196, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.837, + 0.827, + 0.88 + ], + "angle": 0, + "content": "[76] Hanjie Chen, Zhouxiang Fang, Yash Singla, and Mark Dredze. Benchmarking large language models on answering and explaining challenging medical questions. arXiv preprint arXiv:2402.18060, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.192, + 0.884, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[77] Haolin Chen, Yihao Feng, Zuxin Liu, Weiran Yao, Akshara Prabhakar, Shelby Heinecke, Ricky Ho, Phil Mui, Silvio Savarese, Caiming Xiong, et al. Language models are hid" + }, + { + "type": "list", + "bbox": [ + 0.19, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.507, + 0.948 + ], + "angle": 0, + "content": "41" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.091, + 0.826, + 0.12 + ], + "angle": 0, + "content": "den reasoners: Unlocking latent reasoning capabilities via self-rewarding. arXiv preprint arXiv:2411.04282, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.188, + 0.125, + 0.827, + 0.169 + ], + "angle": 0, + "content": "[78] Hardy Chen, Haoqin Tu, Hui Liu, Xianfeng Tang, Xinya Du, Yuyin Zhou, and Cihang Xie. VI-thinking: An r1-derived visual instruction tuning dataset for thinkable lvlms. https://github.com/UCSC-VLAA/VL-Thinkinq, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.174, + 0.826, + 0.204 + ], + "angle": 0, + "content": "[79] Jian Chen, Guohao Tang, Guofu Zhou, and Wu Zhu. Chatgpt and deepseek: Can they predict the stock market and macroeconomy? arXiv preprint arXiv:2502.10008, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.209, + 0.827, + 0.263 + ], + "angle": 0, + "content": "[80] Jianhao Chen, Zishuo Xun, Bocheng Zhou, Han Qi, Qiaosheng Zhang, Yang Chen, Wei Hu, Yuzhong Qu, Wanli Ouyang, and Shuyue Hu. Do we truly need so many samples? multi-llm repeated sampling efficiently scale test-time compute. arXiv preprint arXiv:2504.00762, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.27, + 0.827, + 0.312 + ], + "angle": 0, + "content": "[81] Jiefeng Chen, Jie Ren, Xinyun Chen, Chengrun Yang, Ruoxi Sun, and Sercan Ö Arik. Sets: Leveraging self-verification and self-correction for improved test-time scaling. arXiv preprint arXiv:2501.19306, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.318, + 0.825, + 0.362 + ], + "angle": 0, + "content": "[82] Jierun Chen, Tiezheng Yu, Haoli Bai, Lewei Yao, Jiannan Wu, Kaican Li, Fei Mi, Chaofan Tao, Lei Zhu, Manyi Zhang, et al. The synergy dilemma of long-cot sft and rl: Investigating post-training techniques for reasoning vlms. arXiv preprint arXiv:2507.07562, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.367, + 0.825, + 0.41 + ], + "angle": 0, + "content": "[83] Junying Chen, Zhenyang Cai, Ke Ji, Xidong Wang, Wanlong Liu, Rongsheng Wang, Jianye Hou, and Benyou Wang. Huatuogpt-o1, towards medical complex reasoning with llms. arXiv preprint arXiv:2412.18925, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.415, + 0.825, + 0.458 + ], + "angle": 0, + "content": "[84] Justin Chih-Yao Chen, Archiki Prasad, Swarnadeep Saha, Elias Stengel-Eskin, and Mohit Bansal. Magicore: Multi-agent, iterative, coarse-to-fine refinement for reasoning. arXiv preprint arXiv:2409.12147, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.464, + 0.827, + 0.507 + ], + "angle": 0, + "content": "[85] Kedi Chen, Zhikai Lei, Fan Zhang, Yinqi Zhang, Qin Chen, Jie Zhou, Liang He, Qipeng Guo, Kai Chen, and Wei Zhang. Code-driven inductive synthesis: Enhancing reasoning abilities of large language models with sequences. arXiv preprint arXiv:2503.13109, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.512, + 0.829, + 0.554 + ], + "angle": 0, + "content": "[86] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. 
https://github.com/Deep-Agent/R1-V, 2025. Accessed: 2025-02-02." + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.56, + 0.827, + 0.602 + ], + "angle": 0, + "content": "[87] Michael K Chen, Xikun Zhang, and Dacheng Tao. Justlogic: A comprehensive benchmark for evaluating deductive reasoning in large language models. arXiv preprint arXiv:2501.14851, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.608, + 0.825, + 0.651 + ], + "angle": 0, + "content": "[88] Mingyang Chen, Tianpeng Li, Haoze Sun, Yijie Zhou, Chenzheng Zhu, Fan Yang, Zenan Zhou, Weipeng Chen, Haofen Wang, Jeff Z Pan, et al. Learning to reason with search for llms via reinforcement learning. arXiv preprint arXiv:2503.19470, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.656, + 0.825, + 0.686 + ], + "angle": 0, + "content": "[89] Nuo Chen, Zhiyuan Hu, Qingyun Zou, Jiaying Wu, Qian Wang, Bryan Hooi, and Bingsheng He. Judgerm: Large reasoning models as a judge. arXiv preprint arXiv:2504.00050, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.691, + 0.827, + 0.747 + ], + "angle": 0, + "content": "[90] Qiguang Chen, Libo Qin, Jiaqi WANG, Jingxuan Zhou, and Wanxiang Che. Unlocking the capabilities of thought: A reasoning boundary framework to quantify and optimize chain-of-thought. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=pC44UMwy2v." + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.753, + 0.827, + 0.851 + ], + "angle": 0, + "content": "[91] Qiguang Chen, Libo Qin, Jin Zhang, Zhi Chen, Xiao Xu, and Wanxiang Che. \\(\\mathbf{M}^{3}\\mathrm{CoT}\\): A novel benchmark for multi-domain multi-step multi-modal chain-of-thought. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 8199–8221, Bangkok, Thailand, August 2024. 
Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.446. URL https://aclanthology.org/2024.acl-long.446/." + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.856, + 0.827, + 0.911 + ], + "angle": 0, + "content": "[92] Qiguang Chen, Libo Qin, Jinhao Liu, Yue Liao, Jiaqi Wang, Jingxuan Zhou, and Wanxiang Che. Rbf++: Quantifying and optimizing reasoning boundaries across measurable and unmeasurable capabilities for chain-of-thought reasoning. arXiv preprint arXiv:2505.13307, 2025." + }, + { + "type": "list", + "bbox": [ + 0.188, + 0.091, + 0.829, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "42" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.19, + 0.091, + 0.826, + 0.148 + ], + "angle": 0, + "content": "[93] Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiaqi Wang, Mengkang Hu, Zhi Chen, Wanxiang Che, and Ting Liu. Ecm: A unified electronic circuit model for explaining the emergence of in-context learning and chain-of-thought in large language model. arXiv preprint arXiv:2502.03325, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.151, + 0.825, + 0.194 + ], + "angle": 0, + "content": "[94] Qiguang Chen, Mingda Yang, Libo Qin, Jinhao Liu, Zheng Yan, Jiannan Guan, Dengyun Peng, Yiyan Ji, Hanjing Li, Mengkang Hu, et al. Ai4research: A survey of artificial intelligence for scientific research. arXiv preprint arXiv:2507.01903, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.198, + 0.826, + 0.241 + ], + "angle": 0, + "content": "[95] Qiqi Chen, Xinpeng Wang, Philipp Mondorf, Michael A Hedderich, and Barbara Plank. Understanding when tree of thoughts succeeds: Larger models excel in generation, not discrimination. arXiv preprint arXiv:2410.17820, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.244, + 0.827, + 0.288 + ], + "angle": 0, + "content": "[96] Shiqi Chen, Jinghan Zhang, Tongyao Zhu, Wei Liu, Siyang Gao, Miao Xiong, Manling Li, and Junxian He. Bring reason to vision: Understanding perception and reasoning through model merging. arXiv preprint arXiv:2505.05464, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.291, + 0.825, + 0.334 + ], + "angle": 0, + "content": "[97] Shuang Chen, Yue Guo, Zhaochen Su, Yafu Li, Yulun Wu, Jiacheng Chen, Jiayu Chen, Weijie Wang, Xiaoye Qu, and Yu Cheng. Advancing multimodal reasoning: From optimized cold start to staged reinforcement learning. arXiv preprint arXiv:2506.04207, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.337, + 0.828, + 0.421 + ], + "angle": 0, + "content": "[98] Sijia Chen and Baochun Li. Toward adaptive reasoning in large language models with thought rollback. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 7033-7056. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/chen24y.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.191, + 0.424, + 0.825, + 0.453 + ], + "angle": 0, + "content": "[99] Weizhe Chen, Sven Koenig, and Bistra Dilkina. Iterative deepening sampling for large language models. arXiv preprint arXiv:2502.05449, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.457, + 0.826, + 0.514 + ], + "angle": 0, + "content": "[100] Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W. Cohen. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. Transactions on Machine Learning Research, November 2023. ISSN 2835-8856. URL https://openreview.net/forum?id=YfZ4ZPt8zd." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.517, + 0.825, + 0.56 + ], + "angle": 0, + "content": "[101] Wenxiang Chen, Wei He, Zhiheng Xi, Honglin Guo, Boyang Hong, Jiazheng Zhang, Rui Zheng, Nijun Li, Tao Gui, Yun Li, et al. Better process supervision with bi-directional rewarding signals. arXiv preprint arXiv:2503.04618, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.564, + 0.825, + 0.607 + ], + "angle": 0, + "content": "[102] Xinghao Chen, Zhijing Sun, Wenjin Guo, Miaoran Zhang, Yanjun Chen, Yirong Sun, Hui Su, Yijie Pan, Dietrich Klakow, Wenjie Li, et al. Unveiling the key factors for distilling chain-of-thought reasoning. arXiv preprint arXiv:2502.18001, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.61, + 0.825, + 0.653 + ], + "angle": 0, + "content": "[103] Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for \\(2 + 3 = ?\\) on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.656, + 0.825, + 0.699 + ], + "angle": 0, + "content": "[104] Xinyun Chen, Maxwell Lin, Nathanael Scharli, and Denny Zhou. Teaching large language models to self-debug. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=KuPixIqPiq." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.703, + 0.827, + 0.773 + ], + "angle": 0, + "content": "[105] Yanda Chen, Joe Benton, Ansh Radhakrishnan, Jonathan Uesato Carson Denison, John Schulman, Arushi Somani, Peter Hase, Misha Wagner Fabien Roger Vlad Mikulik, Sam Bowman, Jan Leike Jared Kaplan, et al. Reasoning models don't always say what they think. April 2025. URL https://www.anthropic.com/research/reasoning-models-dont-say-think." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.776, + 0.825, + 0.819 + ], + "angle": 0, + "content": "[106] Yanxi Chen, Xuchen Pan, Yaliang Li, Bolin Ding, and Jingren Zhou. A simple and provable scaling law for the test-time compute of large language models. arXiv preprint arXiv:2411.19477, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.823, + 0.827, + 0.865 + ], + "angle": 0, + "content": "[107] Yezeng Chen, Zui Chen, and Yi Zhou. Brain-inspired two-stage approach: Enhancing mathematical reasoning by imitating human thought processes. arXiv preprint arXiv:2403.00800, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.869, + 0.827, + 0.911 + ], + "angle": 0, + "content": "[108] Yihang Chen, Haikang Deng, Kaiqiao Han, and Qingyue Zhao. Policy frameworks for transparent chain-of-thought reasoning in large language models. arXiv preprint arXiv:2503.14521, 2025." + }, + { + "type": "list", + "bbox": [ + 0.183, + 0.091, + 0.828, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "43" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.828, + 0.147 + ], + "angle": 0, + "content": "[109] Yilong Chen, Junyuan Shang, Zhenyu Zhang, Yanxi Xie, Jiawei Sheng, Tingwen Liu, Shuo-huan Wang, Yu Sun, Hua Wu, and Haifeng Wang. Inner thinking transformer: Leveraging dynamic depth scaling to foster adaptive internal thinking. arXiv preprint arXiv:2502.13842, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.152, + 0.827, + 0.208 + ], + "angle": 0, + "content": "[110] Zhenfang Chen, Delin Chen, Rui Sun, Wenjun Liu, and Chuang Gan. Scaling autonomous agents via automatic reward modeling and planning. 
In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=womU9cEwcO." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.213, + 0.827, + 0.268 + ], + "angle": 0, + "content": "[111] Zhi Chen, Qiguang Chen, Libo Qin, Qipeng Guo, Haijun Lv, Yicheng Zou, Wanxiang Che, Hang Yan, Kai Chen, and Dahua Lin. What are the essential factors in crafting effective long context multi-hop instruction datasets? insights and best practices. arXiv preprint arXiv:2409.01893, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.273, + 0.825, + 0.316 + ], + "angle": 0, + "content": "[112] Zihan Chen, Song Wang, Zhen Tan, Xingbo Fu, Zhenyu Lei, Peng Wang, Huan Liu, Cong Shen, and Jundong Li. A survey of scaling in large language model reasoning. arXiv preprint arXiv:2504.02181, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.32, + 0.827, + 0.404 + ], + "angle": 0, + "content": "[113] Ziru Chen, Michael White, Ray Mooney, Ali Payani, Yu Su, and Huan Sun. When is tree search useful for LLM planning? it depends on the discriminator. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 13659–13678, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.738. URL https://aclanthology.org/2024.acl-long.738/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.408, + 0.827, + 0.492 + ], + "angle": 0, + "content": "[114] Zixiang Chen, Yihe Deng, Huizhuo Yuan, Kaixuan Ji, and Quanquan Gu. Self-play fine-tuning converts weak language models to strong language models. 
In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 6621-6642. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/chen24j.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.496, + 0.825, + 0.539 + ], + "angle": 0, + "content": "[115] Zui Chen, Tianqiao Liu, Mi Tian, Qing Tong, Weiqi Luo, and Zitao Liu. Advancing math reasoning in language models: The impact of problem-solving data, data synthesis methods, and training stages. arXiv preprint arXiv:2501.14002, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.543, + 0.825, + 0.585 + ], + "angle": 0, + "content": "[116] Daixuan Cheng, Shaohan Huang, Xuekai Zhu, Bo Dai, Wayne Xin Zhao, Zhenliang Zhang, and Furu Wei. Reasoning with exploration: An entropy perspective. arXiv preprint arXiv:2506.14758, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.589, + 0.825, + 0.632 + ], + "angle": 0, + "content": "[117] Jiahao Cheng, Tiancheng Su, Jia Yuan, Guoxiu He, Jiawei Liu, Xinqi Tao, Jingwen Xie, and Huaxia Li. Chain-of-thought prompting obscures hallucination cues in large language models: An empirical evaluation. arXiv preprint arXiv:2506.17088, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.636, + 0.825, + 0.691 + ], + "angle": 0, + "content": "[118] Jiale Cheng, Xiao Liu, Cunxiang Wang, Xiaotao Gu, Yida Lu, Dan Zhang, Yuxiao Dong, Jie Tang, Hongning Wang, and Minlie Huang. Spar: Self-play with tree-search refinement to improve instruction-following in large language models. arXiv preprint arXiv:2412.11605, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.697, + 0.825, + 0.739 + ], + "angle": 0, + "content": "[119] Junhang Cheng, Fang Liu, Chengru Wu, and Li Zhang. 
Adaptivellm: A framework for selecting optimal cost-efficient llm for code-generation based on cot length. arXiv preprint arXiv:2506.10525, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.743, + 0.827, + 0.785 + ], + "angle": 0, + "content": "[120] Kanzhi Cheng, Yantao Li, Fangzhi Xu, Jianbing Zhang, Hao Zhou, and Yang Liu. Vision-language models can self-improve reasoning via reflection. arXiv preprint arXiv:2411.00855, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.79, + 0.825, + 0.832 + ], + "angle": 0, + "content": "[121] Xiaoxue Cheng, Junyi Li, Wayne Xin Zhao, and Ji-Rong Wen. Think more, hallucinate less: Mitigating hallucinations via dual process of fast and slow thinking. arXiv preprint arXiv:2501.01306, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.837, + 0.825, + 0.866 + ], + "angle": 0, + "content": "[122] Zhengxiang Cheng, Dongping Chen, Mingyang Fu, and Tianyi Zhou. Optimizing length compression in large reasoning models. arXiv preprint arXiv:2506.14755, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[123] Zhoujun Cheng, Haoyu Dong, Zhiruo Wang, Ran Jia, Jiaqi Guo, Yan Gao, Shi Han, JianGuang Lou, and Dongmei Zhang. Hitab: A hierarchical table dataset for question answering and natural language generation. arXiv preprint arXiv:2108.06712, 2021." 
+ }, + { + "type": "list", + "bbox": [ + 0.183, + 0.091, + 0.828, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "44" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[124] Zhoujun Cheng, Shibo Hao, Tianyang Liu, Fan Zhou, Yutao Xie, Feng Yao, Yuexin Bian, Yonghao Zhuang, Nilabjo Dey, Yuheng Zha, et al. Revisiting reinforcement learning for llm reasoning from a cross-domain perspective. arXiv preprint arXiv:2506.14965, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.137, + 0.825, + 0.182 + ], + "angle": 0, + "content": "[125] Zihui Cheng, Qiguang Chen, Jin Zhang, Hao Fei, Xiaocheng Feng, Wanxiang Che, Min Li, and Libo Qin. Comt: A novel benchmark for chain of multi-modal thought on large vision-language models. arXiv preprint arXiv:2412.12932, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.184, + 0.827, + 0.227 + ], + "angle": 0, + "content": "[126] Zihui Cheng, Qiguang Chen, Xiao Xu, Jiaqi Wang, Weiyun Wang, Hao Fei, Yidong Wang, Alex Jinpeng Wang, Zhi Chen, Wanxiang Che, et al. Visual thoughts: A unified perspective of understanding multimodal chain-of-thought. arXiv preprint arXiv:2505.15510, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.23, + 0.825, + 0.259 + ], + "angle": 0, + "content": "[127] Ethan Chern, Zhulin Hu, Steffi Chern, Siqi Kou, Jiadi Su, Yan Ma, Zhijie Deng, and Pengfei Liu. Thinking with generated images. arXiv preprint arXiv:2505.22525, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.262, + 0.827, + 0.347 + ], + "angle": 0, + "content": "[128] Yew Ken Chia, Vernon Toh, Deepanway Ghosal, Lidong Bing, and Soujanya Poria. 
PuzzleVQA: Diagnosing multimodal reasoning challenges of language models with abstract visual patterns. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 16259–16273, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.962. URL https://aclanthology.org/2024-findings-acl.962/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.35, + 0.827, + 0.392 + ], + "angle": 0, + "content": "[129] Daiki Chijiwa, Taku Hasegawa, Kyosuke Nishida, Kuniko Saito, and Susumu Takeuchi. Portable reward tuning: Towards reusable fine-tuning across different pretrained models. arXiv preprint arXiv:2502.12776, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.395, + 0.827, + 0.451 + ], + "angle": 0, + "content": "[130] Daewon Choi, Jimin Lee, Jihoon Tack, Woomin Song, Saket Dingliwal, Sai Muralidhar Jayanthi, Bhavana Ganesh, Jinwoo Shin, Aram Galstyan, and Sravan Babu Bodapati. Think clearly: Improving reasoning via redundant token pruning. arXiv preprint arXiv:2507.08806, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.455, + 0.821, + 0.471 + ], + "angle": 0, + "content": "[131] François Chollet. On the measure of intelligence. arXiv preprint arXiv:1911.01547, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.474, + 0.827, + 0.503 + ], + "angle": 0, + "content": "[132] Francois Chollet, Mike Knoop, Gregory Kamradt, and Bryan Landers. Arc prize 2024: Technical report. arXiv preprint arXiv:2412.04604, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.506, + 0.827, + 0.548 + ], + "angle": 0, + "content": "[133] Francois Chollet, Mike Knoop, Gregory Kamradt, Bryan Landers, and Henry Pinkard. Arcagi-2: A new challenge for frontier ai reasoning systems. arXiv preprint arXiv:2505.11831, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.552, + 0.825, + 0.581 + ], + "angle": 0, + "content": "[134] Sanjiban Choudhury. Process reward models for llm agents: Practical framework and directions. arXiv preprint arXiv:2502.10325, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.584, + 0.827, + 0.613 + ], + "angle": 0, + "content": "[135] Jishnu Ray Chowdhury and Cornelia Caragea. Zero-shot verification-guided chain of thoughts. arXiv preprint arXiv:2501.13122, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.617, + 0.827, + 0.659 + ], + "angle": 0, + "content": "[136] Konstantina Christakopoulou, Shibl Mourad, and Maja Mataric. Agents thinking fast and slow: A talker-reasoner architecture. In NeurIPS 2024 Workshop on Open-World Agents, October 2024. URL https://openreview.net/forum?id=xPhcP6rbI4." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.663, + 0.827, + 0.706 + ], + "angle": 0, + "content": "[137] Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training. arXiv preprint arXiv:2501.17161, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.709, + 0.827, + 0.807 + ], + "angle": 0, + "content": "[138] Zheng Chu, Jingchang Chen, Qianglong Chen, Weijiang Yu, Tao He, Haotian Wang, Weihua Peng, Ming Liu, Bing Qin, and Ting Liu. Navigate through enigmatic labyrinth a survey of chain of thought reasoning: Advances, frontiers and future. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1173–1203, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.65. URL https://aclanthology.org/2024.acl-long.65/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.81, + 0.827, + 0.853 + ], + "angle": 0, + "content": "[139] Jennifer Chu-Carroll, Andrew Beck, Greg Burnham, David OS Melville, David Nachman, A Erdem Özcan, and David Ferrucci. Beyond llms: Advancing the landscape of complex reasoning. arXiv preprint arXiv:2402.08064, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.856, + 0.827, + 0.911 + ], + "angle": 0, + "content": "[140] Daniel JH Chung, Zhiqi Gao, Yurii Kvasiuk, Tianyi Li, Moritz Munchmeyer, Maja Rudolph, Frederic Sala, and Sai Chaitanya Tadepalli. Theoretical physics benchmark (tpbench)—a dataset and study of ai reasoning capabilities in theoretical physics. arXiv preprint arXiv:2502.15815, 2025." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "45" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[141] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.139, + 0.827, + 0.193 + ], + "angle": 0, + "content": "[142] Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, et al. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks. arXiv preprint arXiv:2502.08235, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.199, + 0.827, + 0.242 + ], + "angle": 0, + "content": "[143] Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, et al. Process reinforcement through implicit rewards. arXiv preprint arXiv:2502.01456, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.246, + 0.825, + 0.289 + ], + "angle": 0, + "content": "[144] Ganqu Cui, Yuchen Zhang, Jiacheng Chen, Lifan Yuan, Zhi Wang, Yuxin Zuo, Haozhan Li, Yuchen Fan, Huayu Chen, Weize Chen, et al. The entropy mechanism of reinforcement learning for reasoning language models. arXiv preprint arXiv:2505.22617, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.293, + 0.827, + 0.348 + ], + "angle": 0, + "content": "[145] Yingqian Cui, Pengfei He, Jingying Zeng, Hui Liu, Xianfeng Tang, Zhenwei Dai, Yan Han, Chen Luo, Jing Huang, Zhen Li, et al. Stepwise perplexity-guided refinement for efficient chain-of-thought reasoning in large language models. arXiv preprint arXiv:2502.13260, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.354, + 0.825, + 0.383 + ], + "angle": 0, + "content": "[146] Yu Cui and Cong Zuo. Practical reasoning interruption attacks on reasoning large language models. arXiv preprint arXiv:2505.06643, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.387, + 0.825, + 0.429 + ], + "angle": 0, + "content": "[147] Yu Cui, Bryan Hooi, Yujun Cai, and Yiwei Wang. Process or result? manipulated ending tokens can mislead reasoning lms to ignore the correct reasoning steps. arXiv preprint arXiv:2503.19326, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.434, + 0.825, + 0.476 + ], + "angle": 0, + "content": "[148] Jianbo Dai, Jianqiao Lu, Yunlong Feng, Dong Huang, Guangtao Zeng, Rongju Ruan, Ming Cheng, Haochen Tan, and Zhijiang Guo. Mhpp: Exploring the capabilities and limitations of language models beyond basic code generation. 
arXiv preprint arXiv:2405.11430, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.481, + 0.827, + 0.523 + ], + "angle": 0, + "content": "[149] Jisheng Dang, Jingze Wu, Teng Wang, Xuanhui Lin, Nannan Zhu, Hongbo Chen, Wei-Shi Zheng, Meng Wang, and Tat-Seng Chua. Reinforcing video reasoning with focused thinking. arXiv preprint arXiv:2505.24718, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.528, + 0.825, + 0.556 + ], + "angle": 0, + "content": "[150] Quy-Anh Dang and Chris Ngo. Reinforcement learning for reasoning in small llms: What works and what doesn't. arXiv preprint arXiv:2503.16219, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.561, + 0.825, + 0.59 + ], + "angle": 0, + "content": "[151] Yuntian Deng, Yejin Choi, and Stuart Shieber. From explicit cot to implicit cot: Learning to internalize cot step by step. arXiv preprint arXiv:2405.14838, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.594, + 0.827, + 0.65 + ], + "angle": 0, + "content": "[152] Lauro Langosco Di Langosco, Jack Koch, Lee D Sharkey, Jacob Pfau, and David Krueger. Goal misgeneralization in deep reinforcement learning. In International Conference on Machine Learning, pages 12004-12019. PMLR, October 2022. URL https://proceedings.mlr.press/v162/langosco22a/langosco22a.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.655, + 0.827, + 0.696 + ], + "angle": 0, + "content": "[153] Bowen Ding, Yuhan Chen, Futing Wang, Lingfeng Ming, and Tao Lin. Do thinking tokens help or trap? towards more efficient large reasoning model. arXiv preprint arXiv:2506.23840, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.701, + 0.825, + 0.744 + ], + "angle": 0, + "content": "[154] Yifu Ding, Wentao Jiang, Shunyu Liu, Yongcheng Jing, Jinyang Guo, Yingjie Wang, Jing Zhang, Zengmao Wang, Ziwei Liu, Bo Du, et al. Dynamic parallel tree search for efficient llm reasoning. arXiv preprint arXiv:2502.16235, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.748, + 0.827, + 0.818 + ], + "angle": 0, + "content": "[155] Hanze Dong, Wei Xiong, Deepanshu Goyal, Yihan Zhang, Winnie Chow, Rui Pan, Shizhe Diao, Jipeng Zhang, KaShun SHUM, and Tong Zhang. RAFT: Reward ranked finetuning for generative foundation model alignment. Transactions on Machine Learning Research, November 2023. ISSN 2835-8856. URL https://openreview.net/forum?id=m7p507zb1Y." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.823, + 0.825, + 0.865 + ], + "angle": 0, + "content": "[156] Hanze Dong, Wei Xiong, Bo Pang, Haoxiang Wang, Han Zhao, Yingbo Zhou, Nan Jiang, Doyen Sahoo, Caiming Xiong, and Tong Zhang. Rlhf workflow: From reward modeling to online rlhf. arXiv preprint arXiv:2405.07863, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.87, + 0.825, + 0.911 + ], + "angle": 0, + "content": "[157] Junnan Dong, Zijin Hong, Yuanchen Bei, Feiran Huang, Xinrun Wang, and Xiao Huang. Clr-bench: Evaluating large language models in college-level reasoning. arXiv preprint arXiv:2410.17558, 2024." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "46" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "[158] Kefan Dong and Tengyu Ma. Beyond limited data: Self-play ltm theorem provers with iterative conjecturing and proving. arXiv preprint arXiv:2502.00212, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.125, + 0.826, + 0.168 + ], + "angle": 0, + "content": "[159] Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. 
Insight-v: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.173, + 0.825, + 0.202 + ], + "angle": 0, + "content": "[160] Zhichen Dong, Zhanhui Zhou, Zhixuan Liu, Chao Yang, and Chaochao Lu. Emergent response planning in lIm. arXiv preprint arXiv:2502.06258, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.207, + 0.826, + 0.248 + ], + "angle": 0, + "content": "[161] Shihan Dou, Yan Liu, Haoxiang Jia, Limao Xiong, Enyu Zhou, Wei Shen, Junjie Shan, Caishuang Huang, Xiao Wang, Xiaoran Fan, et al. Stepcoder: Improve code generation with reinforcement learning from compiler feedback. arXiv preprint arXiv:2402.01391, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.254, + 0.827, + 0.297 + ], + "angle": 0, + "content": "[162] Iddo Drori, Gaston Longhitano, Mao Mao, Seunghwan Hyun, Yuke Zhang, Sungjun Park, Zachary Meeks, Xin-Yu Zhang, Ben Segev, Howard Yong, et al. Diverse inference and verification for advanced reasoning. arXiv preprint arXiv:2502.09955, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.301, + 0.827, + 0.356 + ], + "angle": 0, + "content": "[163] Kounianhua Du, Hanjing Wang, Jianxing Liu, Jizheng Chen, Xinyi Dai, Yasheng Wang, Ruiming Tang, Yong Yu, Jun Wang, and Weinan Zhang. Boost, disentangle, and customize: A robust system2-to-system1 pipeline for code generation. arXiv preprint arXiv:2502.12492, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.363, + 0.825, + 0.391 + ], + "angle": 0, + "content": "[164] Weihua Du, Yiming Yang, and Sean Welleck. Optimizing temperature for language models with multi-sample inference. arXiv preprint arXiv:2502.05234, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.396, + 0.826, + 0.438 + ], + "angle": 0, + "content": "[165] Xinrun Du, Yifan Yao, Kaijing Ma, Bingli Wang, Tianyu Zheng, Kang Zhu, Minghao Liu, Yiming Liang, Xiaolong Jin, Zhenlin Wei, et al. Supergpqa: Scaling llm evaluation across 285 graduate disciplines. arXiv preprint arXiv:2502.14739, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.444, + 0.825, + 0.487 + ], + "angle": 0, + "content": "[166] Yifan Du, Zikang Liu, Yifan Li, Wayne Xin Zhao, Yuqi Huo, Bingning Wang, Weipeng Chen, Zheng Liu, Zhongyuan Wang, and Ji-Rong Wen. Virgo: A preliminary exploration on reproducing o1-like mllm. arXiv preprint arXiv:2501.01904, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.491, + 0.826, + 0.533 + ], + "angle": 0, + "content": "[167] Keyu Duan, Zichen Liu, Xin Mao, Tianyu Pang, Changyu Chen, Qiguang Chen, Michael Qizhe Shieh, and Longxu Dou. Efficient process reward model training via active learning. arXiv preprint arXiv:2504.10559, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.539, + 0.827, + 0.581 + ], + "angle": 0, + "content": "[168] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.586, + 0.827, + 0.642 + ], + "angle": 0, + "content": "[169] Subhabrata Dutta, Joykirat Singh, Soumen Chakrabarti, and Tanmoy Chakraborty. How to think step-by-step: A mechanistic understanding of chain-of-thought reasoning. Transactions on Machine Learning Research, July 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=uHLDkQVtyC." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.647, + 0.825, + 0.69 + ], + "angle": 0, + "content": "[170] Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, et al. Competitive programming with large reasoning models. arXiv preprint arXiv:2502.06807, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.694, + 0.827, + 0.723 + ], + "angle": 0, + "content": "[171] Kawin Ethayarajh, Winnie Xu, Niklas Muennighoff, Dan Jurafsky, and Douwe Kiela. Kto: Model alignment as prospect theoretic optimization. arXiv preprint arXiv:2402.01306, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.728, + 0.827, + 0.77 + ], + "angle": 0, + "content": "[172] Chongyu Fan, Yihua Zhang, Jinghan Jia, Alfred Hero, and Sijia Liu. Cyclicreflex: Improving large reasoning models via cyclical reflection token scheduling. arXiv preprint arXiv:2506.11077, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.776, + 0.827, + 0.816 + ], + "angle": 0, + "content": "[173] Siqi Fan, Peng Han, Shuo Shang, Yequan Wang, and Aixin Sun. Cothink: Token-efficient reasoning via instruct models guiding reasoning models. arXiv preprint arXiv:2505.22017, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.823, + 0.825, + 0.864 + ], + "angle": 0, + "content": "[174] Tiantian Fan, Lingjun Liu, Yu Yue, Jiaze Chen, Chengyi Wang, Qiying Yu, Chi Zhang, Zhiqi Lin, Ruofei Zhu, Yufeng Yuan, et al. Truncated proximal policy optimization. arXiv preprint arXiv:2506.15050, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.827, + 0.912 + ], + "angle": 0, + "content": "[175] Yi Fang, Wenjie Wang, Yang Zhang, Fengbin Zhu, Qifan Wang, Fuli Feng, and Xiangnan He. Large language models for recommendation with deliberative user preference alignment. arXiv preprint arXiv:2502.02061, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "47" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[176] Wu Fei, Hao Kong, Shuxian Liang, Yang Lin, Yibo Yang, Jing Tang, Lei Chen, and Xiansheng Hua. Self-guided process reward optimization with masked step advantage for process reinforcement learning. arXiv preprint arXiv:2507.01551, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.137, + 0.827, + 0.194 + ], + "angle": 0, + "content": "[177] Guhao Feng, Bohang Zhang, Yuntian Gu, Haotian Ye, Di He, and Liwei Wang. Towards revealing the mystery behind chain of thought: A theoretical perspective. In Thirty-seventh Conference on Neural Information Processing Systems, September 2023. URL https://openreview.net/forum?id=qHrADgAdYu." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.198, + 0.825, + 0.241 + ], + "angle": 0, + "content": "[178] Jiazhan Feng, Shijue Huang, Xingwei Qu, Ge Zhang, Yujia Qin, Baoquan Zhong, Chengquan Jiang, Jinxin Chi, and Wanjun Zhong. Retool: Reinforcement learning for strategic tool use in llms. arXiv preprint arXiv:2504.11536, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.244, + 0.826, + 0.288 + ], + "angle": 0, + "content": "[179] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Junfei Wu, Xiaoying Zhang, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.291, + 0.825, + 0.32 + ], + "angle": 0, + "content": "[180] Sicheng Feng, Gongfan Fang, Xinyin Ma, and Xinchao Wang. Efficient reasoning models: A survey. arXiv preprint arXiv:2504.10903, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.323, + 0.824, + 0.353 + ], + "angle": 0, + "content": "[181] Xiachong Feng, Longxu Dou, and Lingpeng Kong. Reasoning does not necessarily improve role-playing ability. arXiv preprint arXiv:2502.16940, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.356, + 0.827, + 0.398 + ], + "angle": 0, + "content": "[182] Xueyang Feng, Bo Lan, Quanyu Dai, Lei Wang, Jiakai Tang, Xu Chen, Zhenhua Dong, and Ji-Rong Wen. Improving retrospective language agents via joint policy gradient optimization. arXiv preprint arXiv:2503.01490, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.402, + 0.825, + 0.445 + ], + "angle": 0, + "content": "[183] Yichen Feng, Zhangchen Xu, Fengqing Jiang, Yuetai Li, Bhaskar Ramasubramanian, Luyao Niu, Bill Yuchen Lin, and Radha Poovendran. Visualsphinx: Large-scale synthetic vision logic puzzles for rl. arXiv preprint arXiv:2505.23977, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.448, + 0.827, + 0.546 + ], + "angle": 0, + "content": "[184] Chrisantha Fernando, Dylan Sunil Banarse, Henryk Michalewski, Simon Osindero, and Tim Rocktäschel. Promptbreeder: Self-referential self-improvement via prompt evolution. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 13481-13544. PMLR, 21-27 Jul 2024. URL https://proceedings.mlrpress/v235/fernando24a.html." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.55, + 0.827, + 0.58 + ], + "angle": 0, + "content": "[185] Mohamed Amine Ferrag, Norbert Tihanyi, and Merouane Debbah. Reasoning beyond limits: Advances and open problems for lms. arXiv preprint arXiv:2503.22732, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.582, + 0.827, + 0.667 + ], + "angle": 0, + "content": "[186] Thomas Palmeira Ferraz, Kartik Mehta, Yu-Hsiang Lin, Haw-Shiuan Chang, Shereen Oraby, Sijia Liu, Vivek Subramanian, Tagyoung Chung, Mohit Bansal, and Nanyun Peng. LLM self-correction with deCRIM: Decompose, critique, and refine for enhanced following of instructions with multiple constraints. In The First Workshop on System-2 Reasoning at Scale, NeurIPS'24, October 2024. URL https://openreview.net/forum?id=RQ6Ff81so0." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.67, + 0.825, + 0.699 + ], + "angle": 0, + "content": "[187] Jiarun Fu, Lizhong Ding, Hao Li, Pengqi Li, Qiuning Wei, and Xu Chen. Unveiling and causalizing cot: A causal perspective. arXiv preprint arXiv:2502.18239, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.703, + 0.827, + 0.759 + ], + "angle": 0, + "content": "[188] Wei Fu, Jiaxuan Gao, Xujie Shen, Chen Zhu, Zhiyu Mei, Chuyi He, Shusheng Xu, Guo Wei, Jun Mei, Jiashu Wang, Tongkai Yang, Binhang Yuan, and Yi Wu. Areal: A large-scale asynchronous reinforcement learning system for language reasoning, 2025. URL https://arxiv.org/abs/2505.24298." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.763, + 0.827, + 0.82 + ], + "angle": 0, + "content": "[189] Yao Fu, Hao Peng, Ashish Sabharwal, Peter Clark, and Tushar Khot. Complexity-based prompting for multi-step reasoning. In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=yf1icZHC-19." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.823, + 0.827, + 0.879 + ], + "angle": 0, + "content": "[190] Yuqian Fu, Tinghong Chen, Jiajun Chai, Xihuai Wang, Songjun Tu, Guojun Yin, Wei Lin, Qichao Zhang, Yuanheng Zhu, and Dongbin Zhao. Srft: A single-stage method with supervised and reinforcement fine-tuning for reasoning. arXiv preprint arXiv:2506.19767, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.883, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[191] Víctor Gallego. Metasc: Test-time safety specification optimization for language models. arXiv preprint arXiv:2502.07985, 2025." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "48" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "[192] Zeyu Gan, Yun Liao, and Yong Liu. Rethinking external slow-thinking: From snowball errors to probability of correct reasoning. arXiv preprint arXiv:2501.15602, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.124, + 0.827, + 0.182 + ], + "angle": 0, + "content": "[193] Kanishk Gandhi, Denise HJ Lee, Gabriel Grand, Muxin Liu, Winson Cheng, Archit Sharma, and Noah Goodman. Stream of search (sos): Learning to search in language. In First Conference on Language Modeling, July 2024. URL https://openreview.net/pdf?id=2cop2jmQVL." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.186, + 0.827, + 0.23 + ], + "angle": 0, + "content": "[194] Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars. 
arXiv preprint arXiv:2503.01307, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.233, + 0.825, + 0.276 + ], + "angle": 0, + "content": "[195] Bofei Gao, Zefan Cai, Runxin Xu, Peiyi Wang, Ce Zheng, Runji Lin, Keming Lu, Junyang Lin, Chang Zhou, Tianyu Liu, and Baobao Chang. The reason behind good or bad: Towards a better mathematical verifier with natural language feedback, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.281, + 0.827, + 0.336 + ], + "angle": 0, + "content": "[196] Bofei Gao, Zefan Cai, Runxin Xu, Peiyi Wang, Ce Zheng, Runji Lin, Keming Lu, Dayiheng Liu, Chang Zhou, Wen Xiao, et al. Llm critics help catch bugs in mathematics: Towards a better mathematical verifier with natural language feedback. arXiv preprint arXiv:2406.14024, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.342, + 0.825, + 0.384 + ], + "angle": 0, + "content": "[197] Jiaxuan Gao, Shusheng Xu, Wenjie Ye, Weilin Liu, Chuyi He, Wei Fu, Zhiyu Mei, Guangju Wang, and Yi Wu. On designing effective rl reward at training time for llm reasoning. arXiv preprint arXiv:2410.15115, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.389, + 0.827, + 0.473 + ], + "angle": 0, + "content": "[198] Luyu Gao, Aman Madaan, Shuyan Zhou, Uri Alon, Pengfei Liu, Yiming Yang, Jamie Callan, and Graham Neubig. PAL: Program-aided language models. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett, editors, Proceedings of the 40th International Conference on Machine Learning, volume 202 of Proceedings of Machine Learning Research, pages 10764–10799. PMLR, 23–29 Jul 2023. URL https://proceedings.mlr.press/v202/gao23f.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.478, + 0.827, + 0.507 + ], + "angle": 0, + "content": "[199] Silin Gao, Antoine Bosselut, Samy Bengio, and Emmanuel Abbe. Augmenting llms' reasoning by reinforcing abstract thinking. arXiv preprint arXiv:2506.07751, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.511, + 0.827, + 0.54 + ], + "angle": 0, + "content": "[200] Tianchen Gao, Jiashun Jin, Zheng Tracy Ke, and Gabriel Moryoussef. A comparison of deepseek and other llms. arXiv preprint arXiv:2502.03688, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.544, + 0.827, + 0.587 + ], + "angle": 0, + "content": "[201] Zitian Gao, Boye Niu, Xuzheng He, Haotian Xu, Hongzhang Liu, Aiwei Liu, Xuming Hu, and Lijie Wen. Interpretable contrastive monte carlo tree search reasoning. arXiv preprint arXiv:2410.01707, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.592, + 0.825, + 0.635 + ], + "angle": 0, + "content": "[202] Yuyao Ge, Shenghua Liu, Yiwei Wang, Lingrui Mei, Lizhe Chen, Baolong Bi, and Xueqi Cheng. Innate reasoning is not enough: In-context learning enhances reasoning large language models with less overthinking. arXiv preprint arXiv:2503.19602, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.639, + 0.827, + 0.681 + ], + "angle": 0, + "content": "[203] Jonas Gehring, Kunhao Zheng, Jade Copet, Vegard Mella, Taco Cohen, and Gabriel Synnaeve. Rlef: Grounding code llms in execution feedback with reinforcement learning. arXiv preprint arXiv:2410.02089, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.686, + 0.827, + 0.741 + ], + "angle": 0, + "content": "[204] Jonas Geiping, Sean McLeish, Neel Jain, John Kirchenbauer, Siddharth Singh, Brian R Bartoldson, Bhavya Kailkhura, Abhinav Bhatele, and Tom Goldstein. Scaling up test-time compute with latent reasoning: A recurrent depth approach. arXiv preprint arXiv:2502.05171, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.747, + 0.827, + 0.804 + ], + "angle": 0, + "content": "[205] Gael Gendron, Qiming Bao, Michael Witbrock, and Gillian Dobbie. Large language models are not strong abstract reasoners. 
In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, IJCAI '24, August 2024. ISBN 978-1-956792-04-1. doi: 10.24963/ijcai.2024/693. URL https://doi.org/10.24963/ijcai.2024/693." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.809, + 0.827, + 0.865 + ], + "angle": 0, + "content": "[206] Zelalem Gero, Chandan Singh, Hao Cheng, Tristan Naumann, Michel Galley, Jianfeng Gao, and Hoifung Poon. Self-verification improves few-shot clinical information extraction. In ICML 3rd Workshop on Interpretable Machine Learning in Healthcare (IMLH), June 2023. URL https://openreview.net/forum?id=SBbJICrg1S." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.827, + 0.911 + ], + "angle": 0, + "content": "[207] Akash Ghosh, Debayan Datta, Sriparna Saha, and Chirag Agarwal. The multilingual mind: A survey of multilingual reasoning in language models. arXiv preprint arXiv:2502.09457, 2025." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "49" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.828, + 0.176 + ], + "angle": 0, + "content": "[208] Panagiotis Giadikiaroglou, Maria Lymperaiou, Giorgos Filandrianos, and Giorgos Stamou. Puzzle solving using reasoning of large language models: A survey. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 11574–11591, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.646. URL https://aclanthology.org/2024.emnlp-main.646/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.181, + 0.826, + 0.224 + ], + "angle": 0, + "content": "[209] Alexi Gladstone, Ganesh Nanduru, Md Mofijul Islam, Peixuan Han, Hyeonjeong Ha, Aman Chadha, Yilun Du, Heng Ji, Jundong Li, and Tariq Iqbal. Energy-based transformers are scalable learners and thinkers. arXiv preprint arXiv:2507.02092, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.228, + 0.827, + 0.285 + ], + "angle": 0, + "content": "[210] Elliot Glazer, Ege Erdil, Tamay Besiroglu, Diego Chicharro, Evan Chen, Alex Gunning, Caroline Falkman Olsson, Jean-Stanislas Denain, Anson Ho, Emily de Oliveira Santos, et al. Frontiermath: A benchmark for evaluating advanced mathematical reasoning in ai. arXiv preprint arXiv:2411.04872, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.29, + 0.825, + 0.333 + ], + "angle": 0, + "content": "[211] Team GLM, Aohan Zeng, Bin Xu, Bowen Wang, Chenhui Zhang, Da Yin, Dan Zhang, Diego Rojas, Guanyu Feng, Hanlin Zhao, et al. Chatglm: A family of large language models from glm-130b to glm-4 all tools. arXiv preprint arXiv:2406.12793, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.337, + 0.827, + 0.395 + ], + "angle": 0, + "content": "[212] Olga Golovneva, Moya Peng Chen, Spencer Poff, Martin Corredor, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. ROSCOE: A suite of metrics for scoring step-by-step reasoning. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=xYlJRpzZtsY." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.399, + 0.827, + 0.469 + ], + "angle": 0, + "content": "[213] Olga Golovneva, Sean O'Brien, Ramakanth Pasunuru, Tianlu Wang, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. PATHFINDER: Guided search over multi-step reasoning paths. In R0-FoMo: Robustness of Few-shot and Zero-shot Learning in Large Foundation Models, December 2023. 
URL https://openreview.net/forum?id=5TsfEEwRsu." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.474, + 0.825, + 0.517 + ], + "angle": 0, + "content": "[214] Ruihan Gong, Yue Liu, Wenjie Qu, Mingzhe Du, Yufei He, Yingwei Ma, Yulin Chen, Xiang Liu, Yi Wen, Xinfeng Li, et al. Efficient reasoning via chain of unconscious thought. arXiv preprint arXiv:2505.19756, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.522, + 0.827, + 0.564 + ], + "angle": 0, + "content": "[215] Juraj Gottweis, Wei-Hung Weng, Alexander Daryin, Tao Tu, Anil Palepu, Petar Sirkovic, Artiom Myaskovsky, Felix Weissenberger, Keran Rong, Ryutaro Tanno, et al. Towards an ai co-scientist. arXiv preprint arXiv:2502.18864, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.57, + 0.825, + 0.612 + ], + "angle": 0, + "content": "[216] Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Nan Duan, and Weizhu Chen. Critic: Large language models can self-correct with tool-interactive critiquing. arXiv preprint arXiv:2305.11738, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.617, + 0.825, + 0.66 + ], + "angle": 0, + "content": "[217] Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Minlie Huang, Nan Duan, and Weizhu Chen. Tora: A tool-integrated reasoning agent for mathematical problem solving. arXiv preprint arXiv:2309.17452, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.665, + 0.825, + 0.708 + ], + "angle": 0, + "content": "[218] Julia Grosse, Ruotian Wu, Ahmad Rashid, Philipp Hennig, Pascal Poupart, and Agustinus Kristiadi. Uncertainty-guided optimization on large language model search trees. arXiv preprint arXiv:2407.03951, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.713, + 0.825, + 0.755 + ], + "angle": 0, + "content": "[219] Yanggan Gu, Junzhuo Li, Sirui Huang, Xin Zou, Zhenghua Li, and Xuming Hu. Capturing nuanced preferences: Preference-aligned distillation for small language models. 
arXiv preprint arXiv:2502.14272, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.76, + 0.827, + 0.816 + ], + "angle": 0, + "content": "[220] Xinyan Guan, Yanjiang Liu, Xinyu Lu, Boxi Cao, Ben He, Xianpei Han, Le Sun, Jie Lou, Bowen Yu, Yaojie Lu, et al. Search, verify and feedback: Towards next generation post-training paradigm of foundation models via verifier engineering. arXiv preprint arXiv:2411.11504, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.822, + 0.827, + 0.864 + ], + "angle": 0, + "content": "[221] Xinyan Guan, Jiali Zeng, Fandong Meng, Chunlei Xin, Yaojie Lu, Hongyu Lin, Xianpei Han, Le Sun, and Jie Zhou. Deep Learning: Thinking to retrieve step by step for large language models. arXiv preprint arXiv:2502.01142, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.827, + 0.912 + ], + "angle": 0, + "content": "[222] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025." + }, + { + "type": "list", + "bbox": [ + 0.183, + 0.091, + 0.828, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "50" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[223] Etash Guha, Ryan Marten, Sedrick Keh, Negin Raoof, Georgios Smyrnis, Hritik Bansal, Marianna Nezhurina, Jean Mercat, Trung Vu, Zayne Sprague, et al. Openthoughts: Data recipes for reasoning models. arXiv preprint arXiv:2506.04178, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.139, + 0.827, + 0.196 + ], + "angle": 0, + "content": "[224] Aryan Gulati, Brando Miranda, Eric Chen, Emily Xia, Kai Fronsdal, Bruno de Moraes Dumont, and Sanmi Koyejo. Putnam-AXIOM: A functional and static benchmark for measuring higher level mathematical reasoning. In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, 2024. URL https://openreview.net/forum?id=YXnwlZe0yf." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.201, + 0.827, + 0.244 + ], + "angle": 0, + "content": "[225] Caglar Gulcehre, Tom Le Paine, Srivatsan Srinivasan, Ksenia Konyushkova, Lotte Weerts, Abhishek Sharma, Aditya Siddhant, Alex Ahern, Miaosen Wang, Chenjie Gu, et al. Reinforced self-training (rest) for language modeling. arXiv preprint arXiv:2308.08998, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.249, + 0.827, + 0.292 + ], + "angle": 0, + "content": "[226] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Yu Wu, YK Li, et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. arXiv preprint arXiv:2401.14196, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.296, + 0.825, + 0.34 + ], + "angle": 0, + "content": "[227] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.344, + 0.825, + 0.387 + ], + "angle": 0, + "content": "[228] Honglin Guo, Kai Lv, Qipeng Guo, Tianyi Liang, Zhiheng Xi, Demin Song, Qiuyinzhe Zhang, Yu Sun, Kai Chen, Xipeng Qiu, et al. Critiq: Mining data quality criteria from human preferences. arXiv preprint arXiv:2502.19279, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.392, + 0.827, + 0.463 + ], + "angle": 0, + "content": "[229] Kehan Guo, Bozhao Nan, Yujun Zhou, Taicheng Guo, Zhichun Guo, Mihir Surve, Zhenwen Liang, Nitesh V Chawla, Olaf Wiest, and Xiangliang Zhang. Can LLMs solve molecule puzzles? a multimodal benchmark for molecular structure elucidation. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=t1mAxb4Cop." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.467, + 0.825, + 0.511 + ], + "angle": 0, + "content": "[230] Ziyu Guo, Renrui Zhang, Chengzhuo Tong, Zhizheng Zhao, Peng Gao, Hongsheng Li, and Pheng-Ann Heng. Can we generate images with cot? let's verify and reinforce image generation step by step. arXiv preprint arXiv:2501.13926, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.514, + 0.827, + 0.572 + ], + "angle": 0, + "content": "[231] Dongge Han, Menglin Xia, Daniel Madrigal Diaz, Samuel Kessler, Ankur Mallick, Xuchao Zhang, Mirian Del Carmen Hipolito Garcia, Jin Xu, Victor Ruhle, and Saravan Rajmohan. Enhancing reasoning capabilities of small language models with blueprints and prompt template search. arXiv preprint arXiv:2506.08669, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.576, + 0.827, + 0.606 + ], + "angle": 0, + "content": "[232] Tingxu Han, Chunrong Fang, Shiyu Zhao, Shiqing Ma, Zhenyu Chen, and Zhenting Wang. Token-budget-aware lIm reasoning. arXiv preprint arXiv:2412.18547, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.61, + 0.827, + 0.654 + ], + "angle": 0, + "content": "[233] Michael Hanna, Ollie Liu, and Alexandre Variengien. How does GPT-2 compute greater-than?: Interpreting mathematical abilities in a pre-trained language model. September 2023. URL https://openreview.net/forum?id=p4PckNQR8k." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.657, + 0.827, + 0.743 + ], + "angle": 0, + "content": "[234] Shibo Hao, Yi Gu, Haodi Ma, Joshua Hong, Zhen Wang, Daisy Wang, and Zhiting Hu. Reasoning with language model is planning with world model. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 8154-8173, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.507. URL https://aclanthology.org/2023.emnlp-main.507/." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.747, + 0.827, + 0.817 + ], + "angle": 0, + "content": "[235] Shibo Hao, Yi Gu, Haotian Luo, Tianyang Liu, Xiyan Shao, Xinyuan Wang, Shuhua Xie, Haodi Ma, Adithya Samavedhi, Qiyue Gao, Zhen Wang, and Zhiting Hu. LLM reasoners: New evaluation, library, and analysis of step-by-step reasoning with large language models. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=b0y6fbSUG0." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.822, + 0.827, + 0.865 + ], + "angle": 0, + "content": "[236] Shibo Hao, Sainbayar Sukhbaatar, DiJia Su, Xian Li, Zhiting Hu, Jason Weston, and Yuandong Tian. Training large language models to reason in a continuous latent space. arXiv preprint arXiv:2412.06769, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.869, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[237] Yunzhuo Hao, Jiawei Gu, Huichen Will Wang, Linjie Li, Zhengyuan Yang, Lijuan Wang, and Yu Cheng. Can mllms reason in multimodality? emma: An enhanced multimodal reasoning benchmark. arXiv preprint arXiv:2501.05444, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "51" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.828, + 0.161 + ], + "angle": 0, + "content": "[238] Alexander Havrilla, Sharath Chandra Raparthy, Christoforos Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, and Roberta Raileanu. GLOre: When, where, and how to improve LLM reasoning via global and local refinements. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=LH6R06NxdB." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.165, + 0.828, + 0.278 + ], + "angle": 0, + "content": "[239] Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, Jie Liu, Lei Qi, Zhiyuan Liu, and Maosong Sun. OlympiadBench: A challenging benchmark for promoting AGI with olympiad-level bilingual multimodal scientific problems. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 3828–3850, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.211. URL https://aclanthology.org/2024.acl-long.211/." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.281, + 0.827, + 0.322 + ], + "angle": 0, + "content": "[240] Chengbo He, Bochao Zou, Xin Li, Jiansheng Chen, Junliang Xing, and Huimin Ma. Enhancing llm reasoning with multi-path collaborative reactive and reflection agents. arXiv preprint arXiv:2501.00430, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.326, + 0.825, + 0.369 + ], + "angle": 0, + "content": "[241] Feng He, Zijun Chen, Xinnian Liang, Tingting Ma, Yunqi Qiu, Shuangzhi Wu, and Junchi Yan. Protoreasoning: Prototypes as the foundation for generalizable reasoning in llms. arXiv preprint arXiv:2506.15211, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.372, + 0.828, + 0.457 + ], + "angle": 0, + "content": "[242] Jujie He, Jiacai Liu, Chris Yuhao Liu, Rui Yan, Chaojie Wang, Peng Cheng, Xiaoyu Zhang, Fuxiang Zhang, Jiacheng Xu, Wei Shen, Siyuan Li, Liang Zeng, Tianwen Wei, Cheng Cheng, Bo An, Yang Liu, and Yahui Zhou. Skywork open reasoner series. https://capricious-hydrogen-41c.notion.site/Skywork-Open-Reaonser-Series-1d0bc9ae823a80459b46c149e4f51680, 2025. Note Blog." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.46, + 0.827, + 0.502 + ], + "angle": 0, + "content": "[243] Junda He, Jieke Shi, Terry Yue Zhuo, Christoph Treude, Jiamou Sun, Zhenchang Xing, Xiaoning Du, and David Lo. From code to courtroom: Llms as the new software judges. arXiv preprint arXiv:2503.02246, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.506, + 0.825, + 0.535 + ], + "angle": 0, + "content": "[244] Kang He and Kaushik Roy. Logictree: Structured proof exploration for coherent and rigorous logical reasoning with large language models. arXiv preprint arXiv:2504.14089, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.538, + 0.828, + 0.623 + ], + "angle": 0, + "content": "[245] Mingqian He, Yongliang Shen, Wenqi Zhang, Zeqi Tan, and Weiming Lu. Advancing process verification for large language models via tree-based preference learning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 2086-2099, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.125. 
URL https://aclanthology.org/2024.emnlp-main.125/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.626, + 0.827, + 0.696 + ], + "angle": 0, + "content": "[246] Qiangqiang He, Shuwei Qian, Jie Zhang, and Chongjun Wang. Inference retrieval-augmented multi-modal chain-of-thoughts reasoning for language models. In ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5, 2025. doi: 10.1109/ICASSP49660.2025.10888701. URL https://openreview.net/pdf/9a7e7a9787d14ac8302215f8e4ef959606b78a94.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.699, + 0.825, + 0.741 + ], + "angle": 0, + "content": "[247] Shenghua He, Tian Xia, Xuan Zhou, and Hui Wei. Response-level rewards are all you need for online reinforcement learning in llms: A mathematical perspective. arXiv preprint arXiv:2506.02553, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.745, + 0.825, + 0.788 + ], + "angle": 0, + "content": "[248] Tao He, Hao Li, Jingchang Chen, Runxuan Liu, Yixin Cao, Lizi Liao, Zihao Zheng, Zheng Chu, Jiafeng Liang, Ming Liu, et al. A survey on complex reasoning of large language models through the lens of self-evolution. February 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.791, + 0.825, + 0.821 + ], + "angle": 0, + "content": "[249] Xingyang He, Xiao Ling, and Jie Liu. Smartthinker: Learning to compress and preserve reasoning by step-level length control. arXiv preprint arXiv:2507.04348, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.824, + 0.825, + 0.867 + ], + "angle": 0, + "content": "[250] Yancheng He, Shilong Li, Jiaheng Liu, Weixun Wang, Xingyuan Bu, Ge Zhang, Zhongyuan Peng, Zhaoxiang Zhang, Wenbo Su, and Bo Zheng. Can large language models detect errors in long chain-of-thought reasoning? arXiv preprint arXiv:2502.19361, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[251] Yang He, Xiao Ding, Bibo Cai, Yufei Zhang, Kai Xiong, Zhouhao Sun, Bing Qin, and Ting Liu. Self-route: Automatic mode switching via capability estimation for efficient reasoning. arXiv preprint arXiv:2505.20664, 2025." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.828, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "52" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.134 + ], + "angle": 0, + "content": "[252] Zhitao He, Sandeep Polisetty, Zhiyuan Fan, Yuchen Huang, Shujin Wu, et al. Mmboundary: Advancing mllm knowledge boundary awareness through reasoning step confidence calibration. arXiv preprint arXiv:2505.23224, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.138, + 0.826, + 0.209 + ], + "angle": 0, + "content": "[253] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2), October 2021. URL https://openreview.net/forum?id=7Bywt2mQsCe." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.212, + 0.825, + 0.242 + ], + "angle": 0, + "content": "[254] Alex Heyman and Joel Zylberberg. Evaluating the systematic reasoning abilities of large language models through graph coloring. arXiv preprint arXiv:2502.07087, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.246, + 0.825, + 0.275 + ], + "angle": 0, + "content": "[255] Alex Heyman and Joel Zylberberg. 
Reasoning large language model errors arise from hallucinating critical problem features. arXiv preprint arXiv:2505.12151, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.279, + 0.827, + 0.363 + ], + "angle": 0, + "content": "[256] Namgyu Ho, Laura Schmid, and Se-Young Yun. Large language models are reasoning teachers. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 14852–14882, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.830. URL https://aclanthology.org/2023.acl-long.830/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.367, + 0.827, + 0.41 + ], + "angle": 0, + "content": "[257] Andreas Hochlehnert, Hardik Bhatnagar, Vishaal Udandarao, Samuel Albanie, Ameya Prabhu, and Matthias Bethge. A sober look at progress in language model reasoning: Pitfalls and paths to reproducibility. arXiv preprint arXiv:2504.07086, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.413, + 0.827, + 0.483 + ], + "angle": 0, + "content": "[258] Matthew Douglas Hoffman, Du Phan, david dohan, Sholto Douglas, Tuan Anh Le, Aaron T Parisi, Pavel Sountsov, Charles Sutton, Sharad Vikram, and Rif A. Saurous. Training chain-of-thought via latent-variable inference. In Thirty-seventh Conference on Neural Information Processing Systems, September 2023. URL https://openreview.net/forum?id=a147pIS2Co." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.487, + 0.827, + 0.53 + ], + "angle": 0, + "content": "[259] Ruixin Hong, Xinyu Pang, and Changshui Zhang. Advances in reasoning by prompting large language models: A survey. Cybernetics and Intelligence, pages 1-15, 2024. doi: 10.26599/CAI.2024.9390004." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.534, + 0.826, + 0.62 + ], + "angle": 0, + "content": "[260] Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, and Jie Tang. Cogagent: A visual language model for gui agents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14281-14290, June 2024. URL https://openaccess.thecvf.com/content/CVPR2024/papers/Hong_CogAgent_A_Visual_Vocabulary_model_for_GUI_Agents_CVPR_2024_paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.623, + 0.827, + 0.679 + ], + "angle": 0, + "content": "[261] Arian Hosseini, Alessandro Sordoni, Daniel Kenji Toyama, Aaron Courville, and Rishabh Agarwal. Not all LLM reasoners are created equal. In The First Workshop on System-2 Reasoning at Scale, NeurIPS'24, October 2024. URL https://openreview.net/forum?id=aPAWbip1xV." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.683, + 0.826, + 0.74 + ], + "angle": 0, + "content": "[262] Arian Hosseini, Xingdi Yuan, Nikolay Malkin, Aaron Courville, Alessandro Sordoni, and Rishabh Agarwal. V-STar: Training verifiers for self-taught reasoners. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=stmqBSW2dV." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.744, + 0.827, + 0.785 + ], + "angle": 0, + "content": "[263] Bairu Hou, Yang Zhang, Jiabao Ji, Yujuan Liu, Kaizhi Qian, Jacob Andreas, and Shiyu Chang. Thinkprune: Pruning long chain-of-thought of llms via reinforcement learning. arXiv preprint arXiv:2504.01296, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.79, + 0.826, + 0.834 + ], + "angle": 0, + "content": "[264] Zhenyu Hou, Xin Lv, Rui Lu, Jiajie Zhang, Yujiang Li, Zijun Yao, Juanzi Li, Jie Tang, and Yuxiao Dong. Advancing language model reasoning through reinforcement learning and inference scaling. arXiv preprint arXiv:2501.11651, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.837, + 0.826, + 0.866 + ], + "angle": 0, + "content": "[265] Jian Hu. Reinforce++: A simple and efficient approach for aligning large language models. arXiv preprint arXiv:2501.03262, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[266] Jian Hu, Xibin Wu, Zilin Zhu, Xianyu, Weixun Wang, Dehao Zhang, and Yu Cao. Openrlhf: An easy-to-use, scalable and high-performance rlhf framework. arXiv preprint arXiv:2405.11143, 2024." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "53" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.829, + 0.148 + ], + "angle": 0, + "content": "[267] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero, February 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.152, + 0.826, + 0.194 + ], + "angle": 0, + "content": "[268] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, Xiangyu Zhang, and Heung-Yeung Shum. Open-reasoner-zero: An open source approach to scaling up reinforcement learning on the base model. arXiv preprint arXiv:2503.24290, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.199, + 0.826, + 0.241 + ], + "angle": 0, + "content": "[269] Mengkang Hu, Tianxing Chen, Qiguang Chen, Yao Mu, Wenqi Shao, and Ping Luo. Hiagent: Hierarchical working memory management for solving long-horizon agent tasks with large language model. arXiv preprint arXiv:2408.09559, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.246, + 0.826, + 0.315 + ], + "angle": 0, + "content": "[270] Mengkang Hu, Yao Mu, Xinmiao Chelsey Yu, Mingyu Ding, Shiguang Wu, Wenqi Shao, Qiguang Chen, Bin Wang, Yu Qiao, and Ping Luo. Tree-planner: Efficient close-loop task planning with large language models. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=Glcsg6zOe." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.32, + 0.826, + 0.362 + ], + "angle": 0, + "content": "[271] Mengkang Hu, Pu Zhao, Can Xu, Qingfeng Sun, Jianguang Lou, Qingwei Lin, Ping Luo, and Saravan Rajmohan. Agentgen: Enhancing planning abilities for large language model based agent via environment and task generation. arXiv preprint arXiv:2408.00764, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.367, + 0.826, + 0.409 + ], + "angle": 0, + "content": "[272] Mengkang Hu, Tianxing Chen, Yude Zou, Yuheng Lei, Qiguang Chen, Ming Li, Hongyuan Zhang, Wenqi Shao, and Ping Luo. Text2world: Benchmarking large language models for symbolic world model generation. arXiv preprint arXiv:2502.13092, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.413, + 0.826, + 0.456 + ], + "angle": 0, + "content": "[273] Mengkang Hu, Yuhang Zhou, Wendong Fan, Yuzhou Nie, Bowei Xia, Tao Sun, Ziyu Ye, Zhaoxuan Jin, Yingru Li, Qiguang Chen, et al. Owl: Optimized workforce learning for general multi-agent assistance in real-world task automation. arXiv preprint arXiv:2505.23885, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.46, + 0.826, + 0.502 + ], + "angle": 0, + "content": "[274] Renjun Hu, Yi Cheng, Libin Meng, Jiaxin Xia, Yi Zong, Xing Shi, and Wei Lin. Training an llm-as-a-judge model: Pipeline, insights, and practical lessons. arXiv preprint arXiv:2502.02988, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.507, + 0.826, + 0.577 + ], + "angle": 0, + "content": "[275] Zhiyuan Hu, Chumin Liu, Xidong Feng, Yilun Zhao, See-Kiong Ng, Anh Tuan Luu, Junxian He, Pang Wei Koh, and Bryan Hooi. Uncertainty of thoughts: Uncertainty-aware planning enhances information seeking in large language models. In ICLR 2024 Workshop on Large Language Model (LLM) Agents, March 2024. URL https://openreview.net/forum?id=ZWyLjimciT." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.581, + 0.826, + 0.635 + ], + "angle": 0, + "content": "[276] Maggie Huan, Yuetai Li, Tuney Zheng, Xiaoyu Xu, Seungone Kim, Minxin Du, Radha Poovendran, Graham Neubig, and Xiang Yue. Does math reasoning improve general llm capabilities? understanding transferability of llm reasoning. arXiv preprint arXiv:2507.00432, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.641, + 0.826, + 0.684 + ], + "angle": 0, + "content": "[277] Chenghua Huang, Lu Wang, Fangkai Yang, Pu Zhao, Zhixu Li, Qingwei Lin, Dongmei Zhang, Saravan Rajmohan, and Qi Zhang. Lean and mean: Decoupled value policy optimization with global value guidance. arXiv preprint arXiv:2502.16944, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.688, + 0.826, + 0.717 + ], + "angle": 0, + "content": "[278] Chengsong Huang, Langlin Huang, Jixuan Leng, Jiacheng Liu, and Jiaxin Huang. Efficient test-time scaling via self-calibration. arXiv preprint arXiv:2503.00031, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.722, + 0.826, + 0.762 + ], + "angle": 0, + "content": "[279] Chengyu Huang, Zhengxin Zhang, and Claire Cardie. Hapo: Training language models to reason concisely via history-aware policy optimization. arXiv preprint arXiv:2505.11225, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.767, + 0.826, + 0.81 + ], + "angle": 0, + "content": "[280] Haiduo Huang, Fuwei Yang, Zhenhua Liu, Yixing Xu, Jinze Li, Yang Liu, Xuanwu Yin, Dong Li, Pengju Ren, and Emad Barsoum. Jakiro: Boosting speculative decoding with decoupled multi-head via moe. arXiv preprint arXiv:2502.06282, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.814, + 0.826, + 0.912 + ], + "angle": 0, + "content": "[281] Haoyang Huang, Tianyi Tang, Dongdong Zhang, Xin Zhao, Ting Song, Yan Xia, and Furu Wei. Not all languages are created equal in LLMs: Improving multilingual capability by cross-lingual-thought prompting. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 12365–12394, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-emnlp.826. URL https://aclanthology.org/2023-findings-emnlp.826/." + }, + { + "type": "list", + "bbox": [ + 0.183, + 0.091, + 0.829, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "54" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.134 + ], + "angle": 0, + "content": "[282] Hui Huang, Yancheng He, Hongli Zhou, Rui Zhang, Wei Liu, Weixun Wang, Wenbo Su, Bo Zheng, and Jiaheng Liu. Think-j: Learning to think for generative llm-as-a-judge. arXiv preprint arXiv:2505.14268, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.137, + 0.826, + 0.193 + ], + "angle": 0, + "content": "[283] Jen-tse Huang, Eric John Li, Man Ho Lam, Tian Liang, Wenxuan Wang, Youliang Yuan, Wenxiang Jiao, Xing Wang, Zhaopeng Tu, and Michael R Lyu. How far are we on the decision-making of llms? 
evaluating llms' gaming ability in multi-agent environments. arXiv preprint arXiv:2403.11807, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.197, + 0.825, + 0.225 + ], + "angle": 0, + "content": "[284] Jiaxing Huang and Jingyi Zhang. A survey on evaluation of multimodal large language models. arXiv preprint arXiv:2408.15769, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.228, + 0.826, + 0.298 + ], + "angle": 0, + "content": "[285] Jie Huang and Kevin Chen-Chuan Chang. Towards reasoning in large language models: A survey. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Findings of the Association for Computational Linguistics: ACL 2023, pages 1049–1065, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.67. URL https://aclanthology.org/2023-findings-acl.67/." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.301, + 0.825, + 0.357 + ], + "angle": 0, + "content": "[286] Jie Huang, Xinyun Chen, Swaroop Mishra, Huaixiu Steven Zheng, Adams Wei Yu, Xinying Song, and Denny Zhou. Large language models cannot self-correct reasoning yet. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=IkmD3fKBPQ." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.36, + 0.826, + 0.403 + ], + "angle": 0, + "content": "[287] Jinyang Huang, Xiachong Feng, Qiguang Chen, Hanjie Zhao, Zihui Cheng, Jiesong Bai, Jingxuan Zhou, Min Li, and Libo Qin. Mldebugging: Towards benchmarking code debugging across multi-library scenarios. arXiv preprint arXiv:2506.13824, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.406, + 0.826, + 0.449 + ], + "angle": 0, + "content": "[288] Kaixuan Huang, Jiacheng Guo, Zihao Li, Xiang Ji, Jiawei Ge, Wenzhe Li, Yingqing Guo, Tianle Cai, Hui Yuan, Runzhe Wang, et al. Math-perturb: Benchmarking llms' math reasoning abilities against hard perturbations. 
arXiv preprint arXiv:2502.06453, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.451, + 0.826, + 0.549 + ], + "angle": 0, + "content": "[289] Lei Huang, Xiaocheng Feng, Weitao Ma, Liang Zhao, Yuchun Fan, Weihong Zhong, Dongliang Xu, Qing Yang, Hongtao Liu, and Bing Qin. Advancing large language model attribution through self-improving. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 3822-3836, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.223. URL https://aclanthology.org/2024.emnlp-main.223/." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.552, + 0.826, + 0.594 + ], + "angle": 0, + "content": "[290] Shijue Huang, Hongru Wang, Wanjun Zhong, Zhaochen Su, Jiazhan Feng, Bowen Cao, and Yi R Fung. Adactrl: Towards adaptive and controllable reasoning via difficulty-aware budgeting. arXiv preprint arXiv:2505.18822, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.597, + 0.826, + 0.64 + ], + "angle": 0, + "content": "[291] Shulin Huang, Linyi Yang, Yan Song, Shuang Chen, Leyang Cui, Ziyu Wan, Qingcheng Zeng, Ying Wen, Kun Shao, Weinan Zhang, et al. Thinkbench: Dynamic out-of-distribution evaluation for robust llm reasoning. arXiv preprint arXiv:2502.16268, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.643, + 0.826, + 0.685 + ], + "angle": 0, + "content": "[292] Tiansheng Huang, Sihao Hu, Fatih Ilhan, Selim Furkan Tekin, Zachary Yahn, Yichang Xu, and Ling Liu. Safety tax: Safety alignment makes your large reasoning models less reasonable. arXiv preprint arXiv:2503.00555, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.688, + 0.826, + 0.731 + ], + "angle": 0, + "content": "[293] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. 
Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.733, + 0.826, + 0.776 + ], + "angle": 0, + "content": "[294] Xiaoke Huang, Juncheng Wu, Hui Liu, Xianfeng Tang, and Yuyin Zhou. m1: Unleash the potential of test-time scaling for medical reasoning with large language models. arXiv preprint arXiv:2504.00869, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.779, + 0.826, + 0.821 + ], + "angle": 0, + "content": "[295] Yiming Huang, Xiao Liu, Yeyun Gong, Zhibin Gou, Yelong Shen, Nan Duan, and Weizhu Chen. Key-point-driven data synthesis with its enhancement on mathematical reasoning. arXiv preprint arXiv:2403.02333, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.824, + 0.826, + 0.866 + ], + "angle": 0, + "content": "[296] Yuzhen Huang, Weihao Zeng, Xingshan Zeng, Qi Zhu, and Junxian He. Pitfalls of rule-and model-based verifiers-a case study on mathematical reasoning. arXiv preprint arXiv:2505.22203, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.869, + 0.826, + 0.913 + ], + "angle": 0, + "content": "[297] Zeyu Huang, Tianhao Cheng, Zihan Qiu, Zili Wang, Yinghui Xu, Edoardo M Ponti, and Ivan Titov. Blending supervised and reinforcement fine-tuning with prefix sampling. arXiv preprint arXiv:2507.01679, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "55" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.828, + 0.189 + ], + "angle": 0, + "content": "[298] Zhen Huang, Zengzhi Wang, Shijie Xia, Xuefeng Li, Haoyang Zou, Ruijie Xu, Run-Ze Fan, Lyumanshan Ye, Ethan Chern, Yixin Ye, Yikai Zhang, Yuqing Yang, Ting Wu, Binjie Wang, Shichao Sun, Yang Xiao, Yiyuan Li, Fan Zhou, Steffi Chern, Yiwei Qin, Yan Ma, Jiadi Su, Yixiu Liu, Yuxiang Zheng, Shaoting Zhang, Dahua Lin, Yu Qiao, and Pengfei Liu. Olympic: Benchmarking multi-discipline cognitive reasoning for superintelligent AI. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024. URL https://openreview.net/forum?id=ayF8bEKYQy." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.194, + 0.827, + 0.251 + ], + "angle": 0, + "content": "[299] Zhen Huang, Haoyang Zou, Xuefeng Li, Yixiu Liu, Yuxiang Zheng, Ethan Chern, Shijie Xia, Yiwei Qin, Weizhe Yuan, and Pengfei Liu. O1 replication journey–part 2: Surpassing o1-preview through simple distillation, big progress or bitter lesson? arXiv preprint arXiv:2411.16489, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.257, + 0.827, + 0.3 + ], + "angle": 0, + "content": "[300] Zhongzhen Huang, Gui Geng, Shengyi Hua, Zhen Huang, Haoyang Zou, Shaoting Zhang, Pengfei Liu, and Xiaofan Zhang. O1 replication journey–part 3: Inference-time scaling for medical reasoning. arXiv preprint arXiv:2501.06458, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.305, + 0.825, + 0.347 + ], + "angle": 0, + "content": "[301] Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2.5-coder technical report. arXiv preprint arXiv:2409.12186, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.353, + 0.827, + 0.438 + ], + "angle": 0, + "content": "[302] Hyeonbin Hwang, Doyoung Kim, Seungone Kim, Seonghyeon Ye, and Minjoon Seo. Self-exlore: Enhancing mathematical reasoning in language models with fine-grained rewards. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 1444-1466, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.78. URL https://aclanthology.org/2024 findings-emnlp.78/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.443, + 0.825, + 0.472 + ], + "angle": 0, + "content": "[303] Shima Imani, Liang Du, and Harsh Shrivastava. Mathprompter: Mathematical reasoning using large language models. 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.477, + 0.825, + 0.507 + ], + "angle": 0, + "content": "[304] Md Ashraful Islam, Mohammed Eunus Ali, and Md Rizwan Parvez. Mapcoder: Multi-agent code generation for competitive problem solving. arXiv preprint arXiv:2405.11403, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.512, + 0.825, + 0.554 + ], + "angle": 0, + "content": "[305] Hamish Ivison, Yizhong Wang, Valentina Pyatkin, Nathan Lambert, Matthew Peters, Pradeep Dasigi, Joel Jang, David Wadden, Noah A Smith, Iz Beltagy, et al. Camels in a changing climate: Enhancing lm adaptation with tulu 2, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.56, + 0.827, + 0.63 + ], + "angle": 0, + "content": "[306] Hamish Ivison, Yizhong Wang, Jiacheng Liu, Zeqiu Wu, Valentina Pyatkin, Nathan Lambert, Noah A. Smith, Yejin Choi, and Hannaneh Hajishirzi. Unpacking DPO and PPO: Disentangling best practices for learning from preference feedback. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=JMBWTlazjW." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.636, + 0.827, + 0.678 + ], + "angle": 0, + "content": "[307] Aaron Jaech, Adam Kalai, Adam Lerner, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.684, + 0.825, + 0.739 + ], + "angle": 0, + "content": "[308] Eeshaan Jain, Johann Wenckstern, Benedikt von Querfurth, and Charlotte Bunne. Test-time view selection for multi-modal decision making. In ICLR 2025 Workshop on Machine Learning for Genomics Explorations, March 2025. URL https://openreview.net/forum?id=aNmZ9s6BZV." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.746, + 0.827, + 0.815 + ], + "angle": 0, + "content": "[309] Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. Livecodebench: Holistic and contamination free evaluation of large language models for code. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=chfJJYC3iL." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.821, + 0.827, + 0.851 + ], + "angle": 0, + "content": "[310] Sooyoung Jang and Hyung-II Kim. Entropy-aware model initialization for effective exploration in deep reinforcement learning. Sensors, 22(15):5845, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.856, + 0.825, + 0.911 + ], + "angle": 0, + "content": "[311] Ke Ji, Jiahao Xu, Tian Liang, Qiuzhi Liu, Zhiwei He, Xingyu Chen, Xiaoyuan Liu, Zhijie Wang, Junying Chen, Benyou Wang, et al. The first few tokens are all you need: An efficient and effective unsupervised prefix fine-tuning method for reasoning models. arXiv preprint arXiv:2503.02875, 2025." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.828, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "56" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[312] Tao Ji, Bin Guo, Yuanbin Wu, Qipeng Guo, Lixing Shen, Zhan Chen, Xipeng Qiu, Qi Zhang, and Tao Gui. Towards economical inference: Enabling deepseek's multi-head latent attention in any transformer-based llms. arXiv preprint arXiv:2502.14837, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.139, + 0.826, + 0.18 + ], + "angle": 0, + "content": "[313] Yichao Ji. A small step towards reproducing openai o1: Progress report on the steiner open source models, October 2024. URL https://medium.com/@peakji/b9a756a00855." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.185, + 0.826, + 0.228 + ], + "angle": 0, + "content": "[314] Yixin Ji, Juntao Li, Hai Ye, Kaixin Wu, Jia Xu, Linjian Mo, and Min Zhang. Test-time computing: from system-1 thinking to system-2 thinking. arXiv preprint arXiv:2501.02497, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.232, + 0.827, + 0.317 + ], + "angle": 0, + "content": "[315] Ziwei Ji, Tiezheng Yu, Yan Xu, Nayeon Lee, Etsuko Ishii, and Pascale Fung. Towards mitigating LLM hallucination via self reflection. 
In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 1827-1843, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.123. URL https://aclanthology.org/2023.findings-emnlp.123/." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.321, + 0.825, + 0.363 + ], + "angle": 0, + "content": "[316] Boyu Jia, Junzhe Zhang, Huixuan Zhang, and Xiaojun Wan. Exploring and evaluating multimodal knowledge reasoning consistency of multimodal large language models. arXiv preprint arXiv:2503.04801, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.368, + 0.825, + 0.397 + ], + "angle": 0, + "content": "[317] Zeyu Jia, Alexander Rakhlin, and Tengyang Xie. Do we need to verify step by step? rethinking process supervision from a theoretical perspective. arXiv preprint arXiv:2502.10581, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.401, + 0.825, + 0.457 + ], + "angle": 0, + "content": "[318] Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh, Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. Mistral 7b, October 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.461, + 0.825, + 0.505 + ], + "angle": 0, + "content": "[319] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.509, + 0.825, + 0.551 + ], + "angle": 0, + "content": "[320] Fengqing Jiang, Zhangchen Xu, Yuetai Li, Luyao Niu, Zhen Xiang, Bo Li, Bill Yuchen Lin, and Radha Poovendran. 
Safechain: Safety of language models with long chain-of-thought reasoning capabilities. arXiv preprint arXiv:2502.12025, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.555, + 0.825, + 0.598 + ], + "angle": 0, + "content": "[321] Huchen Jiang, Yangyang Ma, Chaofan Ding, Kexin Luan, and Xinhan Di. Towards intrinsic self-correction enhancement in monte carlo tree search boosted reasoning via iterative preference learning. arXiv preprint arXiv:2412.17397, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.602, + 0.825, + 0.644 + ], + "angle": 0, + "content": "[322] Jinhao Jiang, Jiayi Chen, Junyi Li, Ruiyang Ren, Shijie Wang, Wayne Xin Zhao, Yang Song, and Tao Zhang. Rag-star: Enhancing deliberative reasoning with retrieval augmented verification and refinement. arXiv preprint arXiv:2412.12881, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.649, + 0.825, + 0.692 + ], + "angle": 0, + "content": "[323] Jinhao Jiang, Zhipeng Chen, Yingqian Min, Jie Chen, Xiaoxue Cheng, Jiapeng Wang, Yiru Tang, Haoxiang Sun, Jia Deng, Wayne Xin Zhao, et al. Technical report: Enhancing llm reasoning with reward-guided tree search. arXiv preprint arXiv:2411.11694, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.696, + 0.825, + 0.725 + ], + "angle": 0, + "content": "[324] Nan Jiang, Ziming Wu, De-Chuan Zhan, Fuming Lai, and Shaobing Lian. Dart: Distilling autoregressive reasoning to silent thought. arXiv preprint arXiv:2506.11752, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.729, + 0.825, + 0.771 + ], + "angle": 0, + "content": "[325] Shuyang Jiang, Yusheng Liao, Zhe Chen, Ya Zhang, Yanfeng Wang, and Yu Wang. Meds 3: Towards medical small language models with self-evolved slow thinking. arXiv preprint arXiv:2501.12051, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.776, + 0.825, + 0.817 + ], + "angle": 0, + "content": "[326] Yuxuan Jiang, Dawei Li, and Frank Ferraro. 
Drp: Distilled reasoning pruning with skill-aware step decomposition for efficient large reasoning models. arXiv preprint arXiv:2505.13975, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.823, + 0.825, + 0.879 + ], + "angle": 0, + "content": "[327] Carlos E Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik R Narasimhan. SWE-bench: Can language models resolve real-world github issues? In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=VTF8yNQM66." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[328] Di Jin, Eileen Pan, Nassim Oufattole, Wei-Hung Weng, Hanyi Fang, and Peter Szolovits. What disease does this patient have? a large-scale open domain question answering dataset" + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "57" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "from medical exams. Applied Sciences, 11(14), July 2021. ISSN 2076-3417. doi: 10.3390/app11146421. URL https://www.mdpi.com/2076-3417/11/14/6421." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.124, + 0.826, + 0.167 + ], + "angle": 0, + "content": "[329] Mingyu Jin, Weidi Luo, Sitao Cheng, Xinyi Wang, Wenyue Hua, Ruixiang Tang, William Yang Wang, and Yongfeng Zhang. Disentangling memory and reasoning ability in large language models. arXiv preprint arXiv:2411.13504, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.17, + 0.826, + 0.255 + ], + "angle": 0, + "content": "[330] Mingyu Jin, Qinkai Yu, Dong Shu, Haiyan Zhao, Wenyue Hua, Yanda Meng, Yongfeng Zhang, and Mengnan Du. The impact of reasoning step length on large language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 1830–1842, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.108. URL https://aclanthology.org/2024-findings-acl.108/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.258, + 0.827, + 0.37 + ], + "angle": 0, + "content": "[331] Mingyu Jin, Qinkai Yu, Jingyuan Huang, Qingcheng Zeng, Zhenting Wang, Wenyue Hua, Haiyan Zhao, Kai Mei, Yanda Meng, Kaize Ding, Fan Yang, Mengnan Du, and Yongfeng Zhang. Exploring concept depth: How large language models acquire knowledge and concept at different layers? In Owen Rambow, Leo Wanner, Marianna Apidianaki, Hend Al-Khalifa, Barbara Di Eugenio, and Steven Schockaert, editors, Proceedings of the 31st International Conference on Computational Linguistics, pages 558-573, Abu Dhabi, UAE, January 2025. Association for Computational Linguistics. URL https://aclanthology.org/2025.coling-main.37/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.374, + 0.825, + 0.416 + ], + "angle": 0, + "content": "[332] Zhensheng Jin, Xinze Li, Yifan Ji, Chunyi Peng, Zhenghao Liu, Qi Shi, Yukun Yan, Shuo Wang, Furong Peng, and Ge Yu. Recut: Balancing reasoning length and accuracy in llms via stepwise trails and preference optimization. arXiv preprint arXiv:2506.10822, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.42, + 0.826, + 0.448 + ], + "angle": 0, + "content": "[333] Andy L Jones. Scaling scaling laws with board games. arXiv preprint arXiv:2104.03113, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.452, + 0.824, + 0.481 + ], + "angle": 0, + "content": "[334] Cameron R Jones and Benjamin K Bergen. Large language models pass the Turing test. arXiv preprint arXiv:2503.23674, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.485, + 0.824, + 0.514 + ], + "angle": 0, + "content": "[335] Prashank Kadam. Gpt-guided monte carlo tree search for symbolic regression in financial fraud detection. arXiv preprint arXiv:2411.04459, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.518, + 0.826, + 0.56 + ], + "angle": 0, + "content": "[336] Saurav Kadavath, Tom Conerly, Amanda Askell, Tom Henighan, Dawn Drain, Ethan Perez, Nicholas Schiefer, Zac Hatfield-Dodds, Nova DasSarma, Eli Tran-Johnson, et al. Language models (mostly) know what they know. arXiv preprint arXiv:2207.05221, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.564, + 0.826, + 0.634 + ], + "angle": 0, + "content": "[337] Ryo Kamoi, Sarkar Snigdha Sarathi Das, Renze Lou, Jihyun Janice Ahn, Yilun Zhao, Xiaoxin Lu, Nan Zhang, Yusen Zhang, Haoran Ranran Zhang, Sujeeth Reddy Vummanthala, Salika Dave, Shaobo Qin, Arman Cohan, Wenpeng Yin, and Rui Zhang. Evaluating LLMs at detecting errors in LLM responses. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=dnwRScljXr." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.638, + 0.826, + 0.68 + ], + "angle": 0, + "content": "[338] Jikun Kang, Xin Zhe Li, Xi Chen, Amirreza Kazemi, Qianyi Sun, Boxing Chen, Dong Li, Xu He, Quan He, Feng Wen, et al. Mindstar: Enhancing math reasoning in pre-trained llms at inference time. arXiv preprint arXiv:2405.16265, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.684, + 0.827, + 0.768 + ], + "angle": 0, + "content": "[339] Liwei Kang, Zirui Zhao, David Hsu, and Wee Sun Lee. On the empirical complexity of reasoning and planning in LLMs. 
In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 2897-2936, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.164. URL https://aclanthology.org/2024-findings-emnlp.164/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.772, + 0.827, + 0.801 + ], + "angle": 0, + "content": "[340] Yu Kang, Xianghui Sun, Liangyu Chen, and Wei Zou. C3ot: Generating shorter chain-of-thought without compromising effectiveness. 39(23):24312-24320, Apr 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.805, + 0.825, + 0.834 + ], + "angle": 0, + "content": "[341] Zhewei Kang, Xuandong Zhao, and Dawn Song. Scalable best-of-n selection for large language models via self-certainty. arXiv preprint arXiv:2502.18581, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.837, + 0.825, + 0.878 + ], + "angle": 0, + "content": "[342] Manuj Kant, Sareh Nabi, Manav Kant, Roland Scharrer, Megan Ma, and Marzieh Nabi. Towards robust legal reasoning: Harnessing logical llms in law. arXiv preprint arXiv:2502.17638, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.884, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[343] Mehran Kazemi, Najoung Kim, Deepti Bhatia, Xin Xu, and Deepak Ramachandran. LAM-BADA: Backward chaining for automated reasoning in natural language. 
In Anna Rogers," + }, + { + "type": "list", + "bbox": [ + 0.183, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "58" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.091, + 0.829, + 0.162 + ], + "angle": 0, + "content": "Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6547-6568, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.361. URL https://aclanthology.org/2023.acl-long.361/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.166, + 0.826, + 0.21 + ], + "angle": 0, + "content": "[344] Amirhossein Kazemnejad, Milad Aghajohari, Eva Portelance, Alessandro Sordoni, Siva Reddy, Aaron Courville, and Nicolas Le Roux. Vineppo: Unlocking rl potential for llm reasoning through refined credit assignment. arXiv preprint arXiv:2410.01679, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.215, + 0.827, + 0.257 + ], + "angle": 0, + "content": "[345] Muhammad Khalifa, Lajanugen Logeswaran, Moontae Lee, Honglak Lee, and Lu Wang. Grace: Discriminator-guided chain-of-thought reasoning. arXiv preprint arXiv:2305.14934, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.262, + 0.825, + 0.306 + ], + "angle": 0, + "content": "[346] Artyom Kharinaev, Viktor Moskvoretskii, Egor Shvetsov, Kseniia Studenikina, Bykov Mikhail, and Evgeny Burnaev. Investigating the impact of quantization methods on the safety and reliability of large language models. arXiv preprint arXiv:2502.15799, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.31, + 0.827, + 0.354 + ], + "angle": 0, + "content": "[347] Hyunwoo Kim, Melanie Sclar, Tan Zhi-Xuan, Lance Ying, Sydney Levine, Yang Liu, Joshua B Tenenbaum, and Yejin Choi. Hypothesis-driven theory-of-mind reasoning for large language models. arXiv preprint arXiv:2502.11881, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.358, + 0.827, + 0.401 + ], + "angle": 0, + "content": "[348] Jiin Kim, Byeongjun Shin, Jinha Chung, and Minsoo Rhu. The cost of dynamic reasoning: Demystifying ai agents and test-time scaling from an ai infrastructure perspective. arXiv preprint arXiv:2506.04301, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.405, + 0.827, + 0.448 + ], + "angle": 0, + "content": "[349] Juno Kim, Denny Wu, Jason Lee, and Taiji Suzuki. Metastable dynamics of chain-of-thought reasoning: Provable benefits of search, rl and distillation. arXiv preprint arXiv:2502.01694, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.453, + 0.827, + 0.483 + ], + "angle": 0, + "content": "[350] Moo Jin Kim, Chelsea Finn, and Percy Liang. Fine-tuning vision-language-action models: Optimizing speed and success. arXiv preprint arXiv:2502.19645, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.487, + 0.827, + 0.529 + ], + "angle": 0, + "content": "[351] Naryeong Kim, Sungmin Kang, Gabin An, and Shin Yoo. Lachesis: Predicting llm inference accuracy using structural properties of reasoning paths. arXiv preprint arXiv:2412.08281, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.535, + 0.829, + 0.632 + ], + "angle": 0, + "content": "[352] Seungone Kim, Se Joo, Doyoung Kim, Joel Jang, Seonghyeon Ye, Jamin Shin, and Minjoon Seo. The CoT collection: Improving zero-shot and few-shot learning of language models via chain-of-thought fine-tuning. 
In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12685-12708, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.782. URL https://aclanthology.org/2023.emnlp-main.782/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.637, + 0.829, + 0.736 + ], + "angle": 0, + "content": "[353] Seungone Kim, Juyoung Suk, Shayne Longpre, Bill Yuchen Lin, Jamin Shin, Sean Welleck, Graham Neubig, Moontae Lee, Kyungjae Lee, and Minjoon Seo. Prometheus 2: An open source language model specialized in evaluating other language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 4334-4353, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.248. URL https://aclanthology.org/2024.emnlp-main.248/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.74, + 0.829, + 0.784 + ], + "angle": 0, + "content": "[354] Sunnie SY Kim, Jennifer Wortman Vaughan, Q Vera Liao, Tania Lombrozo, and Olga Russakovsky. Fostering appropriate reliance on large language models: The role of explanations, sources, and inconsistencies. arXiv preprint arXiv:2502.08554, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.788, + 0.827, + 0.817 + ], + "angle": 0, + "content": "[355] Jing Yu Koh, Stephen McAleer, Daniel Fried, and Ruslan Salakhutdinov. Tree search for language model agents. arXiv preprint arXiv:2407.01476, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.822, + 0.825, + 0.865 + ], + "angle": 0, + "content": "[356] Deqian Kong, Minglu Zhao, Dehong Xu, Bo Pang, Shu Wang, Edouardo Honig, Zhangzhang Si, Chuan Li, Jianwen Xie, Sirui Xie, et al. Scalable language models with posterior inference of latent thought vectors. 
arXiv preprint arXiv:2502.01567, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.869, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[357] Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. Overthinking: Slowdown attacks on reasoning llms. arXiv preprint arXiv:2502.02542, 2025." + }, + { + "type": "list", + "bbox": [ + 0.183, + 0.091, + 0.829, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "59" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[358] Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, et al. Training language models to self-correct via reinforcement learning. arXiv preprint arXiv:2409.12917, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.137, + 0.827, + 0.193 + ], + "angle": 0, + "content": "[359] Komal Kumar, Tajamul Ashraf, Omkar Thawakar, Rao Muhammad Anwer, Hisham Cholakkal, Mubarak Shah, Ming-Hsuan Yang, Phillip H. S. Torr, Salman Khan, and Fahad Shahbaz Khan. Llm post-training: A deep dive into reasoning large language models, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.198, + 0.827, + 0.255 + ], + "angle": 0, + "content": "[360] Martin Kuo, Jianyi Zhang, Aolin Ding, Qinsi Wang, Louis DiValentin, Yujia Bao, Wei Wei, Da-Cheng Juan, Hai Li, and Yiran Chen. H-cot: Hijacking the chain-of-thought safety reasoning mechanism to jailbreak large reasoning models, including openai o1/o3, deepseek-r1, and gemini 2.0 flash thinking. arXiv preprint arXiv:2502.12893, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.258, + 0.827, + 0.287 + ], + "angle": 0, + "content": "[361] EvolvingLMMs Lab. Open-r1-multimodal. https://github.com/EvolvingLMMs-Lab/open-r1-multimodal, February 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.291, + 0.827, + 0.334 + ], + "angle": 0, + "content": "[362] Bespoke Labs. Bespoke-stratos: The unreasonable effectiveness of reasoning distillation. https://www.bespokelabs.ai/blog/bespoke-stratos-the-unreasonable-effectiveness-of-reasoning-distillation, January 2025. Accessed: 2025-01-22." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.337, + 0.827, + 0.38 + ], + "angle": 0, + "content": "[363] Inception Labs, Samar Khanna, Siddhant Kharbanda, Shufan Li, Harshit Varma, Eric Wang, Sawyer Birnbaum, Ziyang Luo, Yanis Miraoui, Akash Palrecha, et al. Mercury: Ultra-fast language models based on diffusion. arXiv preprint arXiv:2506.17298, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.384, + 0.825, + 0.413 + ], + "angle": 0, + "content": "[364] Huiyuan Lai, Xiao Zhang, and Malvina Nissim. Multidimensional consistency improves reasoning in language models. arXiv preprint arXiv:2503.02670, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.416, + 0.827, + 0.458 + ], + "angle": 0, + "content": "[365] Xin Lai, Zhuotao Tian, Yukang Chen, Senqiao Yang, Xiangru Peng, and Jiaya Jia. Step-dpo: Step-wise preference optimization for long-chain reasoning of llms. arXiv preprint arXiv:2406.18629, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.462, + 0.825, + 0.534 + ], + "angle": 0, + "content": "[366] Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V. Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, Yuling Gu, Saumya Malik, Victoria Graf, Jena D. Hwang, Jiangjiang Yang, Ronan Le Bras, Oyvind Tafjord, Chris Wilhelm, Luca Soldaini, Noah A. 
Smith, Yizhong Wang, Pradeep Dasigi, and Hannaneh Hajishirzi. Tulu 3: Pushing frontiers in open language model post-training, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.536, + 0.825, + 0.58 + ], + "angle": 0, + "content": "[367] Nathan Lambert, Valentina Pyatkin, Jacob Morrison, LJ Miranda, Bill Yuchen Lin, Khyathi Chandu, Nouha Dziri, Sachin Kumar, Tom Zick, Yejin Choi, et al. Rewardbench: Evaluating reward models for language modeling. arXiv preprint arXiv:2403.13787, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.583, + 0.827, + 0.681 + ], + "angle": 0, + "content": "[368] Andrew Lampinen, Ishita Dasgupta, Stephanie Chan, Kory Mathewson, Mh Tessler, Antonia Creswell, James McClelland, Jane Wang, and Felix Hill. Can language models learn from explanations in context? In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang, editors, Findings of the Association for Computational Linguistics: EMNLP 2022, pages 537-563, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022-findings-emnlp.38. URL https://aclanthology.org/2022-findings-emnlp.38." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.684, + 0.827, + 0.727 + ], + "angle": 0, + "content": "[369] Jack Lanchantin, Angelica Chen, Shehzaad Dhuliawala, Ping Yu, Jason Weston, Sainbayar Sukhbaatar, and Ilia Kulikov. Diverse preference optimization. arXiv preprint arXiv:2501.18101, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.73, + 0.827, + 0.785 + ], + "angle": 0, + "content": "[370] Anh Duc Le, Tu Vu, Nam Le Hai, Nguyen Thi Ngoc Diep, Linh Ngo Van, Trung Le, and Thien Huu Nguyen. Cot2align: Cross-chain of thought distillation via optimal transport alignment for language models with different tokenizers. arXiv preprint arXiv:2502.16806, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.79, + 0.827, + 0.833 + ], + "angle": 0, + "content": "[371] Joshua Ong Jun Leang, Aryo Pradipta Gema, and Shay B Cohen. Comat: Chain of mathematically annotated thought improves mathematical reasoning. arXiv preprint arXiv:2410.10336, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.837, + 0.825, + 0.867 + ], + "angle": 0, + "content": "[372] Joshua Ong Jun Leang, Giwon Hong, Wenda Li, and Shay B Cohen. Theorem prover as a judge for synthetic data generation. arXiv preprint arXiv:2502.13137, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.87, + 0.827, + 0.911 + ], + "angle": 0, + "content": "[373] Byeongchan Lee, Jonghoon Lee, Dongyoung Kim, Jaehyung Kim, and Jinwoo Shin. Collaborative llm inference via planning for efficient reasoning. arXiv preprint arXiv:2506.11578, 2025." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "60" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.133 + ], + "angle": 0, + "content": "[374] Hyunseok Lee, Seunghyuk Oh, Jaehyung Kim, Jinwoo Shin, and Jihoon Tack. Revise: Learning to refine at test-time via intrinsic self-verification. arXiv preprint arXiv:2502.14565, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.139, + 0.825, + 0.168 + ], + "angle": 0, + "content": "[375] Jinu Lee and Julia Hockenmaier. Evaluating step-by-step reasoning traces: A survey. arXiv preprint arXiv:2502.12289, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.172, + 0.826, + 0.215 + ], + "angle": 0, + "content": "[376] Jung Hyun Lee, June Yong Yang, Byeongho Heo, Dongyoon Han, and Kang Min Yoo. 
Token-supervised value models for enhancing mathematical reasoning capabilities of large language models. arXiv preprint arXiv:2407.12863, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.219, + 0.827, + 0.261 + ], + "angle": 0, + "content": "[377] Kuang-Huei Lee, Ian Fischer, Yueh-Hua Wu, Dave Marwood, Shumeet Baluja, Dale Schuurmans, and Xinyun Chen. Evolving deeper llm thinking. arXiv preprint arXiv:2501.09891, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.267, + 0.825, + 0.323 + ], + "angle": 0, + "content": "[378] Lucas Lehnert, Sainbayar Sukhbaatar, DiJia Su, Qinqing Zheng, Paul McVay, Michael Rabbat, and Yuandong Tian. Beyond a*: Better planning with transformers via search dynamics bootstrapping. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=SGoVIC0u0f." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.328, + 0.827, + 0.384 + ], + "angle": 0, + "content": "[379] Bin Lei, Yi Zhang, Shan Zuo, Ali Payani, and Caiwen Ding. MACM: Utilizing a multi-agent system for condition mining in solving complex mathematical problems. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=VR2RdSxtzs." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.389, + 0.827, + 0.431 + ], + "angle": 0, + "content": "[380] Jixuan Leng, Cassandra A Cohen, Zhixian Zhang, Chenyan Xiong, and William W Cohen. Semi-structured llm reasoners can be rigorously audited. arXiv preprint arXiv:2505.24217, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.436, + 0.825, + 0.493 + ], + "angle": 0, + "content": "[381] Adam Lerer, Hengyuan Hu, Jakob Foerster, and Noam Brown. Improving policies via search in cooperative partially observable games. Proceedings of the AAAI Conference on Artificial Intelligence, 34(05):7187-7194, Apr. 2020. doi: 10.1609/aaai.v34i05.6208. URL https://ojs.aaai.org/index.php/AAAI/article/view/6208." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.497, + 0.825, + 0.527 + ], + "angle": 0, + "content": "[382] Belinda Z Li, Been Kim, and Zi Wang. Questbench: Can llms ask the right question to acquire information in reasoning tasks? arXiv preprint arXiv:2503.22674, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.531, + 0.827, + 0.572 + ], + "angle": 0, + "content": "[383] Bingxuan Li, Yiwei Wang, Jiuming Gu, Kai-Wei Chang, and Nanyun Peng. Metal: A multiagent framework for chart generation with test-time scaling. arXiv preprint arXiv:2502.17651, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.578, + 0.827, + 0.634 + ], + "angle": 0, + "content": "[384] Bohan Li, Jiannan Guan, Longxu Dou, Yunlong Feng, Dingzirui Wang, Yang Xu, Enbo Wang, Qiguang Chen, Bichen Wang, Xiao Xu, et al. Can large language models understand you better? an mbti personality detection dataset aligned with population traits. arXiv preprint arXiv:2412.12510, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.639, + 0.827, + 0.682 + ], + "angle": 0, + "content": "[385] Chen Li, Weiqi Wang, Jingcheng Hu, Yixuan Wei, Nanning Zheng, Han Hu, Zheng Zhang, and Houwen Peng. Common 7b language models already possess strong math capabilities. arXiv preprint arXiv:2403.04706, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.686, + 0.825, + 0.716 + ], + "angle": 0, + "content": "[386] Chen Li, Nazhou Liu, and Kai Yang. Adaptive group policy optimization: Towards stable training and token-efficient reasoning. arXiv preprint arXiv:2503.15952, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.72, + 0.825, + 0.763 + ], + "angle": 0, + "content": "[387] Chengpeng Li, Zhengyang Tang, Ziniu Li, Mingfeng Xue, Keqin Bao, Tian Ding, Ruoyu Sun, Benyou Wang, Xiang Wang, Junyang Lin, et al. Cort: Code-integrated reasoning within thinking. arXiv preprint arXiv:2506.09820, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.767, + 0.825, + 0.81 + ], + "angle": 0, + "content": "[388] Chengpeng Li, Mingfeng Xue, Zhenru Zhang, Jiaxi Yang, Beichen Zhang, Xiang Wang, Bowen Yu, Binyuan Hui, Junyang Lin, and Dayiheng Liu. Start: Self-taught reasoner with tools. arXiv preprint arXiv:2503.04625, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.814, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[389] Chengshu Li, Jacky Liang, Andy Zeng, Xinyun Chen, Karol Hausman, Dorsa Sadigh, Sergey Levine, Li Fei-Fei, Fei Xia, and Brian Ichter. Chain of code: Reasoning with a language model-augmented code emulator. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 28259-28277. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/1i24ar.html." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "61" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[390] Chengzhu Li, Wenshan Wu, Huanyu Zhang, Yan Xia, Shaoguang Mao, Li Dong, Ivan Vulic, and Furu Wei. Imagine while reasoning in space: Multimodal visualization-of-thought. arXiv preprint arXiv:2501.07542, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.139, + 0.827, + 0.182 + ], + "angle": 0, + "content": "[391] Cheryl Li, Tianyuan Xu, and Yiwen Guo. Reasoning-as-logic-units: Scaling test-time reasoning in large language models through logic unit alignment. 
arXiv preprint arXiv:2502.07803, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.187, + 0.827, + 0.23 + ], + "angle": 0, + "content": "[392] Dacheng Li, Shiyi Cao, Chengkun Cao, Xiuyu Li, Shangyin Tan, Kurt Keutzer, Jiarong Xing, Joseph E Gonzalez, and Ion Stoica. S*: Test time scaling for code generation. arXiv preprint arXiv:2502.14382, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.235, + 0.826, + 0.279 + ], + "angle": 0, + "content": "[393] Dacheng Li, Shiyi Cao, Tyler Griggs, Shu Liu, Xiangxi Mo, Shishir G Patil, Matei Zaharia, Joseph E Gonzalez, and Ion Stoica. Llms can easily learn to reason from demonstrations structure, not content, is what matters! arXiv preprint arXiv:2502.07374, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.284, + 0.827, + 0.34 + ], + "angle": 0, + "content": "[394] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, et al. From generation to judgment: Opportunities and challenges of llm-as-a-judge. arXiv preprint arXiv:2411.16594, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.346, + 0.825, + 0.375 + ], + "angle": 0, + "content": "[395] Gengxu Li, Tingyu Xia, Yi Chang, and Yuan Wu. Length-controlled margin-based preference optimization without reference model. arXiv preprint arXiv:2502.14643, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.381, + 0.825, + 0.424 + ], + "angle": 0, + "content": "[396] Haitao Li, Qian Dong, Junjie Chen, Huixue Su, Yujia Zhou, Qingyao Ai, Ziyi Ye, and Yiqun Liu. Llms-as-judges: a comprehensive survey on llm-based evaluation methods. arXiv preprint arXiv:2412.05579, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.429, + 0.827, + 0.485 + ], + "angle": 0, + "content": "[397] Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. https://huggingface.co/AI-MO/NuminaMath-CoT, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.491, + 0.825, + 0.521 + ], + "angle": 0, + "content": "[398] Jia-Nan Li, Jian Guan, Wei Wu, and Rui Yan. Extended inductive reasoning for personalized preference inference from behavioral signals. arXiv preprint arXiv:2505.18071, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.525, + 0.827, + 0.568 + ], + "angle": 0, + "content": "[399] Jiachun Li, Pengfei Cao, Yubo Chen, Jiexin Xu, Huajun Li, Xiaojian Jiang, Kang Liu, and Jun Zhao. Rewarding curse: Analyze and mitigate reward modeling issues for llm reasoning. arXiv preprint arXiv:2503.05188, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.573, + 0.827, + 0.616 + ], + "angle": 0, + "content": "[400] Jierui Li, Hung Le, Yinbo Zhou, Caiming Xiong, Silvio Savarese, and Doyen Sahoo. Codetree: Agent-guided tree search for code generation with large language models. arXiv preprint arXiv:2411.04329, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.622, + 0.827, + 0.664 + ], + "angle": 0, + "content": "[401] Junlong Li, Daya Guo, Dejian Yang, Runxin Xu, Yu Wu, and Junxian He. Codei/o: Condensing reasoning patterns via code input-output prediction. arXiv preprint arXiv:2502.07316, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.67, + 0.827, + 0.712 + ], + "angle": 0, + "content": "[402] Kaixin Li. Verified taco problems. https://huggingface.co/datasets/likaixin/TACO-verified, 2024. URL https://huggingface.co/datasets/likaixin/TACO-verified." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.718, + 0.825, + 0.748 + ], + "angle": 0, + "content": "[403] Kechen Li, Wenqi Zhu, Coralia Cartis, Tianbo Ji, and Shiwei Liu. Sos1: O1 and r1-like reasoning llms are sum-of-square solvers. arXiv preprint arXiv:2502.20545, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.753, + 0.827, + 0.796 + ], + "angle": 0, + "content": "[404] Long Li, Weiwen Xu, Jiayan Guo, Ruochen Zhao, Xingxuan Li, Yuqian Yuan, Boqiang Zhang, Yuming Jiang, Yifei Xin, Ronghao Dang, et al. Chain of ideas: Revolutionizing research via novel idea development with llm agents. arXiv preprint arXiv:2410.13185, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.801, + 0.825, + 0.83 + ], + "angle": 0, + "content": "[405] Margaret Li, Sneha Kudugunta, and Luke Zettlemoyer. (mis) fitting: A survey of scaling laws. arXiv preprint arXiv:2502.18969, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.835, + 0.825, + 0.877 + ], + "angle": 0, + "content": "[406] Ming Li, Lichang Chen, Jiuhai Chen, Shwai He, Heng Huang, Jiuming Gu, and Tianyi Zhou. Reflection-tuning: Data recycling improves llm instruction-tuning. arXiv preprint arXiv:2310.11716, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.883, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[407] Ming Li, Yanhong Li, and Tianyi Zhou. What happened in llms layers when trained for fast vs. slow thinking: A gradient perspective. arXiv preprint arXiv:2410.23743, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "62" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.828, + 0.148 + ], + "angle": 0, + "content": "[408] Minzhi Li, Zhengyuan Liu, Shumin Deng, Shafiq Joty, Nancy Chen, and Min-Yen Kan. Dna-eval: Enhancing large language model evaluation through decomposition and aggregation. In Proceedings of the 31st International Conference on Computational Linguistics, pages 2277-2290, January 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.151, + 0.826, + 0.194 + ], + "angle": 0, + "content": "[409] Moxin Li, Yuantao Zhang, Wenjie Wang, Wentao Shi, Zhuo Liu, Fuli Feng, and Tat-Seng Chua. Self-improvement towards pareto optimality: Mitigating preference conflicts in multi-objective alignment. arXiv preprint arXiv:2502.14354, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.198, + 0.827, + 0.241 + ], + "angle": 0, + "content": "[410] Peiji Li, Kai Lv, Yunfan Shao, Yichuan Ma, Linyang Li, Xiaqing Zheng, Xipeng Qiu, and Qipeng Guo. Fastmcts: A simple sampling strategy for data synthesis. arXiv preprint arXiv:2502.11476, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.245, + 0.827, + 0.288 + ], + "angle": 0, + "content": "[411] Qingyao Li, Wei Xia, Kounianhua Du, Xinyi Dai, Ruiming Tang, Yasheng Wang, Yong Yu, and Weinan Zhang. Rethinkmcts: Refining erroneous thoughts in monte carlo tree search for code generation. arXiv preprint arXiv:2409.09584, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.291, + 0.825, + 0.348 + ], + "angle": 0, + "content": "[412] Shuangtao Li, Shuaihao Dong, Kexin Luan, Xinhan Di, and Chaofan Ding. 
Enhancing reasoning through process supervision with monte carlo tree search. In The First Workshop on Neural Reasoning and Mathematical Discovery at AAAI'2025, January 2025. URL https://openreview.net/forum?id=OupEEi1341." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.352, + 0.824, + 0.382 + ], + "angle": 0, + "content": "[413] Siheng Li, Zhanhui Zhou, Wai Lam, Chao Yang, and Chaochao Lu. Repo: Replay-enhanced policy optimization. arXiv preprint arXiv:2506.09340, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.385, + 0.827, + 0.428 + ], + "angle": 0, + "content": "[414] Wen-Ding Li, Keya Hu, Carter Larsen, Yuqing Wu, Simon Alford, Caleb Woo, Spencer M Dunn, Hao Tang, Michelangelo Naim, Dat Nguyen, et al. Combining induction and transduction for abstract reasoning. arXiv preprint arXiv:2411.02272, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.431, + 0.825, + 0.46 + ], + "angle": 0, + "content": "[415] Wendi Li and Yixuan Li. Process reward model with q-value rankings. arXiv preprint arXiv:2410.11287, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.464, + 0.827, + 0.506 + ], + "angle": 0, + "content": "[416] Wenjun Li, Changyu Chen, and Pradeep Varakantham. Unlocking large language model's planning capabilities with maximum diversity fine-tuning. arXiv preprint arXiv:2406.10479, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.51, + 0.827, + 0.581 + ], + "angle": 0, + "content": "[417] Xiaonan Li and Xipeng Qiu. MoT: Memory-of-thought enables ChatGPT to self-improve. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 6354-6374, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.392. URL https://aclanthology.org/2023.emnlp-main.392/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.584, + 0.827, + 0.628 + ], + "angle": 0, + "content": "[418] Xiaoxi Li, Guanting Dong, Jiajie Jin, Yuyao Zhang, Yujia Zhou, Yutao Zhu, Peitian Zhang, and Zhicheng Dou. Search-o1: Agentic search-enhanced large reasoning models. arXiv preprint arXiv:2501.05366, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.631, + 0.825, + 0.661 + ], + "angle": 0, + "content": "[419] Xinzhe Li. A survey on llm test-time compute via search: Tasks, llm profiling, search algorithms, and relevant frameworks. arXiv preprint arXiv:2501.10069, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.664, + 0.825, + 0.693 + ], + "angle": 0, + "content": "[420] Xuefeng Li, Haoyang Zou, and Pengfei Liu. Limr: Less is more for rl scaling. arXiv preprint arXiv:2502.11886, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.697, + 0.827, + 0.739 + ], + "angle": 0, + "content": "[421] Yafu Li, Zhilin Wang, Tingchen Fu, Ganqu Cui, Sen Yang, and Yu Cheng. From drafts to answers: Unlocking lIm potential via aggregation fine-tuning. arXiv preprint arXiv:2501.11877, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.743, + 0.825, + 0.772 + ], + "angle": 0, + "content": "[422] Yang Li. Policy guided tree search for enhanced ltm reasoning. arXiv preprint arXiv:2502.06813, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.776, + 0.827, + 0.82 + ], + "angle": 0, + "content": "[423] Yang Li, Dong Du, Linfeng Song, Chen Li, Weikang Wang, Tao Yang, and Haitao Mi. Hunyuanprover: A scalable data synthesis framework and guided tree search for automated theorem proving. arXiv preprint arXiv:2412.20735, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.823, + 0.827, + 0.879 + ], + "angle": 0, + "content": "[424] Yang Li, Youssef Emad, Karthik Padthe, Jack Lanchantin, Weizhe Yuan, Thao Nguyen, Jason Weston, Shang-Wen Li, Dong Wang, Ilia Kulikov, et al. 
Naturalthoughts: Selecting and distilling reasoning traces for general reasoning tasks. arXiv preprint arXiv:2507.01921, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.883, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[425] Yifei Li, Zeqi Lin, Shizhuo Zhang, Qiang Fu, Bei Chen, Jian-Guang Lou, and Weizhu Chen. Making language models better reasoners with step-aware verifier. In Anna Rogers, Jordan" + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.828, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "63" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.091, + 0.828, + 0.15 + ], + "angle": 0, + "content": "Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5315-5333, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.291. URL https://aclanthology.org/2023.acl-long.291/." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.154, + 0.828, + 0.21 + ], + "angle": 0, + "content": "[426] Yiwei Li, Ji Zhang, Shaoxiong Feng, Peiwen Yuan, Xinglin Wang, Jiayi Shi, Yueqi Zhang, Chuyi Tan, Boyuan Pan, Yao Hu, et al. Revisiting self-consistency from dynamic distributional alignment perspective on answer aggregation. arXiv preprint arXiv:2502.19830, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.217, + 0.828, + 0.302 + ], + "angle": 0, + "content": "[427] Yujia Li, David Choi, Junyoung Chung, Nate Kushman, Julian Schrittwieser, Rémi Leblond, Tom Eccles, James Keeling, Felix Gimeno, Agustin Dal Lago, Thomas Hubert, Peter Choy, Cyprien de Masson d'Autume, Igor Babuschkin, Xinyun Chen, Po-Sen Huang, Johannes Welbl, Sven Gowal, Alexey Cherepanov, James Molloy, Daniel Mankowitz, Esme Sutherland Robson, Pushmeet Kohli, Nando de Freitas, Koray Kavukcuoglu, and Oriol Vinyals. Competition-level code generation with alphabet. arXiv preprint arXiv:2203.07814, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.307, + 0.828, + 0.351 + ], + "angle": 0, + "content": "[428] Yunxin Li, Zhenyu Liu, Zitao Li, Xuanyu Zhang, Zhenran Xu, Xinyu Chen, Haoyuan Shi, Shenyuan Jiang, Xintong Wang, Jifang Wang, et al. Perception, reason, think, and plan: A survey on large multimodal reasoning models. arXiv preprint arXiv:2505.04921, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.355, + 0.825, + 0.384 + ], + "angle": 0, + "content": "[429] Zheng Li, Qingxiu Dong, Jingyuan Ma, Di Zhang, and Zhifang Sui. Selfbudgeter: Adaptive token allocation for efficient llm reasoning. arXiv preprint arXiv:2505.11274, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.39, + 0.828, + 0.447 + ], + "angle": 0, + "content": "[430] Zhiyuan Li, Hong Liu, Denny Zhou, and Tengyu Ma. Chain of thought empowers transformers to solve inherently serial problems. In The Twelfth International Conference on Learning Representations, January 2023. URL https://openreview.net/pdf?id=3EWTEy9MTM." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.453, + 0.828, + 0.537 + ], + "angle": 0, + "content": "[431] Zhiyuan Li, Dongnan Liu, Chaoyi Zhang, Heng Wang, Tengfei Xue, and Weidong Cai. Enhancing advanced visual reasoning ability of large language models. 
In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 1915-1929, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.114. URL https://aclanthology.org/2024.emnlp-main.114/." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.543, + 0.828, + 0.587 + ], + "angle": 0, + "content": "[432] Zhong-Zhi Li, Duzhen Zhang, Ming-Liang Zhang, Jiaxin Zhang, Zengyan Liu, Yuxuan Yao, Haotian Xu, Junhao Zheng, Pei-Jie Wang, Xiuyi Chen, et al. From system 1 to system 2: A survey of reasoning large language models. arXiv preprint arXiv:2502.17419, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.592, + 0.828, + 0.689 + ], + "angle": 0, + "content": "[433] Zhongzhi Li, Ming-Liang Zhang, Pei-Jie Wang, Jian Xu, Rui-Song Zhang, Yin Fei, Zhi-Long Ji, Jin-Feng Bai, Zhen-Ru Pan, Jiaxin Zhang, and Cheng-Lin Liu. CMMaTH: A Chinese multi-modal math skill evaluation benchmark for foundation models. In Owen Rambow, Leo Wanner, Marianna Apidianaki, Hend Al-Khalifa, Barbara Di Eugenio, and Steven Schockaert, editors, Proceedings of the 31st International Conference on Computational Linguistics, pages 2690–2726, Abu Dhabi, UAE, January 2025. Association for Computational Linguistics. URL https://aclanthology.org/2025.coling-main.184/." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.696, + 0.828, + 0.74 + ], + "angle": 0, + "content": "[434] Zhuoqun Li, Haiyang Yu, Xuanang Chen, Hongyu Lin, Yaojie Lu, Fei Huang, Xianpei Han, Yongbin Li, and Le Sun. Deepsolution: Boosting complex engineering solution design via tree-based exploration and bi-point thinking. arXiv preprint arXiv:2502.20730, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.744, + 0.828, + 0.801 + ], + "angle": 0, + "content": "[435] Zichao Li, Xueru Wen, Jie Lou, Yuqiu Ji, Yaojie Lu, Xianpei Han, Debing Zhang, and Le Sun. 
The devil is in the details: Tackling unimodal spurious correlations for generalizable multimodal reward models. In *Forty-second International Conference on Machine Learning*, 2025. URL https://openreview.net/forum?id=b0qRSUcQP7." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.807, + 0.828, + 0.864 + ], + "angle": 0, + "content": "[436] Ziniu Li, Tian Xu, Yushun Zhang, Zhihang Lin, Yang Yu, Ruoyu Sun, and Zhi-Quan Luo. Remax: A simple, effective, and efficient reinforcement learning method for aligning large language models. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=Stn8hXkpe6." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.869, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[437] Jing Liang, Hongyao Tang, Yi Ma, Jinyi Liu, Yan Zheng, Shuyue Hu, Lei Bai, and Jianye Hao. Squeeze the soaked sponge: Efficient off-policy reinforcement finetuning for large language model. arXiv preprint arXiv:2507.06892, 2025." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.828, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "64" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[438] Jintao Liang, Gang Su, Huifeng Lin, You Wu, Rui Zhao, and Ziyue Li. Reasoning rag via system 1 or system 2: A survey on reasoning agentic retrieval-augmented generation for industry challenges. arXiv preprint arXiv:2506.10408, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.139, + 0.826, + 0.182 + ], + "angle": 0, + "content": "[439] Xiao Liang, Zhong-Zhi Li, Yeyun Gong, Yang Wang, Hengyuan Zhang, Yelong Shen, Ying Nian Wu, and Weizhu Chen. 
Sws: Self-aware weakness-driven problem synthesis in reinforcement learning for llm reasoning. arXiv preprint arXiv:2506.08989, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.187, + 0.826, + 0.23 + ], + "angle": 0, + "content": "[440] Xun Liang, Shichao Song, Zifan Zheng, Hanyu Wang, Qingchen Yu, Xunkai Li, Rong-Hua Li, Yi Wang, Zhonghao Wang, Feiyu Xiong, et al. Internal consistency and self-feedback in large language models: A survey. arXiv preprint arXiv:2407.14507, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.235, + 0.826, + 0.277 + ], + "angle": 0, + "content": "[441] Baohao Liao, Xinyi Chen, Sara Rajaee, Yuhui Xu, Christian Herold, Anders Søgaard, Maarten de Rijke, and Christof Monz. Lost at the beginning of reasoning. arXiv preprint arXiv:2506.22058, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.282, + 0.826, + 0.325 + ], + "angle": 0, + "content": "[442] Baohao Liao, Yuhui Xu, Hanze Dong, Junnan Li, Christof Monz, Silvio Savarese, Doyen Sahoo, and Caiming Xiong. Reward-guided speculative decoding for efficient ltm reasoning. arXiv preprint arXiv:2501.19324, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.33, + 0.826, + 0.401 + ], + "angle": 0, + "content": "[443] Huanxuan Liao, Shizhu He, Yupu Hao, Xiang Li, Yanzhe Zhang, Jun Zhao, and Kang Liu. Skintern: Internalizing symbolic knowledge for distilling better cot capabilities into small language models. In Proceedings of the 31st International Conference on Computational Linguistics, pages 3203-3221, January 2025. URL https://aclanthology.org/2025.coling-main.215.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.406, + 0.826, + 0.449 + ], + "angle": 0, + "content": "[444] Mengqi Liao, Xiangyu Xi, Ruinian Chen, Jia Leng, Yangen Hu, Ke Zeng, Shuai Liu, and Huaiyu Wan. Enhancing efficiency and exploration in reinforcement learning for llms. arXiv preprint arXiv:2505.18573, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.453, + 0.826, + 0.483 + ], + "angle": 0, + "content": "[445] Minpeng Liao, Wei Luo, Chengxi Li, Jing Wu, and Kai Fan. Mario: Math reasoning with code interpreter output-a reproducible pipeline. arXiv preprint arXiv:2401.08190, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.488, + 0.826, + 0.517 + ], + "angle": 0, + "content": "[446] Weibin Liao, Xu Chu, and Yasha Wang. Tpo: Aligning large language models with multi-branch & multi-step preference trees. arXiv preprint arXiv:2410.12854, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.521, + 0.826, + 0.579 + ], + "angle": 0, + "content": "[447] Jonathan Light, Min Cai, Weiqin Chen, Guanzhi Wang, Xiusi Chen, Wei Cheng, Yisong Yue, and Ziniu Hu. Strategist: Learning strategic skills by LLMs via bi-level tree search. In Automated Reinforcement Learning: Exploring Meta-Learning, AutoML, and LLMs, June 2024. URL https://openreview.net/forum?id=UHWbmZuJPF." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.583, + 0.826, + 0.625 + ], + "angle": 0, + "content": "[448] Jonathan Light, Yue Wu, Yiyou Sun, Wenchao Yu, Xujiang Zhao, Ziniu Hu, Haifeng Chen, Wei Cheng, et al. Scattered forest search: Smarter code space exploration with llms. arXiv preprint arXiv:2411.05010, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.63, + 0.826, + 0.688 + ], + "angle": 0, + "content": "[449] Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=v8L0pN6EOi." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.692, + 0.826, + 0.735 + ], + "angle": 0, + "content": "[450] Bill Yuchen Lin, Ronan Le Bras, Kyle Richardson, Ashish Sabharwal, Radha Poovendran, Peter Clark, and Yejin Choi. 
Zebralogic: On the scaling limits of lms for logical reasoning. arXiv preprint arXiv:2502.01100, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.74, + 0.826, + 0.77 + ], + "angle": 0, + "content": "[451] Haohan Lin, Zhiqing Sun, Yiming Yang, and Sean Welleck. Lean-star: Learning to interleave thinking and proving. arXiv preprint arXiv:2407.10040, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.774, + 0.826, + 0.817 + ], + "angle": 0, + "content": "[452] Qingwen Lin, Boyan Xu, Guimin Hu, Zijian Li, Zhifeng Hao, Keli Zhang, and Ruichu Cai. Cmcts: A constrained monte carlo tree search framework for mathematical reasoning in large language model. arXiv preprint arXiv:2502.11169, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.822, + 0.826, + 0.865 + ], + "angle": 0, + "content": "[453] Qingwen Lin, Boyan Xu, Zijian Li, Zhifeng Hao, Keli Zhang, and Ruichu Cai. Leveraging constrained monte carlo tree search to generate reliable long chain-of-thought for mathematical reasoning. arXiv preprint arXiv:2502.11169, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.87, + 0.826, + 0.913 + ], + "angle": 0, + "content": "[454] Yen-Ting Lin, Di Jin, Tengyu Xu, Tianhao Wu, Sainbayar Sukhbaatar, Chen Zhu, Yun He, Yun-Nung Chen, Jason Weston, Yuandong Tian, et al. Step-kto: Optimizing mathematical reasoning through stepwise binary feedback. arXiv preprint arXiv:2501.10799, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "65" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.829, + 0.135 + ], + "angle": 0, + "content": "[455] Yujie Lin, Ante Wang, Moye Chen, Jingyao Liu, Hao Liu, Jinsong Su, and Xinyan Xiao. Investigating inference-time scaling for chain of multi-modal thought: A preliminary study. arXiv preprint arXiv:2502.11514, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.139, + 0.83, + 0.224 + ], + "angle": 0, + "content": "[456] Zicheng Lin, Zhibin Gou, Tian Liang, Ruilin Luo, Haowei Liu, and Yujiu Yang. CriticBench: Benchmarking LLMs for critique-correct reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 1552–1587, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.91. URL https://aclanthology.org/2024-findings-acl.91/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.228, + 0.826, + 0.274 + ], + "angle": 0, + "content": "[457] Zicheng Lin, Tian Liang, Jiahao Xu, Xing Wang, Ruilin Luo, Chufan Shi, Siheng Li, Yujiu Yang, and Zhaopeng Tu. Critical tokens matter: Token-level contrastive estimation enhance llm's reasoning capability. arXiv preprint arXiv:2411.19943, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.277, + 0.827, + 0.32 + ], + "angle": 0, + "content": "[458] Zongyu Lin, Yao Tang, Xingcheng Yao, Da Yin, Ziniu Hu, Yizhou Sun, and Kai-Wei Chang. Qlass: Boosting language agent inference via q-guided stepwise search. arXiv preprint arXiv:2502.02584, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.325, + 0.825, + 0.368 + ], + "angle": 0, + "content": "[459] Zehui Ling, Deshu Chen, Hongwei Zhang, Yifeng Jiao, Xin Guo, and Yuan Cheng. Fast on the easy, deep on the hard: Efficient reasoning via powered length penalty. arXiv preprint arXiv:2506.10446, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.373, + 0.827, + 0.458 + ], + "angle": 0, + "content": "[460] Zhan Ling, Yunhao Fang, Xuanlin Li, Zhiao Huang, Mingu Lee, Roland Memisevic, and Hao Su. Deductive verification of chain-of-thought reasoning. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 36407-36433. Curran Associates, Inc., September 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/72393bd47a35f5b3bee4c609e7bba733-Paper-Conference.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.463, + 0.824, + 0.493 + ], + "angle": 0, + "content": "[461] Philip Lippmann and Jie Yang. Style over substance: Distilled language models reason via stylistic replication. arXiv preprint arXiv:2504.01738, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.497, + 0.827, + 0.596 + ], + "angle": 0, + "content": "[462] Aiwei Liu, Haoping Bai, Zhiyun Lu, Xiang Kong, Xiaoming Wang, Jiulong Shan, Meng Cao, and Lijie Wen. Direct large language model alignment through self-rewarding contrastive prompt distillation. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9688–9712, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.523. URL https://aclanthology.org/2024.acl-long.523/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.601, + 0.827, + 0.644 + ], + "angle": 0, + "content": "[463] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.649, + 0.827, + 0.692 + ], + "angle": 0, + "content": "[464] Bingbin Liu, Sebastien Bubeck, Ronen Eldan, Janardhan Kulkarni, Yanzhi Li, Anh Nguyen, Rachel Ward, and Yi Zhang. Tinygsm: achieving \\(>80\\%\\) on gsm8k with small language models. arXiv preprint arXiv:2312.09241, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.697, + 0.829, + 0.753 + ], + "angle": 0, + "content": "[465] Bo Liu, Leon Guertler, Simon Yu, Zichen Liu, Penghui Qi, Daniel Balcells, Mickel Liu, Cheston Tan, Weiyan Shi, Min Lin, et al. Spiral: Self-play on zero-sum games incentivizes reasoning via multi-agent multi-turn reinforcement learning. arXiv preprint arXiv:2506.24119, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.759, + 0.827, + 0.802 + ], + "angle": 0, + "content": "[466] Chris Yuhao Liu, Liang Zeng, Jiacai Liu, Rui Yan, Jujie He, Chaojie Wang, Shuicheng Yan, Yang Liu, and Yahui Zhou. Skywork-reward: Bag of tricks for reward modeling in llms. arXiv preprint arXiv:2410.18451, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.807, + 0.827, + 0.851 + ], + "angle": 0, + "content": "[467] Chris Yuhao Liu, Liang Zeng, Yuzhen Xiao, Jujie He, Jiacai Liu, Chaojie Wang, Rui Yan, Wei Shen, Fuxiang Zhang, Jiacheng Xu, et al. Skywork-reward-v2: Scaling preference data curation via human-ai synergy. arXiv preprint arXiv:2507.01352, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.855, + 0.829, + 0.912 + ], + "angle": 0, + "content": "[468] Cong Liu, Zhong Wang, ShengYu Shen, Jialiang Peng, Xiaoli Zhang, Zhen-Dong Du, and YaFang Wang. 
The chinese dataset distilled from deepseek-r1-671b. https://huggingface.co/datasets/Congliu/Chinese-DeepSeek-R1-Distill-data-110k, 2025." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.83, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.937, + 0.509, + 0.948 + ], + "angle": 0, + "content": "66" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[469] Dancheng Liu, Amir Nassereldine, Ziming Yang, Chenhui Xu, Yuting Hu, Jiajie Li, Utkarsh Kumar, Changjae Lee, Ruiyang Qin, Yiyu Shi, et al. Large language models have intrinsic self-correction ability. arXiv preprint arXiv:2406.15673, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.139, + 0.825, + 0.169 + ], + "angle": 0, + "content": "[470] Fan Liu, Wenshuo Chao, Naiqiang Tan, and Hao Liu. Bag of tricks for inference-time computation of lIm reasoning. arXiv preprint arXiv:2502.07191, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.173, + 0.825, + 0.217 + ], + "angle": 0, + "content": "[471] Guanlin Liu, Kaixuan Ji, Renjie Zheng, Zheng Wu, Chen Dun, Quanquan Gu, and Lin Yan. Enhancing multi-step reasoning abilities of language models through direct q-function optimization. arXiv preprint arXiv:2410.09302, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.221, + 0.827, + 0.264 + ], + "angle": 0, + "content": "[472] Hanbing Liu, Lang Cao, Yuanyi Ren, Mengyu Zhou, Haoyu Dong, Xiaojun Ma, Shi Han, and Dongmei Zhang. Bingo: Boosting efficient reasoning of llms via dynamic and significance-based reinforcement learning. arXiv preprint arXiv:2506.08125, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.269, + 0.827, + 0.311 + ], + "angle": 0, + "content": "[473] Hanmeng Liu, Zhizhang Fu, Mengru Ding, Ruoxi Ning, Chaoli Zhang, Xiaozhang Liu, and Yue Zhang. Logical reasoning in large language models: A survey. arXiv preprint arXiv:2502.09100, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.317, + 0.827, + 0.358 + ], + "angle": 0, + "content": "[474] Hao Liu, Zhengren Wang, Xi Chen, Zhiyu Li, Feiyu Xiong, Qinhan Yu, and Wentao Zhang. Hoprag: Multi-hop reasoning for logic-aware retrieval-augmented generation. arXiv preprint arXiv:2502.12442, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.365, + 0.827, + 0.422 + ], + "angle": 0, + "content": "[475] Hongxuan Liu, Zhiyao Luo, and Tingting Zhu. Best of both worlds: Harmonizing LLM capabilities in decision-making and question-answering for treatment regimes. In Advances In Medical Foundation Models: Explainability, Robustness, Security, and Beyond, 2024. URL https://openreview.net/forum?id=afu9qhp7md." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.426, + 0.825, + 0.469 + ], + "angle": 0, + "content": "[476] Jiacai Liu, Chaojie Wang, Chris Yuhao Liu, Liang Zeng, Rui Yan, Yiwen Sun, Yang Liu, and Yahui Zhou. Improving multi-step reasoning abilities of large language models with direct advantage policy optimization. arXiv preprint arXiv:2412.18279, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.474, + 0.825, + 0.531 + ], + "angle": 0, + "content": "[477] Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. Don't throw away your value model! generating more preferable text with value-guided monte-carlo tree search decoding. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=kh9Zt2Ldmn." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.536, + 0.827, + 0.579 + ], + "angle": 0, + "content": "[478] Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. Making PPO even better: Value-guided monte-carlo tree search decoding, September 2024. URL https://openreview.net/forum?id=QaODpeRaOK." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.583, + 0.827, + 0.626 + ], + "angle": 0, + "content": "[479] Junnan Liu, Hongwei Liu, Linchen Xiao, Shudong Liu, Taolin Zhang, Zihan Ma, Songyang Zhang, and Kai Chen. Deciphering trajectory-aided lIm reasoning: An optimization perspective. arXiv preprint arXiv:2505.19815, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.631, + 0.827, + 0.673 + ], + "angle": 0, + "content": "[480] Junnan Liu, Linhao Luo, Thuy-Trang Vu, and Gholamreza Haffari. Situatedthinker: Grounding llm reasoning with real-world through situated thinking. arXiv preprint arXiv:2505.19300, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.679, + 0.825, + 0.723 + ], + "angle": 0, + "content": "[481] Junteng Liu, Yuanxiang Fan, Zhuo Jiang, Han Ding, Yongyi Hu, Chi Zhang, Yiqi Shi, Shitong Weng, Aili Chen, Shiqi Chen, et al. Synlogic: Synthesizing verifiable reasoning data at scale for learning logical reasoning and beyond. arXiv preprint arXiv:2505.19641, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.727, + 0.825, + 0.77 + ], + "angle": 0, + "content": "[482] Liping Liu, Chunhong Zhang, Likang Wu, Chuang Zhao, Zheng Hu, Ming He, and Jianping Fan. Instruct-of-reflection: Enhancing large language models iterative reflection capabilities via dynamic-meta instruction. arXiv preprint arXiv:2503.00902, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.774, + 0.825, + 0.817 + ], + "angle": 0, + "content": "[483] Mingjie Liu, Shizhe Diao, Ximing Lu, Jian Hu, Xin Dong, Yejin Choi, Jan Kautz, and Yi Dong. 
Prorl: Prolonged reinforcement learning expands reasoning boundaries in large language models. arXiv preprint arXiv:2505.24864, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.822, + 0.827, + 0.865 + ], + "angle": 0, + "content": "[484] Qiang Liu, Xinlong Chen, Yue Ding, Shizhen Xu, Shu Wu, and Liang Wang. Attention-guided self-reflection for zero-shot hallucination detection in large language models. arXiv preprint arXiv:2501.09997, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[485] Qin Liu, Wenxuan Zhou, Nan Xu, James Y Huang, Fei Wang, Sheng Zhang, Hoifung Poon, and Muhao Chen. Metascale: Test-time scaling with evolving meta-thoughts. arXiv preprint arXiv:2503.13447, 2025." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "67" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.826, + 0.134 + ], + "angle": 0, + "content": "[486] Runze Liu, Junqi Gao, Jian Zhao, Kaiyan Zhang, Xiu Li, Biqing Qi, Wanli Ouyang, and Bowen Zhou. Can 1b llm surpass 405b llm? rethinking compute-optimal test-time scaling. arXiv preprint arXiv:2502.06703, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.137, + 0.825, + 0.18 + ], + "angle": 0, + "content": "[487] Tengxuan Liu, Shiyao Li, Jiayi Yang, Tianchen Zhao, Feng Zhou, Xiaohui Song, Guohao Dai, Shengen Yan, Huazhong Yang, and Yu Wang. Pm-kvq: Progressive mixed-precision kv cache quantization for long-cot llms. arXiv preprint arXiv:2505.18610, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.183, + 0.827, + 0.225 + ], + "angle": 0, + "content": "[488] Wanlong Liu, Junxiao Xu, Fei Yu, Yukang Lin, Ke Ji, Wenyu Chen, Yan Xu, Yasheng Wang, Lifeng Shang, and Benyou Wang. Qfft, question-free fine-tuning for adaptive reasoning. arXiv preprint arXiv:2506.12860, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.228, + 0.825, + 0.258 + ], + "angle": 0, + "content": "[489] Wei Liu, Junlong Li, Xiwen Zhang, Fan Zhou, Yu Cheng, and Junxian He. Diving into self-evolving training for multimodal reasoning. arXiv preprint arXiv:2412.17451, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.261, + 0.827, + 0.302 + ], + "angle": 0, + "content": "[490] Wei Liu, Ruochen Zhou, Yiyun Deng, Yuzhen Huang, Junteng Liu, Yuntian Deng, Yizhe Zhang, and Junxian He. Learn to reason efficiently with adaptive length-based reward shaping. arXiv preprint arXiv:2505.15612, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.306, + 0.825, + 0.335 + ], + "angle": 0, + "content": "[491] Ye Liu, Kevin Qinghong Lin, Chang Wen Chen, and Mike Zheng Shou. Videomind: A chain-of-lora agent for long video reasoning. arXiv preprint arXiv:2503.13444, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.338, + 0.827, + 0.38 + ], + "angle": 0, + "content": "[492] Yongjiang Liu, Haoxi Li, Xiaosong Ma, Jie Zhang, and Song Guo. Think how to think: Mitigating overthinking with autonomous difficulty cognition in large reasoning models. arXiv preprint arXiv:2507.02663, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.383, + 0.827, + 0.425 + ], + "angle": 0, + "content": "[493] Yue Liu, Hongcheng Gao, Shengfang Zhai, Jun Xia, Tianyi Wu, Zhiwei Xue, Yulin Chen, Kenji Kawaguchi, Jiaheng Zhang, and Bryan Hooi. Guardreasoner: Towards reasoning-based llm safeguards. arXiv preprint arXiv:2501.18492, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.429, + 0.827, + 0.47 + ], + "angle": 0, + "content": "[494] Yue Liu, Jiaying Wu, Yufei He, Hongcheng Gao, Hongyu Chen, Baolong Bi, Ruihan Gong, Jiaheng Zhang, Zhiqi Huang, and Bryan Hooi. Efficient inference for large reasoning models: A survey. arXiv preprint arXiv:2503.23077, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.474, + 0.825, + 0.516 + ], + "angle": 0, + "content": "[495] Yuliang Liu, Junjie Lu, Zhaoling Chen, Chaofeng Qu, Jason Klein Liu, Chonghan Liu, Zefan Cai, Yunhui Xia, Li Zhao, Jiang Bian, et al. Adaptivestep: Automatically dividing reasoning step through model confidence. arXiv preprint arXiv:2502.13943, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.52, + 0.827, + 0.562 + ], + "angle": 0, + "content": "[496] Zhaowei Liu, Xin Guo, Fangqi Lou, Lingfeng Zeng, Jinyi Niu, Zixuan Wang, Jiajie Xu, Weige Cai, Ziwei Yang, Xueqian Zhao, et al. Fin-r1: A large language model for financial reasoning through reinforcement learning. arXiv preprint arXiv:2503.16252, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.565, + 0.827, + 0.607 + ], + "angle": 0, + "content": "[497] Zhiyuan Liu, Yuting Zhang, Feng Liu, Changwang Zhang, Ying Sun, and Jun Wang. Othinkmr1: Stimulating multimodal generalized reasoning capabilities through dynamic reinforcement learning. arXiv preprint arXiv:2503.16081, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.611, + 0.829, + 0.653 + ], + "angle": 0, + "content": "[498] Zichen Liu, Changyu Chen, Wenjun Li, Tianyu Pang, Chao Du, and Min Lin. There may not be aha moment in r1-zero-like training — a pilot study. https://oatllm.notion.site/oat-zero, 2025. Notion Blog." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.656, + 0.827, + 0.697 + ], + "angle": 0, + "content": "[499] Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. 
Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.702, + 0.827, + 0.743 + ], + "angle": 0, + "content": "[500] Zihan Liu, Yang Chen, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. Acemath: Advancing frontier math reasoning with post-training and reward modeling. arXiv preprint arXiv:2412.15084, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.747, + 0.827, + 0.788 + ], + "angle": 0, + "content": "[501] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.793, + 0.825, + 0.822 + ], + "angle": 0, + "content": "[502] Elita Lobo, Chirag Agarwal, and Himabindu Lakkaraju. On the impact of fine-tuning on chain-of-thought reasoning. arXiv preprint arXiv:2411.15382, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.825, + 0.827, + 0.867 + ], + "angle": 0, + "content": "[503] Chenwei Lou, Zewei Sun, Xinnian Liang, Meng Qu, Wei Shen, Wenqi Wang, Yuntao Li, Qingping Yang, and Shuangzhi Wu. Adacot: Pareto-optimal adaptive chain-of-thought triggering via reinforcement learning. arXiv preprint arXiv:2505.11896, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.827, + 0.912 + ], + "angle": 0, + "content": "[504] Dakuan Lu, Xiaoyu Tan, Rui Xu, Tianchu Yao, Chao Qu, Wei Chu, Yinghui Xu, and Yuan Qi. Scp-116k: A high-quality problem-solution dataset and a generalized pipeline for automated extraction in the higher education science domain, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.829, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "68" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[505] Haolang Lu, Yilian Liu, Jingxin Xu, Guoshun Nan, Yuanlong Yu, Zhican Chen, and Kun Wang. Auditing meta-cognitive hallucinations in reasoning large language models. arXiv preprint arXiv:2505.13143, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.138, + 0.827, + 0.237 + ], + "angle": 0, + "content": "[506] Jianqiao Lu, Zhiyang Dou, Hongru WANG, Zeyu Cao, Jianbo Dai, Yunlong Feng, and Zhijiang Guo. Autopsy: Automated process-supervised verifier. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 79935-79962. Curran Associates, Inc., December 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/9246aa822579d9b29a140ecdac36ad60-Paper-Conference.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.24, + 0.827, + 0.311 + ], + "angle": 0, + "content": "[507] Pan Lu, Swaroop Mishra, Tony Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho, editors, Advances in Neural Information Processing Systems, November 2022. URL https://openreview.net/forum?id=HjwK-Tc_Bc." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.314, + 0.828, + 0.384 + ], + "angle": 0, + "content": "[508] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=KUNzEQMWU7." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.389, + 0.827, + 0.431 + ], + "angle": 0, + "content": "[509] Pan Lu, Bowen Chen, Sheng Liu, Rahul Thapa, Joseph Boen, and James Zou. Octo tools: An agentic framework with extensible tools for complex reasoning. arXiv preprint arXiv:2502.11271, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.435, + 0.826, + 0.479 + ], + "angle": 0, + "content": "[510] Rubing Lu, João Sedoc, and Arun Sundararajan. Reasoning and the trusting behavior of deepseek and gpt: An experiment revealing hidden fault lines in large language models. arXiv preprint arXiv:2502.12825, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.482, + 0.826, + 0.511 + ], + "angle": 0, + "content": "[511] Wenquan Lu, Yuechuan Yang, Kyle Lee, Yanshu Li, and Enqi Liu. Latent chain-of-thought? decoding the depth-recurrent transformer. arXiv preprint arXiv:2507.02199, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.515, + 0.827, + 0.558 + ], + "angle": 0, + "content": "[512] Zhengxi Lu, Yuxiang Chai, Yaxuan Guo, Xi Yin, Liang Liu, Hao Wang, Guanjing Xiong, and Hongsheng Li. Ui-r1: Enhancing action prediction of gui agents by reinforcement learning. arXiv preprint arXiv:2503.21620, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.562, + 0.827, + 0.605 + ], + "angle": 0, + "content": "[513] Zimu Lu, Aojun Zhou, Houxing Ren, Ke Wang, Weikang Shi, Junting Pan, Mingjie Zhan, and Hongsheng Li. 
Mathgenie: Generating synthetic data with question back-translation for enhancing mathematical reasoning of llms. arXiv preprint arXiv:2402.16352, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.609, + 0.827, + 0.664 + ], + "angle": 0, + "content": "[514] Haipeng Luo, Qingfeng Sun, Can Xu, Pu Zhao, Jianguang Lou, Chongyang Tao, Xiubo Geng, Qingwei Lin, Shifeng Chen, and Dongmei Zhang. Wizardmath: Empowering mathematical reasoning for large language models via reinforced evol-instruct. arXiv preprint arXiv:2308.09583, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.669, + 0.827, + 0.712 + ], + "angle": 0, + "content": "[515] Hanjun Luo, Shenyu Dai, Chiming Ni, Xinfeng Li, Guibin Zhang, Kun Wang, Tongliang Liu, and Hanan Salam. Agent auditor: Human-level safety and security evaluation for lIm agents. arXiv preprint arXiv:2506.00641, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.716, + 0.825, + 0.759 + ], + "angle": 0, + "content": "[516] Haotian Luo, Li Shen, Haiying He, Yibo Wang, Shiwei Liu, Wei Li, Naiqiang Tan, Xiaochun Cao, and Dacheng Tao. O1-pruner: Length-harmonizing fine-tuning for o1-like reasoning pruning. arXiv preprint arXiv:2501.12570, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.763, + 0.827, + 0.805 + ], + "angle": 0, + "content": "[517] Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.809, + 0.827, + 0.866 + ], + "angle": 0, + "content": "[518] Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. Deepscaler: Surpassing o1-preview with a 1.5b model by scaling rl, February 2025. 
URL https://github.com/agentica-project/rllm. Notion Blog." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[519] Ruilin Luo, Zhuofan Zheng, Yifan Wang, Yiyao Yu, Xinzhe Ni, Zicheng Lin, Jin Zeng, and Yujiu Yang. Ursa: Understanding and verifying chain-of-thought reasoning in multimodal mathematics. arXiv preprint arXiv:2501.04686, 2025." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.828, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "69" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.829, + 0.189 + ], + "angle": 0, + "content": "[520] Xianzhen Luo, Qingfu Zhu, Zhiming Zhang, Libo Qin, Xuanyu Zhang, Qing Yang, Dongliang Xu, and Wanxiang Che. Python is not always the best choice: Embracing multilingual program of thoughts. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 7185-7212, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.408. URL https://aclanthology.org/2024.emnlp-main.408/." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.194, + 0.829, + 0.237 + ], + "angle": 0, + "content": "[521] Yijia Luo, Yulin Song, Xingyao Zhang, Jiaheng Liu, Weixun Wang, GengRu Chen, Wenbo Su, and Bo Zheng. Deconstructing long chain-of-thought: A structured reasoning optimization framework for long cot distillation. arXiv preprint arXiv:2503.16385, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.24, + 0.826, + 0.284 + ], + "angle": 0, + "content": "[522] Chengqi Lyu, Songyang Gao, Yuzhe Gu, Wenwei Zhang, Jianfei Gao, Kuikun Liu, Ziyi Wang, Shuaibin Li, Qian Zhao, Haian Huang, et al. Exploring the limit of outcome reward for learning mathematical reasoning. arXiv preprint arXiv:2502.06781, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.287, + 0.829, + 0.399 + ], + "angle": 0, + "content": "[523] Qing Lyu, Shreya Havaldar, Adam Stein, Li Zhang, Delip Rao, Eric Wong, Marianna Apidianaki, and Chris Callison-Burch. Faithful chain-of-thought reasoning. In Jong C. Park, Yuki Arase, Baotian Hu, Wei Lu, Derry Wijaya, Ayu Purwarianti, and Adila Alfa Krisnadhi, editors, Proceedings of the 13th International Joint Conference on Natural Language Processing and the 3rd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pages 305-329, Nusa Dua, Bali, November 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.ijcnlp-main.20. URL https://aclanthology.org/2023.ijcnlp-main.20/." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.403, + 0.826, + 0.487 + ], + "angle": 0, + "content": "[524] Alexander Lyzhov, Yuliya Molchanova, Armenii Ashukha, Dmitry Molchanov, and Dmitry Vetrov. Greedy policy search: A simple baseline for learnable test-time augmentation. In Jonas Peters and David Sontag, editors, Proceedings of the 36th Conference on Uncertainty in Artificial Intelligence (UAI), volume 124 of Proceedings of Machine Learning Research, pages 1308-1317. PMLR, 03-06 Aug 2020. URL https://proceedings.mlr.press/v124/lyzhov20a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.491, + 0.825, + 0.521 + ], + "angle": 0, + "content": "[525] Jingyuan Ma, Rui Li, Zheng Li, Junfeng Liu, Lei Sha, and Zhifang Sui. Hauntattack: When attack follows reasoning as a shadow. arXiv preprint arXiv:2506.07031, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.524, + 0.829, + 0.567 + ], + "angle": 0, + "content": "[526] Lu Ma, Hao Liang, Meiyi Qiang, Lexiang Tang, Xiaochen Ma, Zhen Hao Wong, Junbo Niu, Chengyu Shen, Running He, Bin Cui, et al. Learning what reinforcement learning can't: Interleaved online fine-tuning for hardest questions. arXiv preprint arXiv:2506.07527, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.57, + 0.826, + 0.613 + ], + "angle": 0, + "content": "[527] Nanye Ma, Shangyuan Tong, Haolin Jia, Hexiang Hu, Yu-Chuan Su, Mingda Zhang, Xuan Yang, Yandong Li, Tommi Jaakkola, Xuhui Jia, et al. Inference-time scaling for diffusion models beyond scaling denoising steps. arXiv preprint arXiv:2501.09732, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.617, + 0.829, + 0.659 + ], + "angle": 0, + "content": "[528] Qianli Ma, Haotian Zhou, Tingkai Liu, Jianbo Yuan, Pengfei Liu, Yang You, and Hongxia Yang. Let's reward step by step: Step-level reward model as the navigators for reasoning. arXiv preprint arXiv:2310.10080, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.664, + 0.829, + 0.707 + ], + "angle": 0, + "content": "[529] Ruotian Ma, Peisong Wang, Cheng Liu, Xingyan Liu, Jiaqi Chen, Bang Zhang, Xin Zhou, Nan Du, and Jia Li. \\(S^2 r\\): Teaching llms to self-verify and self-correct via reinforcement learning. arXiv preprint arXiv:2502.12853, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.711, + 0.829, + 0.74 + ], + "angle": 0, + "content": "[530] Xinyin Ma, Guangnian Wan, Runpeng Yu, Gongfan Fang, and Xinchao Wang. Cot-valve: Length-compressible chain-of-thought tuning. arXiv preprint arXiv:2502.09601, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.744, + 0.829, + 0.785 + ], + "angle": 0, + "content": "[531] Xueguang Ma, Qian Liu, Dongfu Jiang, Ge Zhang, Zejun Ma, and Wenhu Chen. Generalreasoner: Advancing llm reasoning across all domains. 
arXiv preprint arXiv:2505.14652, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.79, + 0.826, + 0.82 + ], + "angle": 0, + "content": "[532] Xuetao Ma, Wenbin Jiang, and Hua Huang. Problem-solving logic guided curriculum in-context learning for llms complex reasoning. arXiv preprint arXiv:2502.15401, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.824, + 0.826, + 0.866 + ], + "angle": 0, + "content": "[533] Yan Ma, Steffi Chern, Xuyang Shen, Yiran Zhong, and Pengfei Liu. Rethinking rl scaling for vision language models: A transparent, from-scratch framework and comprehensive evaluation scheme. arXiv preprint arXiv:2504.02587, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.829, + 0.913 + ], + "angle": 0, + "content": "[534] Yiran Ma, Zui Chen, Tianqiao Liu, Mi Tian, Zhuo Liu, Zitao Liu, and Weiqi Luo. What are step-level reward models rewarding? counterintuitive findings from mcts-boosted mathematical reasoning. arXiv preprint arXiv:2412.15904, 2024." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.829, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "70" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.829, + 0.133 + ], + "angle": 0, + "content": "[535] Zexiong Ma, Chao Peng, Pengfei Gao, Xiangxin Meng, Yanzhen Zou, and Bing Xie. Sortf: Issue resolving with subtask-oriented reinforced fine-tuning. arXiv preprint arXiv:2502.20127, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.138, + 0.826, + 0.168 + ], + "angle": 0, + "content": "[536] Zeyao Ma, Xiaokang Zhang, Jing Zhang, Jifan Yu, Sijia Luo, and Jie Tang. Dynamic scaling of unit tests for code reward modeling. arXiv preprint arXiv:2501.01054, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.171, + 0.827, + 0.213 + ], + "angle": 0, + "content": "[537] Ziyang Ma, Zhuo Chen, Yuping Wang, Eng Siong Chng, and Xie Chen. Audio-cot: Exploring chain-of-thought reasoning in large audio language model. arXiv preprint arXiv:2501.07246, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.218, + 0.828, + 0.288 + ], + "angle": 0, + "content": "[538] Aman Madaan, Katherine Hermann, and Amir Yazdanbakhsh. What makes chain-of-thought prompting effective? a counterfactual study. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 1448-1535, Singapore, December 2023. URL https://aclanthology.org/2023.findings-emnlp.101.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.292, + 0.828, + 0.404 + ], + "angle": 0, + "content": "[539] Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, Shashank Gupta, Bodhisattwa Prasad Majumder, Katherine Hermann, Sean Welleck, Amir Yazdanbakhsh, and Peter Clark. Self-refine: Iterative refinement with self-feedback. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 46534-46594. Curran Associates, Inc., March 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/91edff07232fb1b55a505a9e9f6c0ff3-Paper-Conference.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.407, + 0.827, + 0.45 + ], + "angle": 0, + "content": "[540] Sathwik Tejaswi Madhusudhan, Shruthan Radhakrishna, Jash Mehta, and Toby Liang. Millions scale dataset distilled from r1-32b. https://huggingface.co/datasets/ServiceNow-AI/R1-Distill-SFT, February 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.454, + 0.827, + 0.497 + ], + "angle": 0, + "content": "[541] Sadegh Mahdavi, Muchen Li, Kaiwen Liu, Christos Thrampoulidis, Leonid Sigal, and Renjie Liao. Leveraging online olympiad-level math problems for llms training and contamination-resistant evaluation. arXiv preprint arXiv:2501.14275, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.501, + 0.825, + 0.53 + ], + "angle": 0, + "content": "[542] Tobias Materzok. Cos (m+ o) s: Curiosity and rl-enhanced mcts for exploring story space via language models. arXiv preprint arXiv:2501.17104, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.534, + 0.827, + 0.591 + ], + "angle": 0, + "content": "[543] Justus Mattern, Sami Jaghourar, Manveer Basra, Jannik Straube, Matthew Di Ferrante, Felix Gabriel, Jack Min Ong, Vincent Weisser, and Johannes Hagemann. Synthetic-1: Two million collaboratively generated reasoning traces from deepseek-r1, 2025. URL https://www.primeintellect.ai/blog/synthetic-1-release." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.595, + 0.827, + 0.636 + ], + "angle": 0, + "content": "[544] Nat McAleese, Rai Michael Pokorny, Juan Felipe Ceron Uribe, Evgenia Nitishinskaya, Maja Trebacz, and Jan Leike. Llm critics help catch llm bugs. arXiv preprint arXiv:2407.00215, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.641, + 0.827, + 0.684 + ], + "angle": 0, + "content": "[545] R Thomas McCoy, Shunyu Yao, Dan Friedman, Mathew D Hardy, and Thomas L Grifths. When a language model is optimized for reasoning, does it still show embers of autoregression? an analysis of openai o1. arXiv preprint arXiv:2410.01792, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.688, + 0.825, + 0.743 + ], + "angle": 0, + "content": "[546] Lingrui Mei, Jiayu Yao, Yuyao Ge, Yiwei Wang, Baolong Bi, Yujun Cai, Jiazhi Liu, Mingyu Li, Zhong-Zhi Li, Duzhen Zhang, Chenlin Zhou, Jiayi Mao, Tianze Xia, Jiafeng Guo, and Shenghua Liu. A survey of context engineering for large language models. arXiv preprint arXiv:2507.13334, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.748, + 0.827, + 0.805 + ], + "angle": 0, + "content": "[547] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, Ping Luo, Yu Qiao, Qiaosheng Zhang, and Wenqi Shao. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.809, + 0.825, + 0.852 + ], + "angle": 0, + "content": "[548] William Merrill and Ashish Sabharwal. The expressive power of transformers with chain of thought. In *The Twelfth International Conference on Learning Representations*, January 2023. URL https://openreview.net/pdf?id=CDmerQ37Zs." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.855, + 0.827, + 0.912 + ], + "angle": 0, + "content": "[549] Ning Miao, Yee Whye Teh, and Tom Rainforth. Selfcheck: Using LLMs to zero-shot check their own step-by-step reasoning. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id= pTHfApDakA." 
+ }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.829, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.507, + 0.947 + ], + "angle": 0, + "content": "71" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.826, + 0.147 + ], + "angle": 0, + "content": "[550] Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, et al. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.152, + 0.826, + 0.223 + ], + "angle": 0, + "content": "[551] Seyed Iman Mirzadeh, Keivan Alizadeh, Hooman Shahrokhi, Oncel Tuzel, Samy Bengio, and Mehrdad Farajtabar. GSM-symbolic: Understanding the limitations of mathematical reasoning in large language models. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=AjXkRZIvjb." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.226, + 0.826, + 0.27 + ], + "angle": 0, + "content": "[552] Prakamya Mishra, Jiang Liu, Jialian Wu, Xiaodong Yu, Zicheng Liu, and Emad Barsoum. Tttbench: A benchmark for evaluating reasoning ability with simple and novel tic-tac-toe-style games. arXiv preprint arXiv:2506.10209, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.273, + 0.826, + 0.315 + ], + "angle": 0, + "content": "[553] Arindam Mitra, Hamed Khanpour, Corby Rosset, and Ahmed Awadallah. Orca-math: Unlocking the potential of slms in grade school math. arXiv preprint arXiv:2402.14830, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.32, + 0.826, + 0.363 + ], + "angle": 0, + "content": "[554] Chancharik Mitra, Brandon Huang, Trevor Darrell, and Roei Herzig. Compositional chain-of-thought prompting for large multimodal models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14420-14431, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.367, + 0.826, + 0.395 + ], + "angle": 0, + "content": "[555] Purbesh Mitra and Sennur Ulukus. Motif: Modular thinking via reinforcement fine-tuning in llms. arXiv preprint arXiv:2507.02851, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.4, + 0.826, + 0.456 + ], + "angle": 0, + "content": "[556] Shentong Mo and Miao Xin. Tree of uncertain thoughts reasoning for large language models. In ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 12742-12746, April 2024. doi: 10.1109/ICASSP48485.2024.10448355. URL https://ieeexplore.ieee.org/document/10448355." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.46, + 0.826, + 0.489 + ], + "angle": 0, + "content": "[557] Philipp Mondorf and Barbara Plank. Beyond accuracy: Evaluating the reasoning behavior of large language models—a survey. arXiv preprint arXiv:2404.01869, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.493, + 0.826, + 0.591 + ], + "angle": 0, + "content": "[558] Terufumi Morishita, Gaku Morio, Atsuki Yamaguchi, and Yasuhiro Sogawa. Enhancing reasoning capabilities of llms via principled synthetic logic corpus. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 73572-73604. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/8678da90126aa58326b2fc0254b33a8c-Paper-Conference.pdf." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.595, + 0.826, + 0.637 + ], + "angle": 0, + "content": "[559] Yongyu Mu, Jiali Zeng, Bei Li, Xinyan Guan, Fandong Meng, Jie Zhou, Tong Xiao, and Jingbo Zhu. Dissecting long reasoning models: An empirical study. arXiv preprint arXiv:2506.04913, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.641, + 0.826, + 0.684 + ], + "angle": 0, + "content": "[560] Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.688, + 0.826, + 0.731 + ], + "angle": 0, + "content": "[561] Tergel Munkhbat, Namgyu Ho, Seohyun Kim, Yongjin Yang, Yujin Kim, and Se-Young Yun. Self-training elicits concise reasoning in large language models. arXiv preprint arXiv:2502.20122, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.735, + 0.826, + 0.763 + ], + "angle": 0, + "content": "[562] Vaskar Nath, Pranav Raja, Claire Yoon, and Sean Hendryx. Toolcomp: A multi-tool reasoning & process supervision benchmark. arXiv preprint arXiv:2501.01290, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.768, + 0.826, + 0.811 + ], + "angle": 0, + "content": "[563] Sania Nayab, Giulio Rossolini, Marco Simoni, Andrea Saracino, Giorgio Buttazzo, Nicola Maria Manes, and Fabrizio Giacomelli. Concise thoughts: Impact of output length on llm reasoning and cost. arXiv preprint arXiv:2407.19825, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.814, + 0.826, + 0.911 + ], + "angle": 0, + "content": "[564] Ansong Ni, Srini Iyer, Dragomir Radev, Veselin Stoyanov, Wen-Tau Yih, Sida Wang, and Xi Victoria Lin. LEVER: Learning to verify language-to-code generation with execution. 
In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett, editors, Proceedings of the 40th International Conference on Machine Learning, volume 202 of Proceedings of Machine Learning Research, pages 26106-26128. PMLR, 23-29 Jul 2023. URL https://proceedings.mlr.press/v202/ni23b.html." + }, + { + "type": "list", + "bbox": [ + 0.183, + 0.091, + 0.826, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "72" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[565] Ziyi Ni, Yifan Li, Ning Yang, Dou Shen, Pin Lv, and Daxiang Dong. Tree-of-code: A tree-structured exploring framework for end-to-end code generation and execution in complex task handling. arXiv preprint arXiv:2412.15305, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.138, + 0.827, + 0.18 + ], + "angle": 0, + "content": "[566] Allen Nie, Yi Su, Bo Chang, Jonathan N Lee, Ed H Chi, Quoc V Le, and Minmin Chen. Evolve: Evaluating and optimizing llms for exploration. arXiv preprint arXiv:2410.06238, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.185, + 0.827, + 0.228 + ], + "angle": 0, + "content": "[567] Yansong Ning, Wei Li, Jun Fang, Naiqiang Tan, and Hao Liu. Not all thoughts are generated equal: Efficient lIm reasoning via multi-turn reinforcement learning. arXiv preprint arXiv:2505.11827, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.232, + 0.827, + 0.276 + ], + "angle": 0, + "content": "[568] Harsha Nori, Naoto Usuyama, Nicholas King, Scott Mayer McKinney, Xavier Fernandes, Sheng Zhang, and Eric Horvitz. 
From medprompt to o1: Exploration of run-time strategies for medical challenge problems and beyond. arXiv preprint arXiv:2411.03590, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.279, + 0.827, + 0.35 + ], + "angle": 0, + "content": "[569] Maxwell Nye, Anders Johan Andreassen, Guy Gur-Ari, Henryk Michalewski, Jacob Austin, David Bieber, David Dohan, Aitor Lewkowycz, Maarten Bosma, David Luan, Charles Sutton, and Augustus Odena. Show your work: Scratchpads for intermediate computation with language models. In Deep Learning for Code Workshop, March 2022. URL https://openreview.net/forum?id=HB1x2idbkbq." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.354, + 0.827, + 0.382 + ], + "angle": 0, + "content": "[570] Skywork o1 Team. Skywork-o1 open series. https://huggingface.co/Skywork, November 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.386, + 0.827, + 0.415 + ], + "angle": 0, + "content": "[571] OpenCompass. Aime 2025. https://huggingface.co/datasets/opencompass/AIME2025, February 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.42, + 0.827, + 0.463 + ], + "angle": 0, + "content": "[572] Yixin Ou, Yunzhi Yao, Ningyu Zhang, Hui Jin, Jiacheng Sun, Shumin Deng, Zhenguo Li, and Huajun Chen. How do llms acquire new knowledge? a knowledge circuits perspective on continual pre-training. arXiv preprint arXiv:2502.11196, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.467, + 0.827, + 0.497 + ], + "angle": 0, + "content": "[573] Alexander Pan, Kush Bhatia, and Jacob Steinhardt. The effects of reward misspecification: Mapping and mitigating misaligned models. arXiv preprint arXiv:2201.03544, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.5, + 0.829, + 0.598 + ], + "angle": 0, + "content": "[574] Jiabao Pan, Yan Zhang, Chen Zhang, Zuozhu Liu, Hongwei Wang, and Haizhou Li. DynaThink: Fast or slow? a dynamic decision-making framework for large language models. 
In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 14686-14695, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.814. URL https://aclanthology.org/2024.emnlp-main.814/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.602, + 0.827, + 0.644 + ], + "angle": 0, + "content": "[575] Jianfeng Pan, Senyou Deng, and Shaomang Huang. Coat: Chain-of-associated-thoughts framework for enhancing large language models reasoning. arXiv preprint arXiv:2502.02390, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.649, + 0.827, + 0.678 + ], + "angle": 0, + "content": "[576] Jiayi Pan, Junjie Zhang, Xingyao Wang, Lifan Yuan, Hao Peng, and Alane Suhr. Tinyzero. https://github.com/Jiayi-Pan/TinyZero, 2025. Accessed: 2025-01-24." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.682, + 0.827, + 0.738 + ], + "angle": 0, + "content": "[577] Jiazhen Pan, Che Liu, Junde Wu, Fenglin Liu, Jiayuan Zhu, Hongwei Bran Li, Chen Chen, Cheng Ouyang, and Daniel Rueckert. Medvlm-r1: Incentivizing medical reasoning capability of vision-language models (vlms) via reinforcement learning. arXiv preprint arXiv:2502.19634, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.743, + 0.825, + 0.786 + ], + "angle": 0, + "content": "[578] Liangming Pan, Michael Saxon, Wenda Xu, Deepak Nathani, Xinyi Wang, and William Yang Wang. Automatically correcting large language models: Surveying the landscape of diverse self-correction strategies. arXiv preprint arXiv:2308.03188, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.789, + 0.827, + 0.832 + ], + "angle": 0, + "content": "[579] Wenbo Pan, Zhichao Liu, Qiguang Chen, Xiangyang Zhou, Haining Yu, and Xiaohua Jia. The hidden dimensions of llm alignment: A multi-dimensional safety analysis. arXiv preprint arXiv:2502.09674, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.837, + 0.825, + 0.866 + ], + "angle": 0, + "content": "[580] Zhihong Pan, Kai Zhang, Yuze Zhao, and Yupeng Han. Route to reason: Adaptive routing for lIm and reasoning strategy selection. arXiv preprint arXiv:2505.19435, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.827, + 0.911 + ], + "angle": 0, + "content": "[581] Bo Pang, Hanze Dong, Jiacheng Xu, Silvio Savarese, Yingbo Zhou, and Caiming Xiong. Bolt: Bootstrap long chain-of-thought in language models without distillation. arXiv preprint arXiv:2502.03860, 2025." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.829, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "73" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.826, + 0.189 + ], + "angle": 0, + "content": "[582] Richard Yuanzhe Pang, Weizhe Yuan, He He, Kyunghyun Cho, Sainbayar Sukhbaatar, and Jason Weston. Iterative reasoning preference optimization. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 116617-116637. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/d37c9ad425fe5b65304d500c6edcba00-Paper-Conference.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.194, + 0.826, + 0.237 + ], + "angle": 0, + "content": "[583] Shubham Parashar, Blake Olson, Sambhav Khurana, Eric Li, Hongyi Ling, James Caverlee, and Shuiwang Ji. Inference-time computations for llm reasoning and planning: A benchmark and insights. arXiv preprint arXiv:2502.12521, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.242, + 0.827, + 0.284 + ], + "angle": 0, + "content": "[584] Chanwoo Park, Seungju Han, Xingzhi Guo, Asuman Ozdaglar, Kaiqing Zhang, and Joo-Kyung Kim. Maporl: Multi-agent post-co-training for collaborative large language models with reinforcement learning. arXiv preprint arXiv:2502.18439, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.289, + 0.825, + 0.318 + ], + "angle": 0, + "content": "[585] Junsoo Park, Seungyeon Jwa, Meiying Ren, Daeyoung Kim, and Sanghyuk Choi. Offsetbias: Leveraging debiased data for tuning evaluators, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.323, + 0.826, + 0.364 + ], + "angle": 0, + "content": "[586] Sungjin Park, Xiao Liu, Yeyun Gong, and Edward Choi. Ensembling large language models with process reward-guided tree search for better complex reasoning. arXiv preprint arXiv:2412.15797, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.37, + 0.825, + 0.411 + ], + "angle": 0, + "content": "[587] Manojkumar Parmar and Yuvaraj Govindarajulu. Challenges in ensuring ai safety in deepseek-r1 models: The shortcomings of reinforcement learning strategies. arXiv preprint arXiv:2501.17030, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.417, + 0.825, + 0.446 + ], + "angle": 0, + "content": "[588] Avinash Patil. Advancing reasoning in large language models: Promising methods and approaches. arXiv preprint arXiv:2502.03671, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.451, + 0.825, + 0.491 + ], + "angle": 0, + "content": "[589] Avinash Patil and Amardeep Kour Gedhu. Cognitive-mental-llm: Leveraging reasoning in large language models for mental health prediction via online text. arXiv preprint arXiv:2503.10095, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.497, + 0.827, + 0.581 + ], + "angle": 0, + "content": "[590] Debjit Paul, Mete Ismayilzada, Maxime Peyrard, Beatrix Borges, Antoine Bosselut, Robert West, and Boi Faltings. REFINER: Reasoning feedback on intermediate representations. In Yvette Graham and Matthew Purver, editors, Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1100–1126, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.67/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.586, + 0.826, + 0.642 + ], + "angle": 0, + "content": "[591] Patomporn Payoungkhamdee, Pume Tuchinda, Jinheon Baek, Samuel Cahyawijaya, Can Udomcharoenchaikit, Potsawee Manakul, Peerat Limkonchotiwat, Ekapol Chuangsuwanich, and Sarana Nutanong. Towards better understanding of program-of-thought reasoning in cross-lingual and multilingual environments. arXiv preprint arXiv:2502.17956, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.647, + 0.825, + 0.69 + ], + "angle": 0, + "content": "[592] Chunyi Peng, Zhipeng Xu, Zhenghao Liu, Yishan Li, Yukun Yan, Shuo Wang, Zhiyuan Liu, Yu Gu, Minghe Yu, Ge Yu, et al. Learning to route queries across knowledge bases for step-wise retrieval-augmented reasoning. arXiv preprint arXiv:2505.22095, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.695, + 0.826, + 0.737 + ], + "angle": 0, + "content": "[593] Dengyun Peng, Yuhang Zhou, Qiguang Chen, Jinhao Liu, Jingjing Chen, and Libo Qin. Dlpo: Towards a robust, efficient, and generalizable prompt optimization framework from a deep-learning perspective. arXiv preprint arXiv:2503.13413, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.742, + 0.825, + 0.784 + ], + "angle": 0, + "content": "[594] Hao Peng, Yunjia Qi, Xiaozhi Wang, Zijun Yao, Bin Xu, Lei Hou, and Juanzi Li. 
Agentic reward modeling: Integrating human preferences with verifiable correctness signals for reliable reward systems. arXiv preprint arXiv:2502.19328, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.789, + 0.825, + 0.831 + ], + "angle": 0, + "content": "[595] Keqin Peng, Liang Ding, Yuanxin Ouyang, Meng Fang, and Dacheng Tao. Revisiting overthinking in long chain-of-thought from the perspective of self-doubt. arXiv preprint arXiv:2505.23480, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.837, + 0.825, + 0.865 + ], + "angle": 0, + "content": "[596] Miao Peng, Nuo Chen, Zongrui Suo, and Jia Li. Rewarding graph reasoning process makes llms more generalized reasoners. arXiv preprint arXiv:2503.00845, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.825, + 0.912 + ], + "angle": 0, + "content": "[597] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "74" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[598] Ivo Petrov, Jasper Dekoninck, Lyuben Baltadzhiev, Maria Drencheva, Kristian Minchev, Mislav Balunovic, Nikola Jovanovic, and Martin Vechev. Proof or bluff? evaluating llms on 2025 usa math olympiad. arXiv preprint arXiv:2503.21934, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.139, + 0.827, + 0.168 + ], + "angle": 0, + "content": "[599] Rolf Pfister and Hansueli Jud. Understanding and benchmarking artificial intelligence: Openai's o3 is not agi. arXiv preprint arXiv:2501.07458, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.172, + 0.825, + 0.215 + ], + "angle": 0, + "content": "[600] Quang Hieu Pham, Thuy Duong Nguyen, Tung Pham, Anh Tuan Luu, and Dat Quoc Nguyen. Clozemath: Improving mathematical reasoning in language models by learning to fill equations. arXiv preprint arXiv:2506.03763, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.219, + 0.826, + 0.261 + ], + "angle": 0, + "content": "[601] Thinh Pham, Nguyen Nguyen, Pratibha Zunjare, Weiyuan Chen, Yu-Min Tseng, and Tu Vu. Sealqa: Raising the bar for reasoning in search-augmented language models. arXiv preprint arXiv:2506.01062, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.267, + 0.827, + 0.309 + ], + "angle": 0, + "content": "[602] Long Phan, Alice Gatti, Ziwen Han, Nathaniel Li, Josephina Hu, Hugh Zhang, Sean Shi, Michael Choi, Anish Agrawal, Arnav Chopra, et al. Humanity's last exam. arXiv preprint arXiv:2501.14249, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.313, + 0.827, + 0.344 + ], + "angle": 0, + "content": "[603] Aske Plaat, Annie Wong, Suzan Verberne, Joost Broekens, Niki van Stein, and Thomas Back. Reasoning with large language models, a survey. arXiv preprint arXiv:2407.11511, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.348, + 0.827, + 0.391 + ], + "angle": 0, + "content": "[604] Gabriel Poesia, Kanishk Gandhi, Eric Zelikman, and Noah Goodman. Certified deductive reasoning with language models. Transactions on Machine Learning Research, May 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=yXnwrS2T16." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.395, + 0.825, + 0.424 + ], + "angle": 0, + "content": "[605] Stanislas Polu and Ilya Sutskever. Generative language modeling for automated theorem proving. arXiv preprint arXiv:2009.03393, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.429, + 0.827, + 0.513 + ], + "angle": 0, + "content": "[606] Archiki Prasad, Swarnadeep Saha, Xiang Zhou, and Mohit Bansal. ReCEval: Evaluating reasoning chains via correctness and informativeness. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 10066-10086, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.622. URL https://aclanthology.org/2023.emnlp-main.622/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.517, + 0.827, + 0.602 + ], + "angle": 0, + "content": "[607] Archiki Prasad, Alexander Koller, Mareike Hartmann, Peter Clark, Ashish Sabharwal, Mohit Bansal, and Tushar Khot. ADaPT: As-needed decomposition and planning with language models. In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Findings of the Association for Computational Linguistics: NAACL 2024, pages 4226-4252, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-naacl.264. URL https://aclanthology.org/2024-findings-naacl.264/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.606, + 0.825, + 0.636 + ], + "angle": 0, + "content": "[608] Tidor-Vlad Pricope. Hardml: A benchmark for evaluating data science and machine learning knowledge and reasoning in ai. arXiv preprint arXiv:2501.15627, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.639, + 0.827, + 0.724 + ], + "angle": 0, + "content": "[609] Ben Prystawski, Michael Li, and Noah Goodman. Why think step by step? reasoning emerges from the locality of experience. In A. Oh, T. Naumann, A. Globerson, K. 
Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 70926-70947. Curran Associates, Inc., September 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/e0af79ad53a336b4c4b4f7e2a68eb609-Paper-Conference.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.728, + 0.827, + 0.77 + ], + "angle": 0, + "content": "[610] Israel Puerta-Merino, Carlos Núñez-Molina, Pablo Mesejo, and Juan Fernández-Olivares. A roadmap to guide the integration of llms in hierarchical planning. arXiv preprint arXiv:2501.08068, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.775, + 0.827, + 0.818 + ], + "angle": 0, + "content": "[611] Haritz Puerto, Tilek Chubakov, Xiaodan Zhu, Harish Tayyar Madabushi, and Iryna Gurevych. Fine-tuning with divergent chains of thought boosts reasoning through self-correction in language models. arXiv preprint arXiv:2407.03181, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.822, + 0.827, + 0.865 + ], + "angle": 0, + "content": "[612] Isha Puri, Shivchander Sudalairaj, Guangxuan Xu, Kai Xu, and Akash Srivastava. A probabilistic inference approach to inference-time scaling of llms using particle-based monte carlo methods. arXiv preprint arXiv:2502.01618, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[613] Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "75" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.133 + ], + "angle": 0, + "content": "[614] Penghui Qi, Zichen Liu, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Optimizing anytime reasoning via budget relative policy optimization. arXiv preprint arXiv:2505.13438, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.137, + 0.826, + 0.178 + ], + "angle": 0, + "content": "[615] Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual reasoning makes smaller llms stronger problem-solvers. arXiv preprint arXiv:2408.06195, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.183, + 0.824, + 0.211 + ], + "angle": 0, + "content": "[616] Hongjin Qian and Zheng Liu. Scent of knowledge: Optimizing search-enhanced reasoning with information foraging. arXiv preprint arXiv:2505.09316, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.215, + 0.827, + 0.298 + ], + "angle": 0, + "content": "[617] Libo Qin, Qiguang Chen, Fuxuan Wei, Shijue Huang, and Wanxiang Che. Cross-lingual prompting: Improving zero-shot chain-of-thought reasoning across languages. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 2695–2709, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.163. URL https://aclanthology.org/2023.emnlp-main.163/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.301, + 0.827, + 0.342 + ], + "angle": 0, + "content": "[618] Libo Qin, Qiguang Chen, Hao Fei, Zhi Chen, Min Li, and Wanxiang Che. What factors affect multi-modal in-context learning? an in-depth exploration. arXiv preprint arXiv:2410.20482, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.346, + 0.827, + 0.388 + ], + "angle": 0, + "content": "[619] Libo Qin, Qiguang Chen, Xiachong Feng, Yang Wu, Yongheng Zhang, Yinghui Li, Min Li, Wanxiang Che, and Philip S Yu. Large language models meet nlp: A survey. arXiv preprint arXiv:2405.12819, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.392, + 0.826, + 0.435 + ], + "angle": 0, + "content": "[620] Libo Qin, Qiguang Chen, Yuhang Zhou, Zhi Chen, Yinghui Li, Lizi Liao, Min Li, Wanxiang Che, and Philip S Yu. Multilingual large language model: A survey of resources, taxonomy and frontiers. arXiv preprint arXiv:2404.04925, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.438, + 0.827, + 0.493 + ], + "angle": 0, + "content": "[621] Libo Qin, Qiguang Chen, Jingxuan Zhou, Jin Wang, Hao Fei, Wanxiang Che, and Min Li. Divide-solve-combine: An interpretable and accurate prompting framework for zero-shot multi-intent detection. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 39, pages 25038-25046, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.497, + 0.826, + 0.552 + ], + "angle": 0, + "content": "[622] Libo Qin, Qiguang Chen, Yuhang Zhou, Zhi Chen, Yinghui Li, Lizi Liao, Min Li, Wanxiang Che, and S Yu Philip. A survey of multilingual large language models. Patterns, 6(1), January 2025. URL https://www.cell.com/patterns/fulltext/S2666-3899(24)00290-3." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.556, + 0.826, + 0.598 + ], + "angle": 0, + "content": "[623] Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, et al. 
O1 replication journey: A strategic progress report-part 1. arXiv preprint arXiv:2410.18982, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.602, + 0.827, + 0.643 + ], + "angle": 0, + "content": "[624] Yulei Qin, Gang Li, Zongyi Li, Zihan Xu, Yuchen Shi, Zhekai Lin, Xiao Cui, Ke Li, and Xing Sun. Incentivizing reasoning for advanced instruction-following of large language models. arXiv preprint arXiv:2506.01413, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.647, + 0.826, + 0.69 + ], + "angle": 0, + "content": "[625] Jiahao Qiu, Yifu Lu, Yifan Zeng, Jiacheng Guo, Jiayi Geng, Huazheng Wang, Kaixuan Huang, Yue Wu, and Mengdi Wang. Treebon: Enhancing inference-time alignment with speculative tree-search and best-of-n sampling. arXiv preprint arXiv:2410.16033, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.693, + 0.827, + 0.747 + ], + "angle": 0, + "content": "[626] Xiaoye Qu, Yafu Li, Zhaochen Su, Weigao Sun, Jianhao Yan, Dongrui Liu, Ganqu Cui, Daizong Liu, Shuxian Liang, Junxian He, et al. A survey of efficient reasoning for large reasoning models: Language, multimodality, and beyond. arXiv preprint arXiv:2503.21614, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.752, + 0.827, + 0.808 + ], + "angle": 0, + "content": "[627] Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=DRC9pZwBwR." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.811, + 0.827, + 0.867 + ], + "angle": 0, + "content": "[628] Yuxiao Qu, Matthew Y. R. Yang, Amrith Setlur, Lewis Tunstall, Edward Emanuel Beeching, Ruslan Salakhutdinov, and Aviral Kumar. Optimizing test-time compute via meta reinforcement finetuning. In Workshop on Reasoning and Planning for Large Language Models, March 2025. 
URL https://openreview.net/forum?id=WGz4ytjolh." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[629] Gollam Rabby, Farhana Keya, Parvez Zamil, and Soren Auer. Mc-nest-enhancing mathematical reasoning in large language models with a monte carlo nash equilibrium self-refine tree. arXiv preprint arXiv:2411.15645, 2024." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "76" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "[630] Santosh Kumar Radha and Oktay Goktas. On the reasoning capacity of ai models and how to quantify it. arXiv preprint arXiv:2501.13833, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.125, + 0.826, + 0.183 + ], + "angle": 0, + "content": "[631] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. URL https://openreview.net/pdf?id=HPuSIXJaa9." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.187, + 0.826, + 0.272 + ], + "angle": 0, + "content": "[632] Daking Rai and Ziyu Yao. An investigation of neuron activation as a unified lens to explain chain-of-thought eliciting arithmetic reasoning of LLMs. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7174–7193, Bangkok, Thailand, August 2024. Association for Computational Linguistics. 
doi: 10.18653/v1/2024.acl-long.387. URL https://aclanthology.org/2024.acl-long.387/." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.277, + 0.826, + 0.362 + ], + "angle": 0, + "content": "[633] Leonardo Ranaldi, Giulia Pucci, Federico Ranaldi, Elena Sofia Ruzzetti, and Fabio Massimo Zanzotto. A tree-of-thoughts to broaden multi-step reasoning across languages. In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Findings of the Association for Computational Linguistics: NAACL 2024, pages 1229-1241, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-naacl.78. URL https://aclanthology.org/2024 findings-naacl.78/." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.367, + 0.826, + 0.409 + ], + "angle": 0, + "content": "[634] Leonardo Ranaldi, Marco Valentino, Alexander Polonsky, and André Freitas. Improving chain-of-thought reasoning via quasi-symbolic abstractions. arXiv preprint arXiv:2502.12616, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.415, + 0.826, + 0.457 + ], + "angle": 0, + "content": "[635] Mohammad Raza and Natasha Milic-Frayling. Instantiation-based formalization of logical reasoning tasks using language models and logical solvers. arXiv preprint arXiv:2501.16961, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.463, + 0.826, + 0.493 + ], + "angle": 0, + "content": "[636] Ali Razghandi, Seyed Mohammad Hadi Hosseini, and Mahdieh Soleymani Baghshah. Cer: Confidence enhanced reasoning in llms. arXiv preprint arXiv:2502.14634, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.497, + 0.826, + 0.554 + ], + "angle": 0, + "content": "[637] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=Ti67584b98." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.56, + 0.826, + 0.589 + ], + "angle": 0, + "content": "[638] Matthew Renze and Erhan Guven. Self-reflection in llm agents: Effects on problem-solving performance. arXiv preprint arXiv:2405.06682, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.594, + 0.826, + 0.637 + ], + "angle": 0, + "content": "[639] Baptiste Roziere, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaqing Ellen Tan, Yossi Adi, Jingyu Liu, Romain Sauvestre, Tal Remez, et al. Code llama: Open foundation models for code. arXiv preprint arXiv:2308.12950, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.642, + 0.826, + 0.672 + ], + "angle": 0, + "content": "[640] Yangjun Ruan, Neil Band, Chris J Maddison, and Tatsunori Hashimoto. Reasoning to learn from latent thoughts. arXiv preprint arXiv:2503.18866, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.677, + 0.826, + 0.72 + ], + "angle": 0, + "content": "[641] Jon Saad-Falcon, Rajan Vivek, William Berrios, Nandita Shankar Naik, Matija Franklin, Bertie Vidgen, Amanpreet Singh, Douwe Kiela, and Shikib Mehri. Lmunit: Fine-grained evaluation with natural language unit tests. arXiv preprint arXiv:2412.13091, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.725, + 0.826, + 0.755 + ], + "angle": 0, + "content": "[642] Nikta Gohari Sadr, Sangmitra Madhusudan, and Ali Emami. Think or step-by-step? unzipping the black box in zero-shot prompts. arXiv preprint arXiv:2502.03418, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.759, + 0.826, + 0.802 + ], + "angle": 0, + "content": "[643] Swarnadeep Saha, Xian Li, Marjan Ghazvininejad, Jason Weston, and Tianlu Wang. Learning to plan & reason for evaluation with thinking-llm-as-a-judge. arXiv preprint arXiv:2501.18099, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.807, + 0.826, + 0.864 + ], + "angle": 0, + "content": "[644] S Sauhandikaa, R Bhagavath Narethranath, and R Sathya Bama Krishna. Explainable ai in large language models: A review. In 2024 International Conference on Emerging Research in Computational Science (ICERCS), pages 1-6. IEEE, 2024. URL http://ieeexplore.ieee.org/abstract/document/10895578." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.869, + 0.826, + 0.912 + ], + "angle": 0, + "content": "[645] William Saunders, Catherine Yeh, Jeff Wu, Steven Bills, Long Ouyang, Jonathan Ward, and Jan Leike. Self-critiquing models for assisting human evaluators. arXiv preprint arXiv:2206.05802, 2022." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "77" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.133 + ], + "angle": 0, + "content": "[646] Nikunj Saunshi, Nishanth Dikkala, Zhiyuan Li, Sanjiv Kumar, and Sashank J Reddi. Reasoning with latent thoughts: On the power of looped transformers. arXiv preprint arXiv:2502.17416, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.138, + 0.827, + 0.194 + ], + "angle": 0, + "content": "[647] Mark Schöne, Babak Rahmani, Heiner Kremer, Fabian Falck, Hitesh Ballani, and Jannes Gladrow. Implicit language models are RNNs: Balancing parallelization and expressivity. In *Forty-second International Conference on Machine Learning*, May 2025. URL https://openreview.net/forum?id=5EbiopWH6e." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.198, + 0.825, + 0.227 + ], + "angle": 0, + "content": "[648] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.231, + 0.825, + 0.274 + ], + "angle": 0, + "content": "[649] ByteDance Seed, Jiaze Chen, Tiantian Fan, Xin Liu, Lingjun Liu, Zhiqi Lin, Mingxuan Wang, Chengyi Wang, Xiangpeng Wei, Wenyuan Xu, et al. Seed1. 5-thinking: Advancing superb reasoning models with reinforcement learning. arXiv preprint arXiv:2504.13914, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.277, + 0.827, + 0.375 + ], + "angle": 0, + "content": "[650] Amrith Setlur, Saurabh Garg, Xinyang Geng, Naman Garg, Virginia Smith, and Aviral Kumar. Rl on incorrect synthetic data scales the efficiency of lIm math reasoning by eight-fold. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 43000-43031. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/4b77d5b896c321a29277524a98a50215-Paper-Conference.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.379, + 0.827, + 0.449 + ], + "angle": 0, + "content": "[651] Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for LLM reasoning. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=A6Y7Aq1zLW." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.452, + 0.825, + 0.482 + ], + "angle": 0, + "content": "[652] Amrith Setlur, Nived Rajaraman, Sergey Levine, and Aviral Kumar. 
Scaling test-time compute without verification or r1 is suboptimal. arXiv preprint arXiv:2502.12118, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.485, + 0.825, + 0.528 + ], + "angle": 0, + "content": "[653] Amrith Setlur, Matthew YR Yang, Charlie Snell, Jeremy Greer, Ian Wu, Virginia Smith, Max Simchowitz, and Aviral Kumar. e3: Learning to explore enables extrapolation of test-time compute for llms. arXiv preprint arXiv:2506.09026, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.531, + 0.825, + 0.56 + ], + "angle": 0, + "content": "[654] Yu Shang, Yu Li, Fengli Xu, and Yong Li. Synergy-of-thoughts: Eliciting efficient reasoning in hybrid language models. arXiv preprint arXiv:2402.02563, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.564, + 0.827, + 0.607 + ], + "angle": 0, + "content": "[655] Rulin Shao, Shuyue Stella Li, Rui Xin, Scott Geng, Yiping Wang, Sewoong Oh, Simon Shaolei Du, Nathan Lambert, Sewon Min, Ranjay Krishna, et al. Spurious rewards: Rethinking training signals in rlvr. arXiv preprint arXiv:2506.10947, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.61, + 0.827, + 0.653 + ], + "angle": 0, + "content": "[656] Wenqi Shao, Qiaosheng Zhang, Lingxiao Du, Xiangyan Liu, and Fanqing Meng. R1-multimodal-journey. https://github.com/FanqingM/R1-Multimodal-Journey, February 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.657, + 0.827, + 0.754 + ], + "angle": 0, + "content": "[657] Zhihong Shao, Yeyun Gong, Yelong Shen, Minlie Huang, Nan Duan, and Weizhu Chen. Synthetic prompting: Generating chain-of-thought demonstrations for large language models. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett, editors, Proceedings of the 40th International Conference on Machine Learning, volume 202 of Proceedings of Machine Learning Research, pages 30706-30775. PMLR, 23-29 Jul 2023. URL https://proceedings.mlr.press/v202/shao23a.html." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.758, + 0.825, + 0.801 + ], + "angle": 0, + "content": "[658] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.805, + 0.825, + 0.834 + ], + "angle": 0, + "content": "[659] Shuaijie She, Junxiao Liu, Yifeng Liu, Jiajun Chen, Xin Huang, and Shujian Huang. R-prm: Reasoning-driven process reward modeling. arXiv preprint arXiv:2503.21295, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.837, + 0.825, + 0.88 + ], + "angle": 0, + "content": "[660] Haozhan Shen, Zilun Zhang, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. https://github.com/om-ai-lab/VLM-R1, February 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[661] Maohao Shen, Guangtao Zeng, Zhenting Qi, Zhang-Wei Hong, Zhenfang Chen, Wei Lu, Gregory Wornell, Subhro Das, David Cox, and Chuang Gan. Satori: Reinforcement learning" + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "78" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.092, + 0.826, + 0.12 + ], + "angle": 0, + "content": "with chain-of-action-thought enhances llm reasoning via autoregressive search. arXiv preprint arXiv:2502.02508, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.125, + 0.826, + 0.155 + ], + "angle": 0, + "content": "[662] Xuan Shen, Yizhou Wang, Xiangxi Shi, Yanzhi Wang, Pu Zhao, and Jiuming Gu. Efficient reasoning with hidden thinking. arXiv preprint arXiv:2501.19201, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.16, + 0.827, + 0.203 + ], + "angle": 0, + "content": "[663] Yi Shen, Jian Zhang, Jieyun Huang, Shuming Shi, Wenjing Zhang, Jiangze Yan, Ning Wang, Kai Wang, and Shiguo Lian. Dast: Difficulty-adaptive slow-thinking for large reasoning models. arXiv preprint arXiv:2503.04472, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.208, + 0.825, + 0.251 + ], + "angle": 0, + "content": "[664] Yifan Shen, Yuanzhe Liu, Jingyuan Zhu, Xu Cao, Xiaofeng Zhang, Yixiao He, Wenming Ye, James Matthew Rehg, and Ismini Lourentzou. Fine-grained preference optimization improves spatial reasoning in vlms. arXiv preprint arXiv:2506.21656, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.256, + 0.825, + 0.299 + ], + "angle": 0, + "content": "[665] Leheng Sheng, An Zhang, Zijian Wu, Weixiang Zhao, Changshuo Shen, Yi Zhang, Xiang Wang, and Tat-Seng Chua. On reasoning strength planning in large reasoning models. arXiv preprint arXiv:2506.08390, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.304, + 0.827, + 0.347 + ], + "angle": 0, + "content": "[666] Hengyu Shi, Junhao Su, Huansheng Ning, Xiaoming Wei, and Jialin Gao. Layoutcot: Unleashing the deep reasoning potential of large language models for layout generation. arXiv preprint arXiv:2504.10829, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.353, + 0.827, + 0.395 + ], + "angle": 0, + "content": "[667] Junhao Shi, Zhaoye Fei, Siyin Wang, Qipeng Guo, Jingjing Gong, and Xipeng Qiu. World-aware planning narratives enhance large vision-language model planner. arXiv preprint arXiv:2506.21230, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.401, + 0.827, + 0.443 + ], + "angle": 0, + "content": "[668] Wenhao Shi, Zhiqiang Hu, Yi Bin, Yang Yang, See-Kiong Ng, and Heng Tao Shen. Multimodal mathematical reasoning with diverse solving perspective. arXiv preprint arXiv:2507.02804, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.449, + 0.827, + 0.534 + ], + "angle": 0, + "content": "[669] Noah Shinn, Federico Cassano, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. Reflexion: language agents with verbal reinforcement learning. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 8634-8652. Curran Associates, Inc., December 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/1b44b878bb782e6954cd888628510e90-Paper-Conference.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.539, + 0.825, + 0.581 + ], + "angle": 0, + "content": "[670] Safal Shrestha, Minwu Kim, and Keith Ross. Mathematical reasoning in large language models: Assessing logical and arithmetic errors across wide numerical ranges. arXiv preprint arXiv:2502.08680, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.587, + 0.827, + 0.671 + ], + "angle": 0, + "content": "[671] Kashun Shum, Shizhe Diao, and Tong Zhang. Automatic prompt augmentation and selection with chain-of-thought from labeled data. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 12113-12139, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.811. URL https://aclanthology.org/2023.findings-emnlp.811/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.677, + 0.827, + 0.707 + ], + "angle": 0, + "content": "[672] Chenglei Si, Diyi Yang, and Tatsunori Hashimoto. Can llms generate novel research ideas? 
a large-scale human study with \\(100+\\) nlp researchers. arXiv preprint arXiv:2409.04109, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.712, + 0.827, + 0.753 + ], + "angle": 0, + "content": "[673] Sam Silver, Jimin Sun, Ivan Zhang, Sara Hooker, and Eddie Kim. Language models can perform single-utterance self-correction of perturbed reasoning. arXiv preprint arXiv:2506.15894, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.759, + 0.825, + 0.816 + ], + "angle": 0, + "content": "[674] Avi Singh, John D Co-Reyes, Rishabh Agarwal, Ankesh Anand, Piyush Patil, Xavier Garcia, Peter J Liu, James Harrison, Jaehoon Lee, Kelvin Xu, et al. Beyond human data: Scaling self-training for problem-solving with language models. Transactions on Machine Learning Research, April 2024. URL https://openreview.net/pdf?id=lnAyUngGFK." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.821, + 0.825, + 0.864 + ], + "angle": 0, + "content": "[675] Oscar Skean, Md Rifat Arefin, Dan Zhao, Niket Patel, Jalal Naghiyev, Yann LeCun, and Ravid Shwartz-Ziv. Layer by layer: Uncovering hidden representations in language models. arXiv preprint arXiv:2502.02013, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.869, + 0.827, + 0.911 + ], + "angle": 0, + "content": "[676] Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.183, + 0.092, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "79" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[677] Huatong Song, Jinhao Jiang, Yingqian Min, Jie Chen, Zhipeng Chen, Wayne Xin Zhao, Lei Fang, and Ji-Rong Wen. R1-searcher: Incentivizing the search capability in llms via reinforcement learning. arXiv preprint arXiv:2503.05592, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.139, + 0.827, + 0.182 + ], + "angle": 0, + "content": "[678] Jiwon Song, Dongwon Jo, Yulhwa Kim, and Jae-Joon Kim. Reasoning path compression: Compressing generation trajectories for efficient ltm reasoning. arXiv preprint arXiv:2505.13866, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.187, + 0.826, + 0.23 + ], + "angle": 0, + "content": "[679] Mingyang Song, Zhaochen Su, Xiaoye Qu, Jiawei Zhou, and Yu Cheng. Prmbench: A fine-grained and challenging benchmark for process-level reward models. arXiv preprint arXiv:2501.03124, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.234, + 0.826, + 0.279 + ], + "angle": 0, + "content": "[680] Mingyang Song, Mao Zheng, Zheng Li, Wenjie Yang, Xuan Luo, Yue Pan, and Feng Zhang. Fastcurl: Curriculum reinforcement learning with stage-wise context scaling for efficient training r1-like reasoning models. arXiv preprint arXiv:2503.17287, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.282, + 0.826, + 0.327 + ], + "angle": 0, + "content": "[681] Woomin Song, Saket Dingliwal, Sai Muralidhar Jayanthi, Bhavana Ganesh, Jinwoo Shin, Aram Galstyan, and Sravan Babu Bodapati. 
Accelerated test-time scaling with model-free speculative sampling. arXiv preprint arXiv:2506.04708, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.33, + 0.826, + 0.373 + ], + "angle": 0, + "content": "[682] Xiaoshuai Song, Yanan Wu, Weixun Wang, Jiaheng Liu, Wenbo Su, and Bo Zheng. Progco: Program helps self-correction of large language models. arXiv preprint arXiv:2501.01264, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.378, + 0.826, + 0.435 + ], + "angle": 0, + "content": "[683] Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. arXiv preprint arXiv:2409.12183, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.44, + 0.827, + 0.497 + ], + "angle": 0, + "content": "[684] Zayne Rea Sprague, Xi Ye, Kaj Bostrom, Swarat Chaudhuri, and Greg Durrett. MuSR: Testing the limits of chain-of-thought with multistep soft reasoning. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=jenyYQzuel." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.501, + 0.825, + 0.532 + ], + "angle": 0, + "content": "[685] Gaurav Srivastava, Shuxiang Cao, and Xuan Wang. Towards reasoning ability of small language models. arXiv preprint arXiv:2502.11569, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.535, + 0.825, + 0.566 + ], + "angle": 0, + "content": "[686] Saksham Sahai Srivastava and Vaneet Aggarwal. A technical survey of reinforcement learning techniques for large language models. arXiv preprint arXiv:2507.04136, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.569, + 0.825, + 0.599 + ], + "angle": 0, + "content": "[687] Saksham Sahai Srivastava and Ashutosh Gandhi. Mathdivide: Improved mathematical reasoning by large language models. 
arXiv preprint arXiv:2405.13004, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.603, + 0.826, + 0.659 + ], + "angle": 0, + "content": "[688] Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. Chain of thoughtlessness? an analysis of cot in planning. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id= kPBEAZU5Nm." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.665, + 0.827, + 0.75 + ], + "angle": 0, + "content": "[689] Nisan Stiennon, Long Ouyang, Jeffrey Wu, Daniel Ziegler, Ryan Lowe, Chelsea Voss, Alec Radford, Dario Amodei, and Paul F Christiano. Learning to summarize with human feedback. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 3008-3021. Curran Associates, Inc., December 2020. URL https://proceedings.neurips.cc/paper_files/paper/2020/file/1f89885d556929e98d3ef9b86448f951-Paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.754, + 0.825, + 0.784 + ], + "angle": 0, + "content": "[690] Josefa Lia Stoisser, Marc Boubnovski Martell, and Julien Fauqueur. Sparks of tabular reasoning via text2sql reinforcement learning. arXiv preprint arXiv:2505.00016, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.788, + 0.827, + 0.831 + ], + "angle": 0, + "content": "[691] DiJia Su, Sainbayar Sukhbaatar, Michael Rabbat, Yuandong Tian, and Qinqing Zheng. Dualformer: Controllable fast and slow thinking by learning with randomized reasoning traces. arXiv preprint arXiv:2410.09918, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.836, + 0.825, + 0.866 + ], + "angle": 0, + "content": "[692] Jinyan Su and Claire Cardie. Thinking fast and right: Balancing accuracy and reasoning length with adaptive rewards. arXiv preprint arXiv:2505.18298, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.869, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[693] Yi Su, Dian Yu, Linfeng Song, Juntao Li, Haitao Mi, Zhaopeng Tu, Min Zhang, and Dong Yu. Expanding rl with verifiable rewards across diverse domains. arXiv preprint arXiv:2503.23829, 2025." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "80" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[694] Zhaochen Su, Peng Xia, Hangyu Guo, Zhenhua Liu, Yan Ma, Xiaoye Qu, Jiaqi Liu, Yanshu Li, Kaide Zeng, Zhengyuan Yang, et al. Thinking with images for multimodal reasoning: Foundations, methods, and future frontiers. arXiv preprint arXiv:2506.23918, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.137, + 0.825, + 0.193 + ], + "angle": 0, + "content": "[695] Guangyan Sun, Mingyu Jin, Zhenting Wang, Cheng-Long Wang, Siqi Ma, Qifan Wang, Tong Geng, Ying Nian Wu, Yongfeng Zhang, and Dongfang Liu. Visual agents as fast and slow thinkers. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=ncCuiD3KJQ." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.197, + 0.825, + 0.24 + ], + "angle": 0, + "content": "[696] Jiankai Sun, Chuanyang Zheng, Enze Xie, Zhengying Liu, Ruihang Chu, Jianing Qiu, Jiaqi Xu, Mingyu Ding, Hongyang Li, Mengzhe Geng, et al. A survey of reasoning with foundation models. arXiv preprint arXiv:2312.11562, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.244, + 0.826, + 0.286 + ], + "angle": 0, + "content": "[697] Linzhuang Sun, Hao Liang, Jingxuan Wei, Bihui Yu, Tianpeng Li, Fan Yang, Zenan Zhou, and Wentao Zhang. Mm-verify: Enhancing multimodal reasoning with chain-of-thought verification. arXiv preprint arXiv:2502.13383, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.289, + 0.826, + 0.374 + ], + "angle": 0, + "content": "[698] Qiushi Sun, Zhoumianze Liu, Chang Ma, Zichen Ding, Fangzhi Xu, Zhangyue Yin, Haiteng Zhao, Zhenyu Wu, Kanzhi Cheng, Zhaoyang Liu, Jianing Wang, Qintong Li, Robert Tang, Tianbao Xie, Xiachong Feng, Xiang Li, Ben Kao, Wenhai Wang, Biqing Qi, Lingpeng Kong, and Zhiyong Wu. Scienceboard: Evaluating multimodal autonomous agents in realistic scientific workflows. In ICML 2025 Workshop on Computer Use Agents, June 2025. URL https://openreview.net/forum?id=CTtuHMeU5e." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.377, + 0.825, + 0.434 + ], + "angle": 0, + "content": "[699] Shengyang Sun, Yian Zhang, Alexander Bukharin, David Mosallanezhad, Jiaqi Zeng, Soumye Singhal, Gerald Shen, Adi Renduchintala, Tugrul Konuk, Yi Dong, et al. Reward-aware preference optimization: A unified mathematical framework for model alignment. arXiv preprint arXiv:2502.00203, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.437, + 0.826, + 0.48 + ], + "angle": 0, + "content": "[700] Wei Sun, Qianlong Du, Fuwei Cui, and Jiajun Zhang. An efficient and precise training data construction framework for process-supervised reward model in mathematical reasoning. arXiv preprint arXiv:2503.02382, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.483, + 0.826, + 0.538 + ], + "angle": 0, + "content": "[701] Yifan Sun, Jingyan Shen, Yibin Wang, Tianyu Chen, Zhendong Wang, Mingyuan Zhou, and Huan Zhang. 
Improving data efficiency for ltm reinforcement fine-tuning through difficulty-targeted online data selection and rollout replay. arXiv preprint arXiv:2506.05316, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.542, + 0.825, + 0.586 + ], + "angle": 0, + "content": "[702] Yuhong Sun, Zhangyue Yin, Xuanjing Huang, Xipeng Qiu, and Hui Zhao. Error classification of large language models on math word problems: A dynamically adaptive framework. arXiv preprint arXiv:2501.15581, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.589, + 0.826, + 0.631 + ], + "angle": 0, + "content": "[703] Zhongxiang Sun, Qipeng Wang, Weijie Yu, Xiaoxue Zang, Kai Zheng, Jun Xu, Xiao Zhang, Song Yang, and Han Li. Rearter: Retrieval-augmented reasoning with trustworthy process rewarding. arXiv preprint arXiv:2501.07861, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.635, + 0.826, + 0.719 + ], + "angle": 0, + "content": "[704] Richard S Sutton, David McAllester, Satinder Singh, and Yishay Mansour. Policy gradient methods for reinforcement learning with function approximation. In S. Solla, T. Leen, and K. Müller, editors, Advances in Neural Information Processing Systems, volume 12. MIT Press, November 1999. URL https://proceedings.neurips.cc/paper_files/paper/1999/file/464d828b85b0bed98e80ade0a5c43b0f-Paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.722, + 0.826, + 0.82 + ], + "angle": 0, + "content": "[705] Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc Le, Ed Chi, Denny Zhou, and Jason Wei. Challenging BIG-bench tasks and whether chain-of-thought can solve them. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Findings of the Association for Computational Linguistics: ACL 2023, pages 13003-13051, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.824. 
URL https://aclanthology.org/2023-findings-acl.824/." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.824, + 0.826, + 0.867 + ], + "angle": 0, + "content": "[706] Jihoon Tack, Jack Lanchantin, Jane Yu, Andrew Cohen, Ilia Kulikov, Janice Lan, Shibo Hao, Yuandong Tian, Jason Weston, and Xian Li. Llm pretraining with continuous concepts. arXiv preprint arXiv:2502.08524, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.87, + 0.826, + 0.913 + ], + "angle": 0, + "content": "[707] Huajie Tan, Yuheng Ji, Xiaoshuai Hao, Minglan Lin, Pengwei Wang, Zhongyuan Wang, and Shanghang Zhang. Reason-rft: Reinforcement fine-tuning for visual reasoning. arXiv preprint arXiv:2503.20752, 2025." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "81" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.828, + 0.177 + ], + "angle": 0, + "content": "[708] Juanhe (TJ) Tan. Causal abstraction for chain-of-thought reasoning in arithmetic word problems. In Yonatan Belinkov, Sophie Hao, Jaap Jumelet, Najoung Kim, Arya McCarthy, and Hosein Mohebbi, editors, Proceedings of the 6th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP, pages 155–168, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.blackboxnlp-1.12. URL https://aclanthology.org/2023.blackboxnlp-1.12." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.18, + 0.827, + 0.223 + ], + "angle": 0, + "content": "[709] Sijun Tan, Siyuan Zhuang, Kyle Montgomery, William Y Tang, Alejandro Cuadron, Chenguang Wang, Raluca Ada Popa, and Ion Stoica. Judgebench: A benchmark for evaluating llm-based judges. 
arXiv preprint arXiv:2410.12784, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.226, + 0.827, + 0.282 + ], + "angle": 0, + "content": "[710] Xiaoyu Tan, Tianchu Yao, Chao Qu, Bin Li, Minghao Yang, Dakuan Lu, Haozhe Wang, Xihe Qiu, Wei Chu, Yinghui Xu, et al. Aurora: Automated training framework of universal process reward models via ensemble prompting and reverse verification. arXiv preprint arXiv:2502.11520, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.286, + 0.825, + 0.33 + ], + "angle": 0, + "content": "[711] Kexian Tang, Junyao Gao, Yanhong Zeng, Haodong Duan, Yanan Sun, Zhening Xing, Wenran Liu, Kaifeng Lyu, and Kai Chen. Lego-puzzles: How good are mllms at multi-step spatial reasoning? arXiv preprint arXiv:2503.19990, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.333, + 0.827, + 0.376 + ], + "angle": 0, + "content": "[712] Yihong Tang, Kehai Chen, Muyun Yang, Zhengyu Niu, Jing Li, Tiejun Zhao, and Min Zhang. Thinking in character: Advancing role-playing agents with role-aware reasoning. arXiv preprint arXiv:2506.01748, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.379, + 0.825, + 0.423 + ], + "angle": 0, + "content": "[713] Zhengyang Tang, Ziniu Li, Zhenyang Xiao, Tian Ding, Ruoyu Sun, Benyou Wang, Dayiheng Liu, Fei Huang, Tianyu Liu, Bowen Yu, et al. Enabling scalable oversight via self-evolving critic. arXiv preprint arXiv:2501.05727, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.426, + 0.825, + 0.469 + ], + "angle": 0, + "content": "[714] Zhengyang Tang, Ziniu Li, Zhenyang Xiao, Tian Ding, Ruoyu Sun, Benyou Wang, Dayiheng Liu, Fei Huang, Tianyu Liu, Bowen Yu, et al. Realcritic: Towards effectiveness-driven evaluation of language model critiques. arXiv preprint arXiv:2501.14492, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.473, + 0.825, + 0.515 + ], + "angle": 0, + "content": "[715] Sree Harsha Tanneru, Dan Ley, Chirag Agarwal, and Himabindu Lakkaraju. 
On the hardness of faithful chain-of-thought reasoning in large language models. arXiv preprint arXiv:2406.10625, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.519, + 0.827, + 0.561 + ], + "angle": 0, + "content": "[716] Amir Taubenfeld, Tom Sheffer, Eran Ofek, Amir Feder, Ariel Goldstein, Zorik Gekhman, and Gal Yona. Confidence improves self-consistency in llms. arXiv preprint arXiv:2502.06233, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.565, + 0.831, + 0.595 + ], + "angle": 0, + "content": "[717] DolphinR1 Team. Dolphin R1. https://huggingface.co/datasets/cognitivecomputations/dolphin-r1, February 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.599, + 0.827, + 0.628 + ], + "angle": 0, + "content": "[718] Fancy-MLLM Team. R1 Onevision. https://huggingface.co/datasets/Fancy-MLLM/R1-Onevision, February 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.631, + 0.827, + 0.675 + ], + "angle": 0, + "content": "[719] Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.678, + 0.827, + 0.734 + ], + "angle": 0, + "content": "[720] Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, et al. Gemma 2: Improving open language models at a practical size. arXiv preprint arXiv:2408.00118, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.738, + 0.783, + 0.755 + ], + "angle": 0, + "content": "[721] Huggingface Team. Open r1. https://github.com/huggingface/open-r1, January 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.758, + 0.827, + 0.8 + ], + "angle": 0, + "content": "[722] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.804, + 0.827, + 0.846 + ], + "angle": 0, + "content": "[723] NovaSky Team. Think less, achieve more: Cut reasoning costs by \\(50\\%\\) without sacrificing accuracy. https://novasky-ai.github.io/posts/reduce-overthinking, January 2025. Accessed: 2025-01-23." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.851, + 0.827, + 0.88 + ], + "angle": 0, + "content": "[724] NovaSky Team. Sky-t1: Train your own o1 preview model within $ 450. https://novaskyai.github.io/posts/sky-t1, January 2025. Accessed: 2025-01-09." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.884, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[725] NVIDIA Team. Mistral-nemo-12b-instruct. https://huggingface.co/nvidia/Mistral-NeMo-12B-Instruct, July 2024." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.831, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "82" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "[726] OpenDeepResearch Team. Open deep research. https://github.com/nickscamara/open-deepresearch, February 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.124, + 0.807, + 0.141 + ], + "angle": 0, + "content": "[727] OpenO1 Team. Open o1. https://github.com/Open-Source-O1/Open-O1, February 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.143, + 0.827, + 0.172 + ], + "angle": 0, + "content": "[728] OpenR1 Team. Open r1 math 200k. https://huggingface.co/datasets/open-r1/OpenR1-Math-220k, February 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.175, + 0.74, + 0.192 + ], + "angle": 0, + "content": "[729] OpenThoughts Team. Open Thoughts. https://open-thoughts.ai, January 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.194, + 0.827, + 0.223 + ], + "angle": 0, + "content": "[730] PowerInfer Team. QwQ LongCoT 500k. https://huggingface.co/datasets/PowerInfer/QWQ-LONGCOT-500K, January 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.226, + 0.827, + 0.256 + ], + "angle": 0, + "content": "[731] QwQ Team. Qwq: Reflect deeply on the boundaries of the unknown. https://qwenlm.github.io/blog/qwq-32b-preview/, November 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.259, + 0.702, + 0.275 + ], + "angle": 0, + "content": "[732] X-R1 Team. X-r1. https://github.com/dhcode-cpp/X-R1, February 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.278, + 0.826, + 0.308 + ], + "angle": 0, + "content": "[733] Fengwei Teng, Zhaoyang Yu, Quan Shi, Jiayi Zhang, Chenglin Wu, and Yuyu Luo. Atom of thoughts for markov ltm test-time scaling. arXiv preprint arXiv:2502.12018, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.31, + 0.826, + 0.354 + ], + "angle": 0, + "content": "[734] Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, et al. Llamav-o1: Rethinking step-by-step visual reasoning in llms. arXiv preprint arXiv:2501.06186, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.357, + 0.826, + 0.4 + ], + "angle": 0, + "content": "[735] George Thomas, Alex J Chan, Jikun Kang, Wenqi Wu, Filippos Christianos, Fraser Greenlee, Andy Toulis, and Marvin Purtorab. 
Webgames: Challenging general-purpose web-browsing ai agents. arXiv preprint arXiv:2502.18356, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.403, + 0.826, + 0.445 + ], + "angle": 0, + "content": "[736] Xiaoyu Tian, Sitong Zhao, Haotian Wang, Shuaiang Chen, Yunjie Ji, Yiping Peng, Han Zhao, and Xiangang Li. Think twice: Enhancing lIm reasoning by scaling multi-round test-time thinking. arXiv preprint arXiv:2503.19855, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.449, + 0.827, + 0.547 + ], + "angle": 0, + "content": "[737] Ye Tian, Baolin Peng, Linfeng Song, Lifeng Jin, Dian Yu, Lei Han, Haitao Mi, and Dong Yu. Toward self-improvement of llms via imagination, searching, and criticizing. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 52723-52748. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/5e5853f35164e434015716a8c2a66543-Paper-Conference.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.551, + 0.827, + 0.635 + ], + "angle": 0, + "content": "[738] Yuxuan Tong, Xiwen Zhang, Rui Wang, Ruidong Wu, and Junxian He. Dart-math: Difficulty-aware rejection tuning for mathematical problem-solving. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 7821-7846. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/0ef1afa0daa888d695dcd5e9513bafa3-Paper-Conference.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.638, + 0.826, + 0.682 + ], + "angle": 0, + "content": "[739] Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. Openmathinstruct-2: Accelerating ai for math with massive open-source instruction data. 
arXiv preprint arXiv:2410.01560, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.685, + 0.826, + 0.728 + ], + "angle": 0, + "content": "[740] Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. Openmathinstruct-2: Accelerating ai for math with massive open-source instruction data. arXiv preprint arXiv:2410.01560, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.731, + 0.826, + 0.773 + ], + "angle": 0, + "content": "[741] Shubham Toshniwal, Ivan Moshkov, Sean Naresthiran, Daria Gitman, Fei Jia, and Igor Gitman. Openmathinstruct-1: A 1.8 million math instruction tuning dataset. arXiv preprint arXiv: Arxiv-2402.10176, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.777, + 0.826, + 0.82 + ], + "angle": 0, + "content": "[742] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.824, + 0.826, + 0.866 + ], + "angle": 0, + "content": "[743] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.826, + 0.911 + ], + "angle": 0, + "content": "[744] Christoph Treude and Raula Gaikovina Kula. Interacting with ai reasoning models: Harnessing \"thoughts\" for ai-driven software engineering. arXiv preprint arXiv:2503.00483, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "83" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.828, + 0.176 + ], + "angle": 0, + "content": "[745] Luong Trung, Xinbo Zhang, Zhanming Jie, Peng Sun, Xiaoran Jin, and Hang Li. ReFT: Reasoning with reinforced fine-tuning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7601–7614, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.410. URL https://aclanthology.org/2024.acl-long.410/." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.179, + 0.826, + 0.223 + ], + "angle": 0, + "content": "[746] Songjun Tu, Jiahao Lin, Qichao Zhang, Xiangyu Tian, Linjing Li, Xiangyuan Lan, and Dongbin Zhao. Learning when to think: Shaping adaptive reasoning in r1-style models via multi-stage rl. arXiv preprint arXiv:2505.10832, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.226, + 0.825, + 0.255 + ], + "angle": 0, + "content": "[747] Benjamin Turtel, Danny Franklin, and Philipp Schoenegger. Llms can teach themselves to better predict the future. arXiv preprint arXiv:2502.05253, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.259, + 0.827, + 0.302 + ], + "angle": 0, + "content": "[748] Martin Tutek, Fateme Hashemi Chaleshtori, Ana Marasović, and Yonatan Belinkov. Measuring faithfulness of chains of thought by unlearning reasoning steps. arXiv preprint arXiv:2502.14829, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.305, + 0.826, + 0.35 + ], + "angle": 0, + "content": "[749] Jonathan Uesato, Nate Kushner, Ramana Kumar, Francis Song, Noah Siegel, Lisa Wang, Antonia Creswell, Geoffrey Irving, and Irina Higgins. Solving math word problems with process- and outcome-based feedback. arXiv preprint arXiv:2211.14275, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.352, + 0.826, + 0.396 + ], + "angle": 0, + "content": "[750] Robert Vacareanu, Anurag Pratik, Evangelia Spiliopoulou, Zheng Qi, Giovanni Paolini, Neha Anna John, Jie Ma, Yassine Benajiba, and Miguel Ballesteros. General purpose verification for chain of thought prompting. arXiv preprint arXiv:2405.00204, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.399, + 0.826, + 0.455 + ], + "angle": 0, + "content": "[751] Karthik Valmeekam, Kaya Stechly, and Subbarao Kambhampati. LLMs still can't plan; can LRMs? a preliminary evaluation of openAI's o1 on planbench. In NeurIPS 2024 Workshop on Open-World Agents, October 2024. URL https://openreview.net/forum?id=Gcr1Lx4Koz." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.459, + 0.827, + 0.502 + ], + "angle": 0, + "content": "[752] Jean Vassoyan, Nathanaël Beau, and Roman Plaud. Ignore the kl penalty! boosting exploration on critical tokens to enhance rl fine-tuning. arXiv preprint arXiv:2502.06533, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.506, + 0.828, + 0.605 + ], + "angle": 0, + "content": "[753] Tu Vu, Kalpesh Krishna, Salaheddin Alzubi, Chris Tar, Manaal Faruqui, and Yun-Hsuan Sung. Foundational autorators: Taming large language models for better automatic evaluation. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 17086-17105, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10. 18653/v1/2024.emnlp-main.949. 
URL https://aclanthology.org/2024.emnlp-main.949/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.608, + 0.828, + 0.651 + ], + "angle": 0, + "content": "[754] Guangya Wan, Yuqi Wu, Jie Chen, and Sheng Li. Cot rerailer: Enhancing the reliability of large language models in complex reasoning tasks through error detection and correction. arXiv preprint arXiv:2408.13940, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.655, + 0.828, + 0.713 + ], + "angle": 0, + "content": "[755] Ziyu Wan, Xidong Feng, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. Alphazero-like tree-search can guide large language model decoding and training. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=C4OpREezgj." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.716, + 0.828, + 0.759 + ], + "angle": 0, + "content": "[756] Ziyu Wan, Yunxiang Li, Yan Song, Hanjing Wang, Linyi Yang, Mark Schmidt, Jun Wang, Weinan Zhang, Shuyue Hu, and Ying Wen. Rema: Learning to meta-think for llms with multi-agent reinforcement learning. arXiv preprint arXiv:2503.09501, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.762, + 0.825, + 0.792 + ], + "angle": 0, + "content": "[757] Ante Wang, Linfeng Song, Ye Tian, Baolin Peng, Dian Yu, Haitao Mi, Jinsong Su, and Dong Yu. Litesearch: Efficacious tree search for lIm. arXiv preprint arXiv:2407.00320, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.795, + 0.827, + 0.839 + ], + "angle": 0, + "content": "[758] Ante Wang, Linfeng Song, Ye Tian, Dian Yu, Haitao Mi, Xiangyu Duan, Zhaopeng Tu, Jinsong Su, and Dong Yu. Don't get lost in the trees: Streamlining llm reasoning by overcoming tree search exploration pitfalls. arXiv preprint arXiv:2502.11183, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.842, + 0.828, + 0.913 + ], + "angle": 0, + "content": "[759] Boshi Wang, Sewon Min, Xiang Deng, Jiaming Shen, You Wu, Luke Zettlemoyer, and Huan Sun. Towards understanding chain-of-thought prompting: An empirical study of what matters. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2717–2739, Toronto, Canada, July 2023. Association for Computational" + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.828, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "84" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "Linguistics. doi: 10.18653/v1/2023.acl-long.153. URL https://aclanthology.org/2023.acl-long.153/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.125, + 0.827, + 0.167 + ], + "angle": 0, + "content": "[760] Chao Wang, Luning Zhang, Zheng Wang, and Yang Zhou. Can large language models unveil the mysteries? an exploration of their ability to unlock information in complex scenarios. arXiv preprint arXiv:2502.19973, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.171, + 0.827, + 0.213 + ], + "angle": 0, + "content": "[761] Chaojie Wang, Yanchen Deng, Zhiyi Lyu, Liang Zeng, Jujie He, Shuicheng Yan, and Bo An. Q*: Improving multi-step reasoning for llms with deliberative planning. arXiv preprint arXiv:2406.14283, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.218, + 0.825, + 0.261 + ], + "angle": 0, + "content": "[762] Chenlong Wang, Yuanning Feng, Dongping Chen, Zhaoyang Chu, Ranjay Krishna, and Tianyi Zhou. 
Wait, we don't need to\" wait!! removing thinking tokens improves reasoning efficiency. arXiv preprint arXiv:2506.08343, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.265, + 0.825, + 0.309 + ], + "angle": 0, + "content": "[763] Clinton J Wang, Dean Lee, Cristina Menghini, Johannes Mols, Jack Doughty, Adam Khoja, Jayson Lynch, Sean Hendryx, Summer Yue, and Dan Hendrycks. Enigmaeval: A benchmark of long multimodal reasoning challenges. arXiv preprint arXiv:2502.08859, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.312, + 0.824, + 0.342 + ], + "angle": 0, + "content": "[764] Danqing Wang, Zhuorui Ye, Fei Fang, and Lei Li. Cooperative strategic planning enhances reasoning capabilities in large language models. arXiv preprint arXiv:2410.20007, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.345, + 0.827, + 0.415 + ], + "angle": 0, + "content": "[765] Evan Z Wang, Federico Cassano, Catherine Wu, Yunfeng Bai, William Song, Vaskar Nath, Ziwen Han, Sean M. Hendryx, Summer Yue, and Hugh Zhang. Planning in natural language improves LLM search for code generation. In The First Workshop on System-2 Reasoning at Scale, NeurIPS'24, October 2024. URL https://openreview.net/forum?id=B2iSfPNj49." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.419, + 0.827, + 0.476 + ], + "angle": 0, + "content": "[766] Guoxin Wang, Minyu Gao, Shuai Yang, Ya Zhang, Lizhi He, Liang Huang, Hanlin Xiao, Yexuan Zhang, Wanyue Li, Lu Chen, et al. Citrus: Leveraging expert cognitive pathways in a medical language model for advanced medical decision support. arXiv preprint arXiv:2502.18274, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.481, + 0.825, + 0.535 + ], + "angle": 0, + "content": "[767] Hanbin Wang, Xiaoxuan Zhou, Zhipeng Xu, Keyuan Cheng, Yuxin Zuo, Kai Tian, Jingwei Song, Junting Lu, Wenhui Hu, and Xueyang Liu. Code-vision: Evaluating multimodal llms logic understanding and code generation capabilities. 
arXiv preprint arXiv:2502.11829, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.54, + 0.825, + 0.571 + ], + "angle": 0, + "content": "[768] Hanlin Wang, Jian Wang, Chak Tou Leong, and Wenjie Li. Steca: Step-level trajectory calibration for lIm agent learning. arXiv preprint arXiv:2502.14276, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.574, + 0.825, + 0.618 + ], + "angle": 0, + "content": "[769] Hanyin Wang, Zhenbang Wu, Gururaj Kolar, Hariprasad Korsapati, Brian Bartlett, Bryan Hull, and Jimeng Sun. Reinforcement learning for out-of-distribution reasoning in llms: An empirical study on diagnosis-related group coding. arXiv preprint arXiv:2505.21908, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.621, + 0.825, + 0.663 + ], + "angle": 0, + "content": "[770] Hao Wang, Boyi Liu, Yufeng Zhang, and Jie Chen. Seed-cts: Unleashing the power of tree search for superior performance in competitive coding tasks. arXiv preprint arXiv:2412.12544, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.668, + 0.827, + 0.753 + ], + "angle": 0, + "content": "[771] Haoxiang Wang, Wei Xiong, Tengyang Xie, Han Zhao, and Tong Zhang. Interpretable preferences via multi-objective reward modeling and mixture-of-experts. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 10582-10592, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.620. URL https://aclanthology.org/2024/findings-emnlp.620/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.757, + 0.827, + 0.799 + ], + "angle": 0, + "content": "[772] Haoyu Wang, Zeyu Qin, Li Shen, Xueqian Wang, Minhao Cheng, and Dacheng Tao. Leveraging reasoning with guidelines to elicit and utilize knowledge for enhancing safety alignment. arXiv preprint arXiv:2502.04040, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.803, + 0.827, + 0.845 + ], + "angle": 0, + "content": "[773] Huaijie Wang, Shibo Hao, Hanze Dong, Shenao Zhang, Yilin Bao, Ziran Yang, and Yi Wu. Offline reinforcement learning for llm multi-step reasoning. arXiv preprint arXiv:2412.16145, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.85, + 0.825, + 0.879 + ], + "angle": 0, + "content": "[774] Jiaan Wang, Fandong Meng, Yunlong Liang, and Jie Zhou. Drt-o1: Optimized deep reasoning translation via long chain-of-thought. arXiv preprint arXiv:2412.17498, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.883, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[775] Jiaan Wang, Fandong Meng, and Jie Zhou. Extrans: Multilingual deep reasoning translation via exemplar-enhanced reinforcement learning. arXiv preprint arXiv:2505.12996, 2025." + }, + { + "type": "list", + "bbox": [ + 0.183, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "85" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.826, + 0.148 + ], + "angle": 0, + "content": "[776] Jiaqi WANG, Yuhang Zhou, Zhixiong Zhang, Qiguang Chen, Yongqiang Chen, and James Cheng. DivIL: Unveiling and addressing over-invariance for out-of-distribution generalization. Transactions on Machine Learning Research, February 2025. ISSN 2835-8856. URL https://openreview.net/forum?id=2Zan4ATYsh." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.151, + 0.825, + 0.194 + ], + "angle": 0, + "content": "[777] Jun Wang, Meng Fang, Ziyu Wan, Muning Wen, Jiachen Zhu, Anjie Liu, Ziqin Gong, Yan Song, Lei Chen, Lionel M Ni, et al. Openr: An open source framework for advanced reasoning with large language models. 
arXiv preprint arXiv:2410.09671, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.197, + 0.825, + 0.226 + ], + "angle": 0, + "content": "[778] Junlin Wang, Jue Wang, Ben Athiwaratkun, Ce Zhang, and James Zou. Mixture-of-agents enhances large language model capabilities. arXiv preprint arXiv:2406.04692, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.23, + 0.825, + 0.271 + ], + "angle": 0, + "content": "[779] Junxiong Wang, Wen-Ding Li, Daniele Paliotta, Daniel Ritter, Alexander M Rush, and Tri Dao. M1: Towards scalable test-time compute with mamba reasoning models. arXiv preprint arXiv:2504.10449, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.275, + 0.826, + 0.318 + ], + "angle": 0, + "content": "[780] Junyang Wang, Haiyang Xu, Xi Zhang, Ming Yan, Ji Zhang, Fei Huang, and Jitao Sang. Mobile-agent-v: Learning mobile device operation through video-guided multi-agent collaboration. arXiv preprint arXiv:2502.17110, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.322, + 0.825, + 0.365 + ], + "angle": 0, + "content": "[781] Ke Wang, Houxing Ren, Aojun Zhou, Zimu Lu, Sichun Luo, Weikang Shi, Renrui Zhang, Linqi Song, Mingjie Zhan, and Hongsheng Li. Mathcoder: Seamless code integration in llms for enhanced mathematical reasoning. arXiv preprint arXiv:2310.03731, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.368, + 0.826, + 0.438 + ], + "angle": 0, + "content": "[782] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Houxing Ren, Aojun Zhou, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with MATH-vision dataset. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=QWTCcxMpPA." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.442, + 0.826, + 0.512 + ], + "angle": 0, + "content": "[783] Ke Wang, Houxing Ren, Aojun Zhou, Zimu Lu, Sichun Luo, Weikang Shi, Renrui Zhang, Linqi Song, Mingjie Zhan, and Hongsheng Li. Mathcoder: Seamless code integration in LLMs for enhanced mathematical reasoning. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=z8TW0ttBPp." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.515, + 0.826, + 0.558 + ], + "angle": 0, + "content": "[784] Kevin Wang, Junbo Li, Neel P Bhatt, Yihan Xi, Qiang Liu, Ufuk Topcu, and Zhangyang Wang. On the planning abilities of openai's o1 models: Feasibility, optimality, and generalizability. arXiv preprint arXiv:2409.19924, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.562, + 0.825, + 0.604 + ], + "angle": 0, + "content": "[785] Kun Wang, Guibin Zhang, Zhenhong Zhou, Jiahao Wu, Miao Yu, Shiqian Zhao, Chenlong Yin, Jinhu Fu, Yibo Yan, Hanjun Luo, et al. A comprehensive survey in llm (-agent) full stack safety: Data, training and deployment. arXiv preprint arXiv:2504.15585, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.607, + 0.826, + 0.637 + ], + "angle": 0, + "content": "[786] Liang Wang, Haonan Chen, Nan Yang, Xiaolong Huang, Zhicheng Dou, and Furu Wei. Chain-of-retrieval augmented generation. arXiv preprint arXiv:2501.14342, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.64, + 0.825, + 0.667 + ], + "angle": 0, + "content": "[787] Libo Wang. Dynamic chain-of-thought: Towards adaptive deep reasoning. arXiv preprint arXiv:2502.10428, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.672, + 0.826, + 0.728 + ], + "angle": 0, + "content": "[788] Mengru Wang, Xingyu Chen, Yue Wang, Zhiwei He, Jiahao Xu, Tian Liang, Qizhhi Liu, Yunzhi Yao, Wenxuan Wang, Ruotian Ma, et al. 
Two experts are all you need for steering thinking: Reinforcing cognitive effort in moe reasoning models without additional training. arXiv preprint arXiv:2505.14681, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.732, + 0.826, + 0.774 + ], + "angle": 0, + "content": "[789] Mingyang Wang, Lukas Lange, Heike Adel, Yunpu Ma, Jannik Strötgen, and Hinrich Schütze. Language mixing in reasoning language models: Patterns, impact, and internal causes. arXiv preprint arXiv:2505.14815, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.778, + 0.826, + 0.82 + ], + "angle": 0, + "content": "[790] Minzheng Wang, Yongbin Li, Haobo Wang, Xinghua Zhang, Nan Xu, Bingli Wu, Fei Huang, Haiyang Yu, and Wenji Mao. Adaptive thinking via mode policy optimization for social language agents. arXiv preprint arXiv:2505.02156, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.824, + 0.825, + 0.853 + ], + "angle": 0, + "content": "[791] Peifeng Wang, Austin Xu, Yilun Zhou, Caiming Xiong, and Shafiq Joty. Direct judgement preference optimization. arXiv preprint arXiv:2409.14664, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.856, + 0.826, + 0.913 + ], + "angle": 0, + "content": "[792] Peiyi Wang, Lei Li, Zhihong Shao, Runxin Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce LLMs step-by-step without human annotations. 
In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long" + }, + { + "type": "list", + "bbox": [ + 0.183, + 0.091, + 0.826, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "86" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.091, + 0.826, + 0.134 + ], + "angle": 0, + "content": "Papers), pages 9426-9439, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.510. URL https://aclanthology.org/2024.acl-long.510/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.14, + 0.827, + 0.181 + ], + "angle": 0, + "content": "[793] Peng Wang, Xuesi Hu, Jiageng Wu, Yuntao Zou, Qiancheng Zhang, and Dagang Li. What factors affect llms and rllms in financial question answering? arXiv preprint arXiv:2507.08339, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.188, + 0.826, + 0.23 + ], + "angle": 0, + "content": "[794] Peng Wang, Ruihan Tao, Qiguang Chen, Mengkang Hu, and Libo Qin. X-webagentbench: A multilingual interactive web benchmark for evaluating global agentic system. arXiv preprint arXiv:2505.15372, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.236, + 0.826, + 0.279 + ], + "angle": 0, + "content": "[795] Peng-Yuan Wang, Tian-Shuo Liu, Chenyang Wang, Yi-Di Wang, Shu Yan, Cheng-Xing Jia, Xu-Hui Liu, Xin-Wei Chen, Jia-Cheng Xu, Ziniu Li, et al. A survey on large language models for mathematical reasoning. arXiv preprint arXiv:2506.08446, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.285, + 0.826, + 0.327 + ], + "angle": 0, + "content": "[796] Ru Wang, Wei Huang, Selena Song, Haoyu Zhang, Yusuke Iwasawa, Yutaka Matsuo, and Jiaxian Guo. Beyond in-distribution success: Scaling curves of cot granularity for language model generalization. arXiv preprint arXiv:2502.18273, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.333, + 0.826, + 0.375 + ], + "angle": 0, + "content": "[797] Ruida Wang, Rui Pan, Yuxin Li, Jipeng Zhang, Yizhen Jia, Shizhe Diao, Renjie Pi, Junjie Hu, and Tong Zhang. Ma-lot: Model-collaboration lean-based long chain-of-thought reasoning enhances formal theorem proving. arXiv preprint arXiv:2503.03205, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.381, + 0.827, + 0.466 + ], + "angle": 0, + "content": "[798] Ruoyao Wang, Peter Jansen, Marc-Alexandre Côté, and Prithviraj Ammanabrolu. Science-World: Is your agent smarter than a 5th grader? In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang, editors, Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 11279–11298, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.775. URL https://aclanthology.org/2022.emnlp-main.775/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.471, + 0.826, + 0.5 + ], + "angle": 0, + "content": "[799] Siyuan Wang, Enda Zhao, Zhongyu Wei, and Xiang Ren. Stepwise informativeness search for improving llm reasoning. arXiv preprint arXiv:2502.15335, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.505, + 0.826, + 0.547 + ], + "angle": 0, + "content": "[800] Song Wang, Gongfan Fang, Lingdong Kong, Xiangtai Li, Jianyun Xu, Sheng Yang, Qiang Li, Jianke Zhu, and Xinchao Wang. Pixelthink: Towards efficient chain-of-pixel reasoning. arXiv preprint arXiv:2505.23727, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.553, + 0.826, + 0.582 + ], + "angle": 0, + "content": "[801] Tianlong Wang, Junzhe Chen, Xueting Han, and Jing Bai. Cpl: Critical plan step learning boosts llm generalization in reasoning tasks. arXiv preprint arXiv:2409.08642, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.588, + 0.827, + 0.642 + ], + "angle": 0, + "content": "[802] Tianlu Wang, Ping Yu, Xiaoqing Ellen Tan, Sean O'Brien, Ramakanth Pasunuru, Jane Dwivedi-Yu, Olga Golovneva, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Shepherd: A critic for language model generation. arXiv preprint arXiv:2308.04592, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.65, + 0.827, + 0.692 + ], + "angle": 0, + "content": "[803] Tianlu Wang, Ilia Kulikov, Olga Golovneva, Ping Yu, Weizhe Yuan, Jane Dwivedi-Yu, Richard Yuanzhe Pang, Maryam Fazel-Zarandi, Jason Weston, and Xian Li. Self-taught evaluators. arXiv preprint arXiv:2408.02666, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.698, + 0.827, + 0.739 + ], + "angle": 0, + "content": "[804] Weixuan Wang, Minghao Wu, Barry Haddow, and Alexandra Birch. Demystifying multilingual chain-of-thought in process reward modeling. arXiv preprint arXiv:2502.12663, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.746, + 0.827, + 0.801 + ], + "angle": 0, + "content": "[805] Weixun Wang, Shaopan Xiong, Gengru Chen, Wei Gao, Sheng Guo, Yancheng He, Ju Huang, Jiaheng Liu, Zhendong Li, Xiaoyang Li, et al. Reinforcement learning optimization for large-scale learning: An efficient and user-friendly scaling library. arXiv preprint arXiv:2506.06122, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.808, + 0.827, + 0.863 + ], + "angle": 0, + "content": "[806] Weiyun Wang, Zhe Chen, Wenhai Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Jinguo Zhu, Xizhou Zhu, Lewei Lu, Yu Qiao, et al. 
Enhancing the reasoning ability of multimodal large language models via mixed preference optimization. arXiv preprint arXiv:2411.10442, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[807] Weiyun Wang, Zhangwei Gao, Lianjie Chen, Zhe Chen, Jinguo Zhu, Xiangyu Zhao, Yangzhou Liu, Yue Cao, Shenglong Ye, Xizhou Zhu, et al. Visualprm: An effective process reward model for multimodal reasoning. arXiv preprint arXiv:2503.10291, 2025." + }, + { + "type": "list", + "bbox": [ + 0.183, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "87" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.133 + ], + "angle": 0, + "content": "[808] Xiaoqiang Wang, Suyuchen Wang, Yun Zhu, and Bang Liu. System-1.5 reasoning: Traversal in language and latent spaces with dynamic shortcuts. arXiv preprint arXiv:2505.18962, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.138, + 0.825, + 0.167 + ], + "angle": 0, + "content": "[809] Xiaoxuan Wang, Yihe Deng, Mingyu Derek Ma, and Wei Wang. Entropy-based adaptive weighting for self-training. arXiv preprint arXiv:2503.23913, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.171, + 0.825, + 0.212 + ], + "angle": 0, + "content": "[810] Xinyi Wang, Lucas Caccia, Oleksiy Ostapenko, Xingdi Yuan, William Yang Wang, and Alessandro Sordoni. Guiding language model reasoning with planning tokens. arXiv preprint arXiv:2310.05707, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.217, + 0.827, + 0.315 + ], + "angle": 0, + "content": "[811] Xinyi Wang, Alfonso Amayuelas, Kexun Zhang, Liangming Pan, Wenhu Chen, and William Yang Wang. 
Understanding reasoning ability of language models from the perspective of reasoning paths aggregation. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 50026-50042. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/wang24a.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.319, + 0.825, + 0.361 + ], + "angle": 0, + "content": "[812] Xinyi Wang, Shawn Tan, Mingyu Jin, William Yang Wang, Rameswar Panda, and Yikang Shen. Do larger language models imply better reasoning? a pretraining scaling law for reasoning. arXiv preprint arXiv:2504.03635, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.365, + 0.825, + 0.42 + ], + "angle": 0, + "content": "[813] Xiyao Wang, Jiuhai Chen, Zhaoyang Wang, Yuhang Zhou, Yiyang Zhou, Huaxiu Yao, Tianyi Zhou, Tom Goldstein, Parminder Bhatia, Furong Huang, et al. Enhancing visual-language modality alignment in large vision language models via self-improvement. arXiv preprint arXiv:2405.15973, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.425, + 0.825, + 0.468 + ], + "angle": 0, + "content": "[814] Xiyao Wang, Linfeng Song, Ye Tian, Dian Yu, Baolin Peng, Haitao Mi, Furong Huang, and Dong Yu. Towards self-improvement of llms via mcts: Leveraging stepwise knowledge with curriculum preference learning. arXiv preprint arXiv:2410.06508, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.472, + 0.825, + 0.514 + ], + "angle": 0, + "content": "[815] Xuezhi Wang and Denny Zhou. Chain-of-thought reasoning without prompting. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=4Zt7S0B0Jp." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.518, + 0.827, + 0.574 + ], + "angle": 0, + "content": "[816] Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=1PL1NIMMrw." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.578, + 0.827, + 0.607 + ], + "angle": 0, + "content": "[817] Yao Wang, Mingxuan Cui, and Arthur Jiang. Enabling ai scientists to recognize innovation: A domain-agnostic algorithm for assessing novelty. arXiv preprint arXiv:2503.01508, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.611, + 0.827, + 0.667 + ], + "angle": 0, + "content": "[818] Yifei Wang, Yuyang Wu, Zeming Wei, Stefanie Jegelka, and Yisen Wang. A theoretical understanding of self-correction through in-context alignment. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=OtvNLTWYww." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.671, + 0.827, + 0.713 + ], + "angle": 0, + "content": "[819] Yiqun Wang, Sile Hu, Yonggang Zhang, Xiang Tian, Xuesong Liu, Yaowu Chen, Xu Shen, and Jieping Ye. How large language models implement chain-of-thought? September 2023. URL https://openreview.net/pdf?id=b2XfOm3RJa." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.717, + 0.825, + 0.746 + ], + "angle": 0, + "content": "[820] Yu Wang, Nan Yang, Liang Wang, and Furu Wei. Examining false positives under inference scaling for mathematical reasoning. arXiv preprint arXiv:2502.06217, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.75, + 0.827, + 0.833 + ], + "angle": 0, + "content": "[821] Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, Tianle Li, Max Ku, Kai Wang, Alex Zhuang, Rongqi Fan, Xiang Yue, and Wenhu Chen. MMLU-pro: A more robust and challenging multi-task language understanding benchmark. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=y10DM6R2r3." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.838, + 0.825, + 0.866 + ], + "angle": 0, + "content": "[822] Yubo Wang, Xiang Yue, and Wenhu Chen. Critique fine-tuning: Learning to critique is more effective than learning to imitate. arXiv preprint arXiv:2501.17703, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.825, + 0.912 + ], + "angle": 0, + "content": "[823] Yue Wang, Qiuzhi Liu, Jiahao Xu, Tian Liang, Xingyu Chen, Zhiwei He, Linfeng Song, Dian Yu, Juntao Li, Zhuosheng Zhang, et al. Thoughts are all over the place: On the underthinking of o1-like llms. arXiv preprint arXiv:2501.18585, 2025." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "88" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.133 + ], + "angle": 0, + "content": "[824] Yuhang Wang, Youhe Jiang, Bin Cui, and Fangcheng Fu. Thinking short and right over thinking long: Serving lmm reasoning efficiently and accurately. arXiv preprint arXiv:2505.13326, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.137, + 0.827, + 0.166 + ], + "angle": 0, + "content": "[825] Zengzhi Wang, Fan Zhou, Xuefeng Li, and Pengfei Liu. Octothinker: Mid-training incentivizes reinforcement learning scaling. arXiv preprint arXiv:2506.20512, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.169, + 0.827, + 0.225 + ], + "angle": 0, + "content": "[826] Zhaoyang Wang, Weilei He, Zhiyuan Liang, Xuchao Zhang, Chetan Bansal, Ying Wei, Weitong Zhang, and Huaxiu Yao. Cream: Consistency regularized self-rewarding language models. In Neurips Safe Generative AI Workshop 2024, October 2024. URL https://openreview.net/forum?id=oaWajnM93y." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.228, + 0.827, + 0.271 + ], + "angle": 0, + "content": "[827] Zhengren Wang, Jiayang Yu, Dongsheng Ma, Zhe Chen, Yu Wang, Zhiyu Li, Feiyu Xiong, Yanfeng Wang, Linpeng Tang, Wentao Zhang, et al. Rare: Retrieval-augmented reasoning modeling. arXiv preprint arXiv:2503.23513, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.273, + 0.827, + 0.316 + ], + "angle": 0, + "content": "[828] Zhenhailong Wang, Haiyang Xu, Junyang Wang, Xi Zhang, Ming Yan, Ji Zhang, Fei Huang, and Heng Ji. Mobile-agent-e: Self-evolving mobile assistant for complex tasks. arXiv preprint arXiv:2501.11733, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.319, + 0.827, + 0.389 + ], + "angle": 0, + "content": "[829] Zhilin Wang, Yi Dong, Olivier Delalleau, Jiaqi Zeng, Gerald Shen, Daniel Egert, Jimmy J. Zhang, Makes Narsimhan Sreedhar, and Oleksii Kuchaiev. Helpsteer 2: Open-source dataset for training top-performing reward models. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=PvVKUFhaNy." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.392, + 0.827, + 0.448 + ], + "angle": 0, + "content": "[830] Zhongsheng Wang, Jiamou Liu, Qiming Bao, Hongfei Rong, and Jingfeng Zhang. Chatlogic: Integrating logic programming with large language models for multi-step reasoning. In Neuro-Symbolic Learning and Reasoning in the era of Large Language Models, December 2023. URL https://openreview.net/forum?id=AOqGF7Po7Z." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.451, + 0.827, + 0.493 + ], + "angle": 0, + "content": "[831] Zihan Wang, Yunxuan Li, Yuexin Wu, Liangchen Luo, Le Hou, Hongkun Yu, and Jingbo Shang. Multi-step problem solving through a verifier: An empirical analysis on model-induced process supervision. arXiv preprint arXiv:2402.02658, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.497, + 0.827, + 0.539 + ], + "angle": 0, + "content": "[832] Zixiao Wang, Yuxin Wang, Xiaorui Wang, Mengting Xing, Jie Gao, Jianjun Xu, Guangcan Liu, Chenhui Jin, Zhuo Wang, Shengzhuo Zhang, et al. Test-time scaling with reflective generative model. arXiv preprint arXiv:2507.01951, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.542, + 0.827, + 0.597 + ], + "angle": 0, + "content": "[833] Anjiang Wei, Jiannan Cao, Ran Li, Hongyu Chen, Yuhui Zhang, Ziheng Wang, Yaofeng Sun, Yuan Liu, Thiago SFX Teixeira, Diyi Yang, et al. Equibench: Benchmarking code reasoning capabilities of large language models via equivalence checking. arXiv preprint arXiv:2502.12466, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.602, + 0.827, + 0.63 + ], + "angle": 0, + "content": "[834] Hao Wei. Medthoughts-8k: A medical question answering dataset, feb 2025. URL https://huggingface.co/datasets/hw-hwei/MedThoughts-8K." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.633, + 0.827, + 0.675 + ], + "angle": 0, + "content": "[835] Haoran Wei, Youyang Yin, Yumeng Li, Jia Wang, Liang Zhao, Jianjian Sun, Zheng Ge, and Xiangyu Zhang. 
Slow perception: Let's perceive geometric figures step-by-step. arXiv preprint arXiv:2412.20631, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.679, + 0.827, + 0.775 + ], + "angle": 0, + "content": "[836] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, brian richter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 24824-24837. Curran Associates, Inc., November 2022. URL https://proceedings.neurips.cc/paper_files/paper/2022/file/9d5609613524ecf4f15af0f7b31abca4-Paper-Conference.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.779, + 0.827, + 0.821 + ], + "angle": 0, + "content": "[837] Shuyue Wei, Yongxin Tong, Zimu Zhou, Yi Xu, Jingkai Gao, Tongyu Wei, Tianran He, and Weifeng Lv. Federated reasoning llms: a survey. Frontiers of Computer Science, 19(12): 1-23, jun 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.824, + 0.827, + 0.866 + ], + "angle": 0, + "content": "[838] Ting-Ruen Wei, Haowei Liu, Xuyang Wu, and Yi Fang. A survey on feedback-based multi-step reasoning for large language models on mathematics. arXiv preprint arXiv:2502.14333, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[839] Yana Wei, Liang Zhao, Jianjian Sun, Kangheng Lin, Jisheng Yin, Jingcheng Hu, Yinmin Zhang, En Yu, Haoran Lv, Zejia Weng, et al. Open vision reasoner: Transferring linguistic cognitive behavior for visual reasoning. arXiv preprint arXiv:2507.05255, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "89" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.826, + 0.133 + ], + "angle": 0, + "content": "[840] Yongxian Wei, Anke Tang, Li Shen, Zixuan Hu, Chun Yuan, and Xiaochun Cao. Modeling multi-task model merging as adaptive projective gradient descent. arXiv preprint arXiv:2501.01230, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.138, + 0.826, + 0.193 + ], + "angle": 0, + "content": "[841] Yuxiang Wei, Olivier Duchenne, Jade Copet, Quentin Carbonneaux, Lingming Zhang, Daniel Fried, Gabriel Synnaeve, Rishabh Singh, and Sida I. Wang. Swe-rl: Advancing llm reasoning via reinforcement learning on open software evolution. arXiv preprint arXiv:2502.18449, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.198, + 0.826, + 0.254 + ], + "angle": 0, + "content": "[842] Nathaniel Weir, Muhammad Khalifa, Linlu Qiu, Orion Weller, and Peter Clark. Learning to reason via program generation, emulation, and search. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=te6VagJf6G." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.258, + 0.826, + 0.327 + ], + "angle": 0, + "content": "[843] Sean Welleck, Amanda Bertsch, Matthew Finlayson, Hailey Schoelkopf, Alex Xie, Graham Neubig, Ilia Kulikov, and Zaid Harchaoui. From decoding to meta-generation: Inference-time algorithms for large language models. Transactions on Machine Learning Research, November 2024. ISSN 2835-8856. URL https://openreview.net/forum?id= eskQMcIbMS. Survey Certification." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.331, + 0.826, + 0.373 + ], + "angle": 0, + "content": "[844] Cheng Wen, Tingwei Guo, Shuaijiang Zhao, Wei Zou, and Xiangang Li. Sari: Structured audio reasoning via curriculum-guided reinforcement learning. arXiv preprint arXiv:2504.15900, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.377, + 0.826, + 0.434 + ], + "angle": 0, + "content": "[845] Jiaxin Wen, Jian Guan, Hongning Wang, Wei Wu, and Minlie Huang. Codeplan: Unlocking reasoning potential in large language models by scaling code-form planning. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=dCPF1wlqj8." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.437, + 0.826, + 0.493 + ], + "angle": 0, + "content": "[846] Kaiyue Wen, Huaqing Zhang, Hongzhou Lin, and Jingzhao Zhang. From sparse dependence to sparse attention: Unveiling how chain-of-thought enhances transformer sample efficiency. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=AmEgWDhmTr." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.497, + 0.826, + 0.552 + ], + "angle": 0, + "content": "[847] Xumeng Wen, Zihan Liu, Shun Zheng, Zhijian Xu, Shengyu Ye, Zhirong Wu, Xiao Liang, Yang Wang, Junjie Li, Ziming Miao, et al. Reinforcement learning with verifiable rewards implicitly incentivizes correct reasoning in base llms. arXiv preprint arXiv:2506.14245, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.557, + 0.826, + 0.641 + ], + "angle": 0, + "content": "[848] Yixuan Weng, Minjun Zhu, Fei Xia, Bin Li, Shizhu He, Shengping Liu, Bin Sun, Kang Liu, and Jun Zhao. Large language models are better reasoners with self-verification. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 2550–2575, Singapore, December 2023. 
Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.167. URL https://aclanthology.org/2023-findings-emnlp.167/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.645, + 0.826, + 0.673 + ], + "angle": 0, + "content": "[849] Jason Weston and Sainbayar Sukhbaatar. System 2 attention (is something you might need too). arXiv preprint arXiv:2311.11829, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.677, + 0.826, + 0.761 + ], + "angle": 0, + "content": "[850] Colin White, Samuel Dooley, Manley Roberts, Arka Pal, Benjamin Feuer, Siddhartha Jain, Ravid Shwartz-Ziv, Neel Jain, Khalid Saifullah, Sreemanti Dey, Shubh-Agrawal, Sandeep Singh Sandha, Siddartha Venkat Naidu, Chinmay Hegde, Yann LeCun, Tom Goldstein, Willie Neiswanger, and Micah Goldblum. Livebench: A challenging, contamination-limited LLM benchmark. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=sKYHBTAxVa." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.764, + 0.826, + 0.806 + ], + "angle": 0, + "content": "[851] Yotam Wolf, Binyamin Rothberg, Dorin Shteyman, and Amnon Shashua. Compositional hardness of code in large language models—a probabilistic perspective. arXiv preprint arXiv:2409.18028, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.81, + 0.826, + 0.865 + ], + "angle": 0, + "content": "[852] Chengyue Wu, Yixiao Ge, Qiushan Guo, Jiahao Wang, Zhixuan Liang, Zeyu Lu, Ying Shan, and Ping Luo. Plot2code: A comprehensive benchmark for evaluating multi-modal large language models in code generation from scientific plots. arXiv preprint arXiv:2405.07990, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.826, + 0.913 + ], + "angle": 0, + "content": "[853] Jinyang Wu, Mingkuan Feng, Shuai Zhang, Feihu Che, Zengqi Wen, and Jianhua Tao. Beyond examples: High-level automated reasoning paradigm in in-context learning via mcts. 
arXiv preprint arXiv:2411.18478, 2024." + }, + { + "type": "list", + "bbox": [ + 0.183, + 0.091, + 0.826, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "90" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.826, + 0.133 + ], + "angle": 0, + "content": "[854] Jinyang Wu, Mingkuan Feng, Shuai Zhang, Ruihan Jin, Feihu Che, Zengqi Wen, and Jianhua Tao. Boosting multimodal reasoning with mcts-automated structured thinking. arXiv preprint arXiv:2502.02339, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.138, + 0.827, + 0.18 + ], + "angle": 0, + "content": "[855] Jinyang Wu, Chonghua Liao, Mingkuan Feng, Shuai Zhang, Zhengqi Wen, Pengpeng Shao, Huazhe Xu, and Jianhua Tao. Thought-augmented policy optimization: Bridging external guidance and internal capabilities. arXiv preprint arXiv:2505.15692, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.183, + 0.825, + 0.211 + ], + "angle": 0, + "content": "[856] Junde Wu, Jiayuan Zhu, and Yuyuan Liu. Agentic reasoning: Reasoning llms with tools for the deep research. arXiv preprint arXiv:2502.04644, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.215, + 0.825, + 0.257 + ], + "angle": 0, + "content": "[857] Qiong Wu, Xiangcong Yang, Yiyi Zhou, Chenxin Fang, Baiyang Song, Xiaoshuai Sun, and Rongrong Ji. Grounded chain-of-thought for multimodal large language models. arXiv preprint arXiv:2503.12799, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.261, + 0.825, + 0.302 + ], + "angle": 0, + "content": "[858] Siwei Wu, Zhongyuan Peng, Xinrun Du, Tuney Zheng, Minghao Liu, Jialong Wu, Jiachen Ma, Yizhi Li, Jian Yang, Wangchunshu Zhou, et al. A comparative study on reasoning patterns of openai's o1 model. 
arXiv preprint arXiv:2410.13639, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.307, + 0.827, + 0.334 + ], + "angle": 0, + "content": "[859] Siye Wu, Jian Xie, Yikai Zhang, Aili Chen, Kai Zhang, Yu Su, and Yanghua Xiao. Arm: Adaptive reasoning model. arXiv preprint arXiv:2505.20258, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.338, + 0.827, + 0.379 + ], + "angle": 0, + "content": "[860] Tianhao Wu, Janice Lan, Weizhe Yuan, Jiantao Jiao, Jason Weston, and Sainbayar Sukhbaatar. Thinking llms: General instruction following with thought generation. arXiv preprint arXiv:2410.10630, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.383, + 0.825, + 0.424 + ], + "angle": 0, + "content": "[861] Wenjie Wu, Yongcheng Jing, Yingjie Wang, Wenbin Hu, and Dacheng Tao. Graph-augmented reasoning: Evolving step-by-step knowledge graph retrieval for llm reasoning. arXiv preprint arXiv:2503.01642, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.429, + 0.825, + 0.457 + ], + "angle": 0, + "content": "[862] Xiaobao Wu. Sailing by the stars: A survey on reward models and learning strategies for learning from rewards. arXiv preprint arXiv:2505.02686, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.461, + 0.827, + 0.515 + ], + "angle": 0, + "content": "[863] Xiong Jun Wu, Zhenduo Zhang, ZuJie Wen, Zhiqiang Zhang, Wang Ren, Lei Shi, Cai Chen, Deng Zhao, Qing Wang, Xudong Han, et al. Sharp: Synthesizing high-quality aligned reasoning problems for large reasoning models reinforcement learning. arXiv preprint arXiv:2505.14147, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.52, + 0.825, + 0.562 + ], + "angle": 0, + "content": "[864] Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models. arXiv preprint arXiv:2408.00724, January 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.566, + 0.827, + 0.607 + ], + "angle": 0, + "content": "[865] Yifan Wu, Jingze Shi, Bingheng Wu, Jiayi Zhang, Xiaotian Lin, Nan Tang, and Yuyu Luo. Concise reasoning, big gains: Pruning long reasoning trace with difficulty-aware prompting. arXiv preprint arXiv:2505.19716, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.611, + 0.827, + 0.652 + ], + "angle": 0, + "content": "[866] Yong Wu, Weihang Pan, Ke Li, Chen Binhui, Ping Li, and Binbin Lin. Beyond templates: Dynamic adaptation of reasoning demonstrations via feasibility-aware exploration. arXiv preprint arXiv:2505.20700, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.656, + 0.827, + 0.685 + ], + "angle": 0, + "content": "[867] Yuyang Wu, Yifei Wang, Tianqi Du, Stefanie Jegelka, and Yisen Wang. When more is less: Understanding chain-of-thought length in IIms. arXiv preprint arXiv:2502.07266, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.688, + 0.827, + 0.729 + ], + "angle": 0, + "content": "[868] Zhenyu Wu, Qingkai Zeng, Zhihan Zhang, Zhaoxuan Tan, Chao Shen, and Meng Jiang. Enhancing mathematical reasoning in llms by stepwise correction. arXiv preprint arXiv:2410.12934, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.734, + 0.825, + 0.775 + ], + "angle": 0, + "content": "[869] Zhenyu Wu, Qingkai Zeng, Zhihan Zhang, Zhaoxuan Tan, Chao Shen, and Meng Jiang. Large language models can self-correct with minimal effort. In AI for Math Workshop @ ICML 2024, May 2024. URL https://openreview.net/forum?id=mmZLMs413d." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.779, + 0.827, + 0.821 + ], + "angle": 0, + "content": "[870] Zirui Wu, Xiao Liu, Jiayi Li, Lingpeng Kong, and Yansong Feng. Haste makes waste: Evaluating planning abilities of llms for efficient and feasible multitasking with time constraints between actions. arXiv preprint arXiv:2503.02238, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.825, + 0.827, + 0.866 + ], + "angle": 0, + "content": "[871] Zongqian Wu, Tianyu Li, Jiaying Yang, Mengmeng Zhan, Xiaofeng Zhu, and Lei Feng. Is depth all you need? an exploration of iterative reasoning in llms. arXiv preprint arXiv:2502.10858, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.827, + 0.912 + ], + "angle": 0, + "content": "[872] Zhiheng Xi, Wenxiang Chen, Boyang Hong, Senjie Jin, Rui Zheng, Wei He, Yiwen Ding, Shichun Liu, Xin Guo, Junzhe Wang, et al. Training large language models for reasoning through reverse curriculum reinforcement learning. arXiv preprint arXiv:2402.05808, 2024." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "91" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[873] Zhiheng Xi, Dingwen Yang, Jixuan Huang, Jiafu Tang, Guanyu Li, Yiwen Ding, Wei He, Boyang Hong, Shihan Do, Wenyu Zhan, et al. Enhancing llm reasoning via critique models with test-time and training-time supervision. arXiv preprint arXiv:2411.16579, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.139, + 0.826, + 0.182 + ], + "angle": 0, + "content": "[874] Zhiheng Xi, Guanyu Li, Yutao Fan, Honglin Guo, Yufang Liu, Xiaoran Fan, Jiaqi Liu, Jingchao Ding, Wangmeng Zuo, Zhenfei Yin, et al. Bmmr: A large-scale bilingual multimodal multi-discipline reasoning dataset. arXiv preprint arXiv:2507.03483, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.187, + 0.827, + 0.228 + ], + "angle": 0, + "content": "[875] Fanzeng Xia, Yidong Luo, Tinko Sebastian Bartels, Yaqi Xu, and Tongxin Li. 
Rethinking the unsolvable: When in-context search meets test-time scaling. arXiv preprint arXiv:2505.22290, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.233, + 0.827, + 0.262 + ], + "angle": 0, + "content": "[876] Heming Xia, Yongqi Li, Chak Tou Leong, Wenjie Wang, and Wenjie Li. Tokenskip: Controllable chain-of-thought compression in lms. arXiv preprint arXiv:2502.12067, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.267, + 0.825, + 0.297 + ], + "angle": 0, + "content": "[877] Shijie Xia, Xuefeng Li, Yixin Liu, Tongshuang Wu, and Pengfei Liu. Evaluating mathematical reasoning beyond accuracy. arXiv preprint arXiv:2404.05692, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.3, + 0.825, + 0.343 + ], + "angle": 0, + "content": "[878] Yunhui Xia, Wei Shen, Yan Wang, Jason Klein Liu, Huifeng Sun, Siyue Wu, Jian Hu, and Xiaolong Xu. Leetcodedataset: A temporal dataset for robust evaluation and efficient training of code llms. arXiv preprint arXiv:2504.14655, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.348, + 0.825, + 0.391 + ], + "angle": 0, + "content": "[879] Kun Xiang, Zhili Liu, Zihao Jiang, Yunshuang Nie, Runhui Huang, Haoxiang Fan, Hanhui Li, Weiran Huang, Yihan Zeng, Jianhua Han, et al. Atomthink: A slow thinking framework for multimodal mathematical reasoning. arXiv preprint arXiv:2411.11930, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.395, + 0.826, + 0.438 + ], + "angle": 0, + "content": "[880] Violet Xiang, Chase Blagden, Rafael Rafailov, Nathan Lile, Sang Truong, Chelsea Finn, and Nick Haber. Just enough thinking: Efficient reasoning with adaptive length penalties reinforcement learning. arXiv preprint arXiv:2506.05256, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.442, + 0.827, + 0.497 + ], + "angle": 0, + "content": "[881] Violet Xiang, Charlie Snell, Kanishk Gandhi, Alon Albalak, Anikait Singh, Chase Blagden, Duy Phung, Rafael Rafailov, Nathan Lile, Dakota Mahan, et al. Towards system 2 reasoning in llms: Learning how to think with meta chain-of-though. arXiv preprint arXiv:2501.04682, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.503, + 0.827, + 0.558 + ], + "angle": 0, + "content": "[882] Wenyi Xiao, Zechuan Wang, Leilei Gan, Shuai Zhao, Wanggui He, Luu Anh Tuan, Long Chen, Hao Jiang, Zhou Zhao, and Fei Wu. A comprehensive survey of direct preference optimization: Datasets, theories, variants, and applications. arXiv preprint arXiv:2410.15595, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.565, + 0.827, + 0.607 + ], + "angle": 0, + "content": "[883] Chulin Xie, Yangsibo Huang, Chiyuan Zhang, Da Yu, Xinyun Chen, Bill Yuchen Lin, Bo Li, Badih Ghazi, and Ravi Kumar. On memorization of large language models in logical reasoning. arXiv preprint arXiv:2410.23123, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.612, + 0.827, + 0.667 + ], + "angle": 0, + "content": "[884] Enze Xie, Junsong Chen, Yuyang Zhao, Jincheng Yu, Ligeng Zhu, Chengyue Wu, Yujun Lin, Zhekai Zhang, Muyang Li, Junyu Chen, et al. Sana 1.5: Efficient scaling of training-time and inference-time compute in linear diffusion transformer. arXiv preprint arXiv:2501.18427, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.673, + 0.827, + 0.715 + ], + "angle": 0, + "content": "[885] Senwei Xie, Hongyu Wang, Zhanqi Xiao, Ruiping Wang, and Xilin Chen. Robotic programmer: Video instructed policy code generation for robotic manipulation. arXiv preprint arXiv:2501.04268, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.72, + 0.827, + 0.763 + ], + "angle": 0, + "content": "[886] Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768, February 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.767, + 0.827, + 0.852 + ], + "angle": 0, + "content": "[887] Tianbao Xie, Danyang Zhang, Jixuan Chen, Xiaochuan Li, Siheng Zhao, Ruisheng Cao, Toh Jing Hua, Zhoujun Cheng, Dongchan Shin, Fangyu Lei, Yitao Liu, Yiheng Xu, Shuyan Zhou, Silvio Savarese, Caiming Xiong, Victor Zhong, and Tao Yu. OSWorld: Benchmarking multimodal agents for open-ended tasks in real computer environments. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=tN61DTr4Ed." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.856, + 0.827, + 0.912 + ], + "angle": 0, + "content": "[888] Yuxi Xie, Kenji Kawaguchi, Yiran Zhao, Xu Zhao, Min-Yen Kan, Junxian He, and Qizhe Xie. Self-evaluation guided beam search for reasoning. In Thirty-seventh Conference on Neural Information Processing Systems, September 2023. URL https://openreview.net/forum?id=Bw82hwg5Q3." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "92" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[889] Yuxi Xie, Anirudh Goyal, Wenyue Zheng, Min-Yen Kan, Timothy P Lillicrap, Kenji Kawaguchi, and Michael Shieh. 
Monte carlo tree search boosts reasoning via iterative preference learning. arXiv preprint arXiv:2405.00451, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.137, + 0.827, + 0.182 + ], + "angle": 0, + "content": "[890] Zhifei Xie, Mingbao Lin, Zihang Liu, Pengcheng Wu, Shuicheng Yan, and Chunyan Miao. Audio-reasoner: Improving reasoning capability in large audio language models. arXiv preprint arXiv:2503.02318, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.185, + 0.825, + 0.215 + ], + "angle": 0, + "content": "[891] Zhihui Xie, Liyu Chen, Weichao Mao, Jingjing Xu, Lingpeng Kong, et al. Teaching language models to critique via reinforcement learning. arXiv preprint arXiv:2502.03492, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.218, + 0.827, + 0.259 + ], + "angle": 0, + "content": "[892] Siheng Xiong, Ali Payani, Yuan Yang, and Faramarz Fekri. Deliberate reasoning for llms as structure-aware planning with accurate world model. arXiv preprint arXiv:2410.03136, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.264, + 0.827, + 0.308 + ], + "angle": 0, + "content": "[893] Wei Xiong, Chengshuai Shi, Jiaming Shen, Aviv Rosenberg, Zhen Qin, Daniele Calandriello, Misha Khalman, Rishabh Joshi, Bilal Piot, Mohammad Saleh, et al. Building math agents with multi-turn iterative preference learning. arXiv preprint arXiv:2409.02392, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.311, + 0.827, + 0.354 + ], + "angle": 0, + "content": "[894] Wang Xiyao, Yang Zhengyuan, Li Linjie, Lu Hongjin, Xu Yuancheng, Lin Chung-Ching Lin, Lin Kevin, Huang Furong, and Wang Lijuan. Scaling inference-time search with vision value model for improved visual comprehension. arXiv preprint arXiv:2412.03704, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.357, + 0.825, + 0.4 + ], + "angle": 0, + "content": "[895] Austin Xu, Yilun Zhou, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. 
J4r: Learning to judge with equivalent initial state group relative policy optimization. arXiv preprint arXiv:2505.13346, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.404, + 0.827, + 0.445 + ], + "angle": 0, + "content": "[896] Bin Xu, Yiguan Lin, Yinghao Li, et al. Sra-mcts: Self-driven reasoning augmentation with monte carlo tree search for enhanced code generation. arXiv preprint arXiv:2411.11053, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.45, + 0.825, + 0.493 + ], + "angle": 0, + "content": "[897] Fangzhi Xu, Qiushi Sun, Kanzhi Cheng, Jun Liu, Yu Qiao, and Zhiyong Wu. Interactive evolution: A neural-symbolic self-training framework for large language models. arXiv preprint arXiv:2406.11736, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.497, + 0.827, + 0.54 + ], + "angle": 0, + "content": "[898] Fangzhi Xu, Hang Yan, Chang Ma, Haiteng Zhao, Qiushi Sun, Kanzhi Cheng, Junxian He, Jun Liu, and Zhiyong Wu. Genius: A generalizable and purely unsupervised self-training framework for advanced reasoning. arXiv preprint arXiv:2504.08672, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.544, + 0.827, + 0.599 + ], + "angle": 0, + "content": "[899] Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, et al. Towards large reasoning models: A survey of reinforced reasoning with large language models. arXiv preprint arXiv:2501.09686, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.604, + 0.825, + 0.634 + ], + "angle": 0, + "content": "[900] Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. Llava-ol: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.637, + 0.827, + 0.679 + ], + "angle": 0, + "content": "[901] Haotian Xu. No train still gain. 
unleash mathematical reasoning of large language models with monte carlo tree search guided by energy function. arXiv preprint arXiv:2309.03224, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.683, + 0.825, + 0.727 + ], + "angle": 0, + "content": "[902] Haotian Xu, Xing Wu, Weinong Wang, Zhongzhi Li, Da Zheng, Boyuan Chen, Yi Hu, Shijia Kang, Jiaming Ji, Yingying Zhang, et al. Redstar: Does scaling long-cot data unlock better slow-reasoning systems? arXiv preprint arXiv:2501.11284, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.73, + 0.825, + 0.773 + ], + "angle": 0, + "content": "[903] Huimin Xu, Xin Mao, Feng-Lin Li, Xiaobao Wu, Wang Chen, Wei Zhang, and Anh Tuan Luu. Full-step-dpo: Self-supervised preference optimization with step-wise rewards for mathematical reasoning. arXiv preprint arXiv:2502.14356, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.776, + 0.827, + 0.818 + ], + "angle": 0, + "content": "[904] Jin Xu, Zhifang Guo, Jinzheng He, Hangrui Hu, Ting He, Shuai Bai, Keqin Chen, Jialin Wang, Yang Fan, Kai Dang, et al. Qwen2. 5-omni technical report. arXiv preprint arXiv:2503.20215, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.823, + 0.825, + 0.867 + ], + "angle": 0, + "content": "[905] Pusheng Xu, Yue Wu, Kai Jin, Xiaolan Chen, Mingguang He, and Danli Shi. Deepseek-r1 outperforms gemini 2.0 pro, openai o1, and o3-mini in bilingual complex ophthalmology reasoning. arXiv preprint arXiv:2502.17947, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.87, + 0.827, + 0.911 + ], + "angle": 0, + "content": "[906] Rongwu Xu, Xiaojian Li, Shuo Chen, and Wei Xu. \"nuclear deployed!\": Analyzing catastrophic risks in decision-making of autonomous llm agents. arXiv preprint arXiv:2502.11355, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "93" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "[907] Silei Xu, Wenhao Xie, Lingxiao Zhao, and Pengcheng He. Chain of draft: Thinking faster by writing less. arXiv preprint arXiv:2502.18600, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.124, + 0.827, + 0.209 + ], + "angle": 0, + "content": "[908] Wenda Xu, Guanglei Zhu, Xuandong Zhao, Liangming Pan, Lei Li, and William Wang. Pride and prejudice: LLM amplifies self-bias in self-refinement. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15474–15492, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.826. URL https://aclanthology.org/2024.acl-long.826/." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.211, + 0.827, + 0.254 + ], + "angle": 0, + "content": "[909] Xiaoang Xu, Shuo Wang, Xu Han, Zhenghao Liu, Huijia Wu, Peipei Li, Zhiyuan Liu, Maosong Sun, and Zhaofeng He. A\\*thought: Efficient reasoning via bidirectional compression for low-resource settings. arXiv preprint arXiv:2505.24550, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.257, + 0.825, + 0.285 + ], + "angle": 0, + "content": "[910] Xin Xu, Shizhe Diao, Can Yang, and Yang Wang. Can we verify step by step for incorrect answer detection? arXiv preprint arXiv:2402.10528, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.289, + 0.825, + 0.331 + ], + "angle": 0, + "content": "[911] Yao Xu, Mingyu Xu, Fangyu Lei, Wangtao Sun, Xiangrong Zeng, Bingning Wang, Guang Liu, Shizhu He, Jun Zhao, and Kang Liu. Amplify adjacent token differences: Enhancing long chain-of-thought reasoning with shift-ffn. arXiv preprint arXiv:2505.17153, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.334, + 0.825, + 0.39 + ], + "angle": 0, + "content": "[912] Yi Xu, Chengzhu Li, Han Zhou, Xingchen Wan, Caiqi Zhang, Anna Korhonen, and Ivan Vulić. Visual planning: Let's think only with images. In Workshop on Foundation Models Meet Embodied Agents at CVPR 2025, may 2025. URL https://openreview.net/forum?id=ELIt3v3S1J." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.394, + 0.825, + 0.423 + ], + "angle": 0, + "content": "[913] Yige Xu, Xu Guo, Zhiwei Zeng, and Chunyan Miao. Softcot: Soft chain-of-thought for efficient reasoning with llms. arXiv preprint arXiv:2502.12134, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.426, + 0.825, + 0.455 + ], + "angle": 0, + "content": "[914] Yige Xu, Xu Guo, Zhiwei Zeng, and Chunyan Miao. Softcot++: Test-time scaling with soft chain-of-thought reasoning. arXiv preprint arXiv:2505.11484, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.458, + 0.827, + 0.5 + ], + "angle": 0, + "content": "[915] Zhangchen Xu, Fengqing Jiang, Luyao Niu, Yuntian Deng, Radha Poovendran, Yejin Choi, and Bill Yuchen Lin. Magpie: Alignment data synthesis from scratch by prompting aligned lms with nothing. arXiv preprint arXiv:2406.08464, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.504, + 0.825, + 0.533 + ], + "angle": 0, + "content": "[916] Zhangchen Xu, Yang Liu, Yueqin Yin, Mingyuan Zhou, and Radha Poovendran. Kodcode: A diverse, challenging, and verifiable synthetic dataset for coding. February 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.536, + 0.827, + 0.577 + ], + "angle": 0, + "content": "[917] Jianhao Yan, Yafu Li, Zican Hu, Zhi Wang, Ganqu Cui, Xiaoye Qu, Yu Cheng, and Yue Zhang. Learning to reason under off-policy guidance. arXiv preprint arXiv:2504.14945, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.582, + 0.825, + 0.625 + ], + "angle": 0, + "content": "[918] Kai Yan, Yufei Xu, Zhengyin Du, Xuesong Yao, Zheyu Wang, Xiaowen Guo, and Jiecao Chen. Recitation over reasoning: How cutting-edge language models can fail on elementary school-level reasoning problems? arXiv preprint arXiv:2504.00509, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.628, + 0.825, + 0.656 + ], + "angle": 0, + "content": "[919] Ruin Yan, Zheng Liu, and Defu Lian. O1 embedder: Let retrievers think before action. arXiv preprint arXiv:2502.07555, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.66, + 0.827, + 0.702 + ], + "angle": 0, + "content": "[920] Siming Yan, Min Bai, Weifeng Chen, Xiong Zhou, Qixing Huang, and Li Erran Li. Vigor: Improving visual grounding of large vision language models with fine-grained reward modeling. In European Conference on Computer Vision, pages 37-53. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.706, + 0.827, + 0.761 + ], + "angle": 0, + "content": "[921] Yibo Yan, Jiamin Su, Jianxiang He, Fangteng Fu, Xu Zheng, Yuanhuiyi Lyu, Kun Wang, Shen Wang, Qingsong Wen, and Xuming Hu. A survey of mathematical reasoning in the era of multimodal large language model: Benchmark, method & challenges. arXiv preprint arXiv:2412.11936, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.765, + 0.827, + 0.82 + ], + "angle": 0, + "content": "[922] Yibo Yan, Shen Wang, Jiahao Huo, Hang Li, Boyan Li, Jiamin Su, Xiong Gao, Yi-Fan Zhang, Tianlong Xu, Zhendong Chu, et al. 
Errorradar: Benchmarking complex mathematical reasoning of multimodal large language models via error detection. arXiv preprint arXiv:2410.04509, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.824, + 0.827, + 0.867 + ], + "angle": 0, + "content": "[923] Yibo Yan, Shen Wang, Jiahao Huo, Jingheng Ye, Zhendong Chu, Xuming Hu, Philip S Yu, Carla Gomes, Bart Selman, and Qingsong Wen. Position: Multimodal large language models can significantly advance scientific reasoning. arXiv preprint arXiv:2502.02871, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.87, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[924] Yuchen Yan, Jin Jiang, Yang Liu, Yixin Cao, Xin Xu, Xunliang Cai, Jian Shao, et al. S \\(^{3}\\) c-math: Spontaneous step-level self-correction makes large language models better mathematical reasoners. arXiv preprint arXiv:2409.01524, 2024." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "94" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.133 + ], + "angle": 0, + "content": "[925] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.138, + 0.826, + 0.18 + ], + "angle": 0, + "content": "[926] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.184, + 0.825, + 0.227 + ], + "angle": 0, + "content": "[927] An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2.5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.231, + 0.825, + 0.273 + ], + "angle": 0, + "content": "[928] Cehao Yang, Xueyuan Lin, Chengjin Xu, Xuhui Jiang, Xiaojun Wu, Honghao Liu, Hui Xiong, and Jian Guo. Select2reason: Efficient instruction-tuning data selection for long-cot reasoning. arXiv preprint arXiv:2505.17266, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.277, + 0.827, + 0.305 + ], + "angle": 0, + "content": "[929] Chen Yang, Chenyang Zhao, Quanquan Gu, and Dongruo Zhou. Cops: Empowering llm agents with provable cross-task experience sharing. arXiv preprint arXiv:2410.16670, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.309, + 0.825, + 0.338 + ], + "angle": 0, + "content": "[930] Cheng Yang, Chufan Shi, Siheng Li, Bo Shui, Yujiu Yang, and Wai Lam. Llm2: Let large language models harness system 2 reasoning. arXiv preprint arXiv:2412.20372, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.341, + 0.827, + 0.411 + ], + "angle": 0, + "content": "[931] Cheng Yang, Chufan Shi, Yaxin Liu, Bo Shui, Junjie Wang, Mohan Jing, Linran Xu, Xinyu Zhu, Siheng Li, Yuxiang Zhang, Gongye Liu, Xiaomei Nie, Deng Cai, and Yujiu Yang. Chartmimic: Evaluating LMM's cross-modal reasoning capability via chart-to-code generation. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=sGpCzsfd1K." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.415, + 0.827, + 0.455 + ], + "angle": 0, + "content": "[932] Kailai Yang, Zhiwei Liu, Qianqian Xie, Jimin Huang, Erxue Min, and Sophia Ananiadou. Selective preference optimization via token-level reward function estimation. arXiv preprint arXiv:2408.13518, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.46, + 0.827, + 0.502 + ], + "angle": 0, + "content": "[933] Kaiyu Yang, Gabriel Poesia, Jingxuan He, Wenda Li, Kristin Lauter, Swarat Chaudhuri, and Dawn Song. Formal mathematical reasoning: A new frontier in ai. arXiv preprint arXiv:2412.16075, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.507, + 0.827, + 0.548 + ], + "angle": 0, + "content": "[934] Lei Yang, Renren Jin, Ling Shi, Jianxiang Peng, Yue Chen, and Deyi Xiong. Probench: Benchmarking large language models in competitive programming. arXiv preprint arXiv:2502.20868, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.553, + 0.825, + 0.582 + ], + "angle": 0, + "content": "[935] Ling Yang, Zhaochen Yu, Bin Cui, and Mengdi Wang. Reasonflux: Hierarchical llm reasoning via scaling thought templates. arXiv preprint arXiv:2502.06772, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.585, + 0.827, + 0.627 + ], + "angle": 0, + "content": "[936] Ruihan Yang, Fanghua Ye, Jian Li, Siyu Yuan, Yikai Zhang, Zhaopeng Tu, Xiaolong Li, and Deqing Yang. The lighthouse of language: Enhancing llm agents via critique-guided improvement. arXiv preprint arXiv:2503.16024, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.631, + 0.825, + 0.687 + ], + "angle": 0, + "content": "[937] Sherry Yang, Dale Schuurmans, Pieter Abbeel, and Ofir Nachum. Chain of thought imitation with procedure cloning. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho, editors, Advances in Neural Information Processing Systems, November 2022. URL https://openreview.net/forum?id=ZJqqSa8FsH9." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.691, + 0.825, + 0.734 + ], + "angle": 0, + "content": "[938] Shiming Yang, Yuxuan Tong, Xinyao Niu, Graham Neubig, and Xiang Yue. Demystifying long chain-of-thought reasoning. In *Forty-second International Conference on Machine Learning*, may 2025. URL https://openreview.net/forum?id=OLodUbcWjb." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.737, + 0.827, + 0.779 + ], + "angle": 0, + "content": "[939] Shu Yang, Junchao Wu, Xin Chen, Yunze Xiao, Xinyi Yang, Derek F. Wong, and Di Wang. Understanding aha moments: from external observations to internal mechanisms. arXiv preprint arXiv:2504.02956, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.783, + 0.827, + 0.825 + ], + "angle": 0, + "content": "[940] Shu Yang, Junchao Wu, Xuansheng Wu, Derek Wong, Ninhao Liu, and Di Wang. Is long-to-short a free lunch? investigating inconsistency and reasoning efficiency in Irms. arXiv preprint arXiv:2506.19492, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.829, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[941] Sohee Yang, Elena Gribovskaya, Nora Kassner, Mor Geva, and Sebastian Riedel. Do large language models latently perform multi-hop reasoning? In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 10210–10229, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.550. URL https://aclanthology.org/2024.acl-long.550/." 
+ }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "95" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "[942] Wang Yang, Hongye Jin, Jingfeng Yang, Vipin Chaudhary, and Xiaotian Han. Thinking preference optimization. arXiv preprint arXiv:2502.13173, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.123, + 0.826, + 0.152 + ], + "angle": 0, + "content": "[943] Wenkai Yang, Shuming Ma, Yankai Lin, and Furu Wei. Towards thinking-optimal scaling of test-time compute for lIm reasoning. arXiv preprint arXiv:2502.18080, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.155, + 0.827, + 0.199 + ], + "angle": 0, + "content": "[944] Xiao-Wen Yang, Xuan-Yi Zhu, Wen-Da Wei, Ding-Chu Zhang, Jie-Jing Shao, Zhi Zhou, Lan-Zhe Guo, and Yu-Feng Li. Step back to leap forward: Self-backtracking for boosting reasoning of language models. arXiv preprint arXiv:2502.04404, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.201, + 0.825, + 0.231 + ], + "angle": 0, + "content": "[945] Yang Yang, Xiaolu Zhou, Bosong Ding, and Miao Xin. Uncertainty-aware reward design process. arXiv preprint arXiv:2507.02256, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.233, + 0.827, + 0.276 + ], + "angle": 0, + "content": "[946] Yifei Yang, Zouying Cao, Qiguang Chen, Libo Qin, Dongjie Yang, Hai Zhao, and Zhi Chen. Kvsharer: Efficient inference via layer-wise dissimilar kv cache sharing. arXiv preprint arXiv:2410.18517, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.279, + 0.827, + 0.334 + ], + "angle": 0, + "content": "[947] Yue Yang, MingKang Chen, Qihua Liu, Mengkang Hu, Qiguang Chen, Gengrui Zhang, Shuyue Hu, Guangtao Zhai, Yu Qiao, Yu Wang, et al. Truly assessing fluid intelligence of large language models through dynamic reasoning evaluation. arXiv preprint arXiv:2506.02648, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.338, + 0.828, + 0.409 + ], + "angle": 0, + "content": "[948] Yuqing Yang, Yan Ma, and Pengfei Liu. Weak-to-strong reasoning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 8350-8367, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.490. URL https://aclanthology.org/2024 findings-emnlp.490/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.412, + 0.827, + 0.453 + ], + "angle": 0, + "content": "[949] Zeyuan Yang, Xueyang Yu, Delin Chen, Maohao Shen, and Chuang Gan. Machine mental imagery: Empower multimodal reasoning with latent visual tokens. arXiv preprint arXiv:2506.17218, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.457, + 0.827, + 0.5 + ], + "angle": 0, + "content": "[950] Zhe Yang, Yichang Zhang, Yudong Wang, Ziyao Xu, Junyang Lin, and Zhifang Sui. Confidence vs critique: A decomposition of self-correction capability for llms. arXiv preprint arXiv:2412.19513, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.503, + 0.827, + 0.56 + ], + "angle": 0, + "content": "[951] Zonghan Yang, Peng Li, Ming Yan, Ji Zhang, Fei Huang, and Yang Liu. React meets actre: Autonomous annotation of agent trajectories for contrastive self-training. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=0VLBwQGWpA." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.563, + 0.825, + 0.619 + ], + "angle": 0, + "content": "[952] Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, et al. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. arXiv preprint arXiv:2412.18319, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.622, + 0.827, + 0.678 + ], + "angle": 0, + "content": "[953] Huanjin Yao, Jiaxing Huang, Yawen Qiu, Michael K Chen, Wenzheng Liu, Wei Zhang, Wenjie Zeng, Xikun Zhang, Jingyi Zhang, Yuxin Song, et al. Mmreason: An open-ended multi-modal multi-step reasoning benchmark for mllms toward agi. arXiv preprint arXiv:2506.23563, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.682, + 0.827, + 0.751 + ], + "angle": 0, + "content": "[954] Shunyu Yao, Howard Chen, John Yang, and Karthik R Narasimhan. Webshop: Towards scalable real-world web interaction with grounded language agents. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho, editors, Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=R9KnuFlvnU." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.755, + 0.827, + 0.853 + ], + "angle": 0, + "content": "[955] Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 11809-11822. Curran Associates, Inc., September 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/271db9922b8d1f4dd7aaef84ed5ac703-Paper-Conference.pdf." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.856, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[956] Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R Narasimhan, and Yuan Cao. React: Synergizing reasoning and acting in language models. In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=WE_vluYUL-X." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.828, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "96" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.134 + ], + "angle": 0, + "content": "[957] Xinhao Yao, Ruifeng Ren, Yun Liao, and Yong Liu. Unveiling the mechanisms of explicit cot training: How chain-of-thought enhances reasoning generalization. arXiv preprint arXiv:2502.04667, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.139, + 0.826, + 0.182 + ], + "angle": 0, + "content": "[958] Yang Yao, Xuan Tong, Ruofan Wang, Yixu Wang, Lujundong Li, Liang Liu, Yan Teng, and Yingchun Wang. A mousetrap: Fooling large reasoning models for jailbreak with chain of iterative chaos. arXiv preprint arXiv:2502.15806, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.187, + 0.826, + 0.23 + ], + "angle": 0, + "content": "[959] Wang Yaoting, Wu Shengqiong, Zhang Yuechen, Yan Shuicheng, Liu Ziwei, Luo Jiebo, and Fei Hao. Multimodal chain-of-thought reasoning: A comprehensive survey. arXiv preprint arXiv:2503.12605, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.235, + 0.826, + 0.277 + ], + "angle": 0, + "content": "[960] Michihiro Yasunaga, Luke Zettlemoyer, and Marjan Ghazvininejad. 
Multimodal reward-bench: Holistic evaluation of reward models for vision language models. arXiv preprint arXiv:2502.14191, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.282, + 0.826, + 0.313 + ], + "angle": 0, + "content": "[961] Nicolas Yax, Hernán Anló, and Stefano Palminteri. Studying and improving reasoning in humans and machines. Communications Psychology, 2(1):51, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.317, + 0.826, + 0.36 + ], + "angle": 0, + "content": "[962] Guanghao Ye, Khiem Duc Pham, Xinzhi Zhang, Sivakanth Gopi, Baolin Peng, Beibin Li, Janardhan Kulkarni, and Huseyin A Inan. On the emergence of thinking in llms i: Searching for the right intuition. arXiv preprint arXiv:2502.06773, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.364, + 0.826, + 0.407 + ], + "angle": 0, + "content": "[963] Jiaran Ye, Zijun Yao, Zhidian Huang, Liangming Pan, Jinxin Liu, Yushi Bai, Amy Xin, Liu Weichuan, Xiaoyin Che, Lei Hou, et al. How does transformer learn implicit reasoning? arXiv preprint arXiv:2505.23653, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.412, + 0.826, + 0.469 + ], + "angle": 0, + "content": "[964] Rui Ye, Shuo Tang, Rui Ge, Yaxin Du, Zhenfei Yin, Jing Shao, and Siheng Chen. MAS-GPT: Training LLMs to build LLM-based multi-agent systems. In Workshop on Reasoning and Planning for Large Language Models, March 2025. URL https://openreview.net/forum?id=TqHoQIlumy." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.473, + 0.826, + 0.531 + ], + "angle": 0, + "content": "[965] Tian Ye, Zicheng Xu, Yuanzhi Li, and Zeyuan Allen-Zhu. Physics of language models: Part 2.2, how to learn from mistakes on grade-school math problems. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=zpDGwcmMV4." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.535, + 0.826, + 0.578 + ], + "angle": 0, + "content": "[966] Xinwu Ye, Chengfan Li, Siming Chen, Xiangru Tang, and Wei Wei. Mmscibench: Benchmarking language models on multimodal scientific problems. arXiv preprint arXiv:2503.01891, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.583, + 0.826, + 0.613 + ], + "angle": 0, + "content": "[967] Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning. arXiv preprint arXiv:2502.03387, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.617, + 0.826, + 0.659 + ], + "angle": 0, + "content": "[968] Zihuiwen Ye, Fraser Greenlee-Scott, Max Bartolo, Phil Blunsom, Jon Ander Campos, and Matthias Galle. Improving reward models with synthetic critiques. arXiv preprint arXiv:2405.20850, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.665, + 0.826, + 0.708 + ], + "angle": 0, + "content": "[969] Zihuiwen Ye, Luckeciano Carvalho Melo, Younesse Kaddar, Phil Blunsom, Sam Staton, and Yarin Gal. Uncertainty-aware step-wise verification with generative reward models. arXiv preprint arXiv:2502.11250, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.713, + 0.826, + 0.755 + ], + "angle": 0, + "content": "[970] Hao Yi, Qingyang Li, Yulan Hu, Fuzheng Zhang, Di Zhang, and Yong Liu. Sppd: Self-training with process preference learning using dynamic value margin. arXiv preprint arXiv:2502.13516, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.76, + 0.826, + 0.79 + ], + "angle": 0, + "content": "[971] Jingyang Yi, Jiazheng Wang, and Sida Li. Shorterbetter: Guiding reasoning models to find optimal inference length for efficient reasoning. arXiv preprint arXiv:2504.21370, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.794, + 0.826, + 0.851 + ], + "angle": 0, + "content": "[972] Qiyue Yin, Pei Xu, Qiaozhe Li, Shengda Liu, Shengqi Shen, Tong Wang, Yihong Han, Xiaonan Zhao, Likun Yang, Shiyue Cao, et al. Wgsr-bench: Wargame-based game-theoretic strategic reasoning benchmark for large language models. arXiv preprint arXiv:2506.10264, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.856, + 0.826, + 0.913 + ], + "angle": 0, + "content": "[973] Zhangyue Yin, Qiushi Sun, Qipeng Guo, Zhiyuan Zeng, Xiaonan Li, Junqi Dai, Qinyuan Cheng, Xuanjing Huang, and Xipeng Qiu. Reasoning in flux: Enhancing large language models reasoning through uncertainty-aware adaptive guidance. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association" + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "97" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.091, + 0.828, + 0.135 + ], + "angle": 0, + "content": "for Computational Linguistics (Volume 1: Long Papers), pages 2401-2416, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.131. URL https://aclanthology.org/2024.acl-long.131/." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.139, + 0.826, + 0.182 + ], + "angle": 0, + "content": "[974] Huaiyuan Ying, Shuo Zhang, Linyang Li, Zhejian Zhou, Yunfan Shao, Zhaoye Fei, Yichuan Ma, Jiawei Hong, Kuikun Liu, Ziyi Wang, et al. Internl m - Math: Open math large language models toward verifiable reasoning. arXiv preprint arXiv:2402.06332, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.187, + 0.827, + 0.243 + ], + "angle": 0, + "content": "[975] Eunseop Yoon, Hee Suk Yoon, SooHwan Eom, Gunsoo Han, Daniel Wontae Nam, Daejin Jo, Kyoung-Woon On, Mark A Hasegawa-Johnson, Sungwoong Kim, and Chang D Yoo. Tlcr: Token-level continuous reward for fine-grained reinforcement learning from human feedback. arXiv preprint arXiv:2407.16574, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.248, + 0.825, + 0.276 + ], + "angle": 0, + "content": "[976] Jaesik Yoon, Hyeonseo Cho, Doojin Baek, Yoshua Bengio, and Sungjin Ahn. Monte carlo tree diffusion for system 2 planning. arXiv preprint arXiv:2502.07202, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.281, + 0.825, + 0.324 + ], + "angle": 0, + "content": "[977] Bin Yu, Hang Yuan, Haotian Li, Xueyin Xu, Yuliang Wei, Bailing Wang, Weizhen Qi, and Kai Chen. Long-short chain-of-thought mixture supervised fine-tuning eliciting efficient reasoning in large language models. arXiv preprint arXiv:2505.03469, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.328, + 0.827, + 0.37 + ], + "angle": 0, + "content": "[978] Dian Yu, Baolin Peng, Ye Tian, Linfeng Song, Haitao Mi, and Dong Yu. Siam: Self-improving code-assisted mathematical reasoning of large language models. arXiv preprint arXiv:2408.15565, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.375, + 0.828, + 0.459 + ], + "angle": 0, + "content": "[979] Fei Yu, Anningzhe Gao, and Benyou Wang. OVM, outcome-supervised value models for planning in mathematical reasoning. In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Findings of the Association for Computational Linguistics: NAACL 2024, pages 858-875, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-naacl.55. URL https://aclanthology.org/2024.findings-naacl.55/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.465, + 0.827, + 0.507 + ], + "angle": 0, + "content": "[980] Fei Yu, Hongbo Zhang, Prayag Tiwari, and Benyou Wang. Natural language reasoning, a survey. ACM Comput. Surv., 56(12), October 2024. ISSN 0360-0300. doi: 10.1145/3664194. URL https://doi.org/10.1145/3664194." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.511, + 0.825, + 0.54 + ], + "angle": 0, + "content": "[981] Fei Yu, Yingru Li, and Benyou Wang. Uncertainty-aware search and value models: Mitigating search scaling flaws in llms. arXiv preprint arXiv:2502.11155, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.545, + 0.827, + 0.6 + ], + "angle": 0, + "content": "[982] Hongli Yu, Tinghong Chen, Jiangtao Feng, Jiangjie Chen, Weinan Dai, Qiying Yu, YaQin Zhang, Wei-Ying Ma, Jingjing Liu, Mingxuan Wang, et al. Memagent: Reshaping long-context llm with multi-conv rl-based memory agent. arXiv preprint arXiv:2507.02259, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.606, + 0.827, + 0.676 + ], + "angle": 0, + "content": "[983] Longhui Yu, Weisen Jiang, Han Shi, Jincheng YU, Zhengying Liu, Yu Zhang, James Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=N8N0hgNDRt." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.681, + 0.825, + 0.71 + ], + "angle": 0, + "content": "[984] Ping Yu, Jing Xu, Jason Weston, and Ilia Kulikov. Distilling system 2 into system 1. arXiv preprint arXiv:2407.06023, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.715, + 0.827, + 0.757 + ], + "angle": 0, + "content": "[985] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. 
arXiv preprint arXiv:2503.14476, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.762, + 0.827, + 0.804 + ], + "angle": 0, + "content": "[986] Tianyu Yu, Bo Ji, Shouli Wang, Shu Yao, Zefan Wang, Ganqu Cui, Lifan Yuan, Ning Ding, Yuan Yao, Zhiyuan Liu, et al. Rlpr: Extrapolating rlvr to general domains without verifiers. arXiv preprint arXiv:2506.18254, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.809, + 0.827, + 0.851 + ], + "angle": 0, + "content": "[987] Tong Yu, Yongcheng Jing, Xikun Zhang, Wentao Jiang, Wenjie Wu, Yingjie Wang, Wenbin Hu, Bo Du, and Dacheng Tao. Benchmarking reasoning robustness in large language models. arXiv preprint arXiv:2503.04550, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.856, + 0.827, + 0.912 + ], + "angle": 0, + "content": "[988] Xiao Yu, Baolin Peng, Vineeth Vajipey, Hao Cheng, Michel Galley, Jianfeng Gao, and Zhou Yu. ExACT: Teaching AI agents to explore with reflective-MCTS and exploratory learning. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=GBIUbwW9D8." + }, + { + "type": "list", + "bbox": [ + 0.183, + 0.091, + 0.828, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "98" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "[989] Yahan Yu, Yuyang Dong, and Masafumi Oyamada. Learning deliberately, acting intuitively: Unlocking test-time reasoning in multimodal llms. arXiv preprint arXiv:2507.06999, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.125, + 0.827, + 0.182 + ], + "angle": 0, + "content": "[990] Yiyao Yu, Yuxiang Zhang, Dongdong Zhang, Xiao Liang, Hengyuan Zhang, Xingxing Zhang, Ziyi Yang, Mahmoud Khademi, Hany Awadalla, Junjie Wang, et al. Chain-of-reasoning: Towards unified mathematical reasoning in large language models via a multi-paradigm perspective. arXiv preprint arXiv:2501.11110, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.187, + 0.827, + 0.23 + ], + "angle": 0, + "content": "[991] Yue Yu, Zhengxing Chen, Aston Zhang, Liang Tan, Chenguang Zhu, Richard Yuanzhe Pang, Yundi Qian, Xuewei Wang, Suchin Gururangan, Chao Zhang, et al. Self-generated critiques boost reward modeling for language models. arXiv preprint arXiv:2411.16646, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.235, + 0.827, + 0.276 + ], + "angle": 0, + "content": "[992] Zeping Yu, Yonatan Belinkov, and Sophia Ananiadou. Back attention: Understanding and enhancing multi-hop reasoning in large language models. arXiv preprint arXiv:2502.10835, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.283, + 0.826, + 0.325 + ], + "angle": 0, + "content": "[993] Zhaojian Yu, Yilun Zhao, Arman Cohan, and Xiao-Ping Zhang. Humaneval pro and mbpp pro: Evaluating large language models on self-invoking code generation. arXiv preprint arXiv:2412.21199, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.33, + 0.824, + 0.36 + ], + "angle": 0, + "content": "[994] Zhaojian Yu, Yinghao Wu, Yilun Zhao, Arman Cohan, and Xiao-Ping Zhang. Z1: Efficient test-time scaling with code. arXiv preprint arXiv:2504.00810, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.365, + 0.827, + 0.407 + ], + "angle": 0, + "content": "[995] Zhouliang Yu, Yuhuan Yuan, Tim Z Xiao, Fuxiang Frank Xia, Jie Fu, Ge Zhang, Ge Lin, and Weiyang Liu. Generating symbolic world models via test-time scaling of large language models. 
arXiv preprint arXiv:2502.04728, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.412, + 0.825, + 0.453 + ], + "angle": 0, + "content": "[996] Zhuohao Yu, Weizheng Gu, Yidong Wang, Zhengran Zeng, Jindong Wang, Wei Ye, and Shikun Zhang. Outcome-refining process supervision for code generation. arXiv preprint arXiv:2412.15118, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.46, + 0.827, + 0.515 + ], + "angle": 0, + "content": "[997] Zishun Yu, Tengyu Xu, Di Jin, Karthik Abinav Sankararaman, Yun He, Wenxuan Zhou, Zhouhao Zeng, Eryk Helenowski, Chen Zhu, Sinong Wang, et al. Think smarter not harder: Adaptive reasoning with inference aware optimization. arXiv preprint arXiv:2501.17974, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.521, + 0.827, + 0.563 + ], + "angle": 0, + "content": "[998] Hang Yuan, Bin Yu, Haotian Li, Shijun Yang, Christina Dan Wang, Zhou Yu, Xueyin Xu, Weizhen Qi, and Kai Chen. Not all tokens are what you need in thinking. arXiv preprint arXiv:2505.17827, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.57, + 0.827, + 0.611 + ], + "angle": 0, + "content": "[999] Jiahao Yuan, Dehui Du, Hao Zhang, Zixiang Di, and Usman Naseem. Reversal of thought: Enhancing large language models with preference-guided reverse reasoning warm-up. arXiv preprint arXiv:2410.12323, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.617, + 0.827, + 0.658 + ], + "angle": 0, + "content": "[1000] Lifan Yuan, Wendi Li, Huayu Chen, Ganqu Cui, Ning Ding, Kaiyan Zhang, Bowen Zhou, Zhiyuan Liu, and Hao Peng. Free process rewards without process labels. arXiv preprint arXiv:2412.01981, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.665, + 0.827, + 0.735 + ], + "angle": 0, + "content": "[1001] Lifan Yuan, Ganqu Cui, Hanbin Wang, Ning Ding, Xingyao Wang, Boji Shan, Zeyuan Liu, Jia Deng, Huimin Chen, Ruobing Xie, Yankai Lin, Zhenghao Liu, Bowen Zhou, Hao Peng, Zhiyuan Liu, and Maosong Sun. 
Advancing LLM reasoning generalists with preference trees. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=2ea5TNVR0c." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.74, + 0.827, + 0.783 + ], + "angle": 0, + "content": "[1002] Michelle Yuan, Elman Mansimov, Katerina Margatina, Anurag Pratik, Daniele Bonadiman, Monica Sunkara, Yi Zhang, Yassine Benajiba, et al. A study on leveraging search and self-feedback for agent reasoning. arXiv preprint arXiv:2502.12094, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.788, + 0.827, + 0.83 + ], + "angle": 0, + "content": "[1003] Siyu Yuan, Zehui Chen, Zhiheng Xi, Junjie Ye, Zhengyin Du, and Jiecao Chen. Agentr: Training language model agents to reflect via iterative self-training. arXiv preprint arXiv:2501.11425, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.836, + 0.827, + 0.878 + ], + "angle": 0, + "content": "[1004] Weizhe Yuan, Jane Yu, Song Jiang, Karthik Padthe, Yang Li, Dong Wang, Ilia Kulikov, Kyunghyun Cho, Yuandong Tian, Jason E Weston, and Xian Li. Naturalreasoning: Reasoning in the wild with 2.8m challenging questions, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.883, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[1005] Yige Yuan, Teng Xiao, Shuchang Tao, Xue Wang, Jinyang Gao, Bolin Ding, and Bingbing Xu. Incentivizing reasoning from weak supervision. arXiv preprint arXiv:2505.20072, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "99" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[1006] Xiang Yue, Xingwei Qu, Ge Zhang, Yao Fu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. Mammoth: Building math generalist models through hybrid instruction tuning. arXiv preprint arXiv:2309.05653, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.137, + 0.827, + 0.194 + ], + "angle": 0, + "content": "[1007] Xiang Yue, Tianyu Zheng, Ge Zhang, and Wenhu Chen. Mammoth2: Scaling instructions from the web. Advances in Neural Information Processing Systems, 37:90629-90660, 2025. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/a4ca07aa108036f80cbb5b82285fd4b1-Paper-Conference.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.198, + 0.827, + 0.241 + ], + "angle": 0, + "content": "[1008] Zhenrui Yue, Bowen Jin, Huimin Zeng, Honglei Zhuang, Zhen Qin, Jinsung Yoon, Lanyu Shang, Jiawei Han, and Dong Wang. Hybrid latent reasoning via reinforcement learning. arXiv preprint arXiv:2505.18454, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.244, + 0.827, + 0.3 + ], + "angle": 0, + "content": "[1009] Mert Yuksekgonul, Federico Bianchi, Joseph Boen, Sheng Liu, Pan Lu, Zhi Huang, Carlos Guestrin, and James Zou. Optimizing generative ai by backpropagating language model feedback. Nature, 639(8055):609-616, March 2025. URL https://www.nature.com/articles/s41586-025-08661-4." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.304, + 0.827, + 0.387 + ], + "angle": 0, + "content": "[1010] YuYue, Yufeng Yuan, Qiying Yu, Xiaochen Zuo, Ruofei Zhu, Wenyuan Xu, Jiaze Chen, Chengyi Wang, TianTian Fan, Zhengyin Du, Xiangpeng Wei, Gaohong Liu, Juncai Liu, Lingjun Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Ru Zhang, Xin Liu, Mingxuan Wang, Yonghui Wu, and Lin Yan. Vapo: Efficient and reliable reinforcement learning for advanced reasoning tasks. arXiv preprint arXiv:2504.05118, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.392, + 0.825, + 0.436 + ], + "angle": 0, + "content": "[1011] Yuhang Zang, Xiaoyi Dong, Pan Zhang, Yuhang Cao, Ziyu Liu, Shengyuan Ding, Shenxi Wu, Yubo Ma, Haodong Duan, Wenwei Zhang, et al. Internlm-xcomposer2.5-reward: A simple yet effective multi-modal reward model. arXiv preprint arXiv:2501.12368, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.439, + 0.827, + 0.482 + ], + "angle": 0, + "content": "[1012] Eric Zelikman, Yuhuai Wu, Jesse Mu, and Noah Goodman. Star: Bootstrapping reasoning with reasoning. Advances in Neural Information Processing Systems, 35:15476-15488, November 2022. URL https://openreview.net/pdf?id=3ELRdg2sqI." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.485, + 0.827, + 0.528 + ], + "angle": 0, + "content": "[1013] Eric Zelikman, Georges Harik, Yijia Shao, Varuna Jayasiri, Nick Haber, and Noah D Goodman. Quiet-star: Language models can teach themselves to think before speaking. arXiv preprint arXiv:2403.09629, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.531, + 0.827, + 0.573 + ], + "angle": 0, + "content": "[1014] Huaye Zeng, Dongfu Jiang, Haozhe Wang, Ping Nie, Xiaotong Chen, and Wenhu Chen. Acecoder: Acing coder rl via automated test-case synthesis. arXiv preprint arXiv:2502.01718, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.577, + 0.827, + 0.621 + ], + "angle": 0, + "content": "[1015] Thomas Zeng, Shuibai Zhang, Shutong Wu, Christian Classen, Daewon Chae, Ethan Ewer, Minjae Lee, Heeju Kim, Wonjun Kang, Jackson Kunde, et al. Versaprm: Multi-domain process reward model via synthetic reasoning data. arXiv preprint arXiv:2502.06737, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.624, + 0.827, + 0.667 + ], + "angle": 0, + "content": "[1016] Weihao Zeng, Yuzhen Huang, Lulu Zhao, Yijun Wang, Zifei Shan, and Junxian He. B-star: Monitoring and balancing exploration and exploitation in self-taught reasoners. arXiv preprint arXiv:2412.17256, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.67, + 0.827, + 0.713 + ], + "angle": 0, + "content": "[1017] Weihao Zeng, Yuzhen Huang, Qian Liu, Wei Liu, Keqing He, Zejun Ma, and Junxian He. Simplerl-zoo: Investigating and taming zero reinforcement learning for open base models in the wild, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.716, + 0.825, + 0.761 + ], + "angle": 0, + "content": "[1018] Yongcheng Zeng, Xinyu Cui, Xuanfa Jin, Guoqing Liu, Zexu Sun, Quan He, Dong Li, Ning Yang, Jianye Hao, Haifeng Zhang, et al. Aries: Stimulating self-refinement of large language models by iterative preference optimization. arXiv preprint arXiv:2502.05605, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.763, + 0.827, + 0.819 + ], + "angle": 0, + "content": "[1019] Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. arXiv preprint arXiv:2412.14135, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.822, + 0.827, + 0.865 + ], + "angle": 0, + "content": "[1020] Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Yunhua Zhou, and Xipeng Qiu. 
Revisiting the test-time scaling of o1-like models: Do they truly possess test-time scaling capabilities? arXiv preprint arXiv:2502.12215, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.869, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[1021] Zhongshen Zeng, Yinhong Liu, Yingjia Wan, Jingyao Li, Pengguang Chen, Jianbo Dai, Yuxuan Yao, Rongwu Xu, Zehan Qi, Wanru Zhao, Linling Shen, Jianqiao Lu, Haochen Tan, Yukang Chen, Hao Zhang, Zhan Shi, Bailin Wang, Zhijiang Guo, and Jiaya Jia. MR-ben:" + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.935, + 0.513, + 0.948 + ], + "angle": 0, + "content": "100" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.092, + 0.826, + 0.135 + ], + "angle": 0, + "content": "A meta-reasoning benchmark for evaluating system-2 thinking in LLMs. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, June 2024. URL https://openreview.net/forum?id=GN2qbxZ1ni." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.139, + 0.826, + 0.168 + ], + "angle": 0, + "content": "[1022] Zihao Zeng, Xuyao Huang, Boxiu Li, and Zhijie Deng. Sift: Grounding llm reasoning in contexts via stickers. arXiv preprint arXiv:2502.14922, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.172, + 0.827, + 0.243 + ], + "angle": 0, + "content": "[1023] Yuexiang Zhai, Hao Bai, Zipeng Lin, Jiayi Pan, Shengbang Tong, Yifei Zhou, Alane Suhr, Saining Xie, Yann LeCun, Yi Ma, and Sergey Levine. Fine-tuning large vision-language models as decision-making agents via reinforcement learning. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=nBjmMF2IZU." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.246, + 0.825, + 0.29 + ], + "angle": 0, + "content": "[1024] Zaifu Zhan, Shuang Zhou, Huixue Zhou, Jiawen Deng, Yu Hou, Jeremy Yeung, and Rui Zhang. An evaluation of deepseek models in biomedical natural language processing. arXiv preprint arXiv:2503.00624, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.293, + 0.826, + 0.338 + ], + "angle": 0, + "content": "[1025] Alexander Zhang, Marcus Dong, Jiaheng Liu, Wei Zhang, Yejie Wang, Jian Yang, Ge Zhang, Tianyu Liu, Zhongyuan Peng, Yingshui Tan, et al. Codecriticbench: A holistic code critique benchmark for large language models. arXiv preprint arXiv:2502.16614, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.341, + 0.827, + 0.386 + ], + "angle": 0, + "content": "[1026] Beichen Zhang, Yuhong Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Haodong Duan, Yuhang Cao, Dahua Lin, and Jiaqi Wang. Booststep: Boosting mathematical capability of large language models via improved single-step reasoning. arXiv preprint arXiv:2501.03226, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.388, + 0.827, + 0.432 + ], + "angle": 0, + "content": "[1027] Bohan Zhang, Xiaokang Zhang, Jing Zhang, Jifan Yu, Sijia Luo, and Jie Tang. Cot-based synthesizer: Enhancing llm performance through answer synthesis. arXiv preprint arXiv:2501.01668, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.435, + 0.825, + 0.479 + ], + "angle": 0, + "content": "[1028] Che Zhang, Zhenyang Xiao, Chengcheng Han, Yixin Lian, and Yuejian Fang. Learning to check: Unleashing potentials for self-correction in large language models. arXiv preprint arXiv:2402.13035, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.482, + 0.827, + 0.526 + ], + "angle": 0, + "content": "[1029] Chi Zhang, Jiajun Song, Siyu Li, Yitao Liang, Yuxi Ma, Wei Wang, Yixin Zhu, and Song-Chun Zhu. Proposing and solving olympiad geometry with guided tree search. 
arXiv preprint arXiv:2412.10673, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.53, + 0.827, + 0.601 + ], + "angle": 0, + "content": "[1030] Chunhui Zhang, Zhongyu Ouyang, Kwonjoon Lee, Nakul Agarwal, Sean Dae Houlihan, Soroush Vosoughi, and Shao-Yuan Lo. Overcoming multi-step complexity in multimodal theory-of-mind reasoning: A scalable bayesian planner. In *Forty-second International Conference on Machine Learning*, 2025. URL https://openreview.net/forum?id=2dz6psiiA0." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.605, + 0.825, + 0.649 + ], + "angle": 0, + "content": "[1031] Dalong Zhang, Jun Xu, Jun Zhou, Lei Liang, Lin Yuan, Ling Zhong, Mengshu Sun, Peilong Zhao, QiWei Wang, Xiaorui Wang, et al. Kag-thinker: Teaching large language models to think with human-like reasoning process. arXiv preprint arXiv:2506.17728, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.652, + 0.827, + 0.71 + ], + "angle": 0, + "content": "[1032] Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. ReST-MCTS*: LLM self-training via process reward guided tree search. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=8rcFOqEud5." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.713, + 0.827, + 0.757 + ], + "angle": 0, + "content": "[1033] Di Zhang, Xiaoshui Huang, Dongzhan Zhou, Yuqiang Li, and Wanli Ouyang. Accessing gpt-4 level mathematical olympiad solutions via monte carlo tree self-refine with llama-3 8b. arXiv preprint arXiv:2406.07394, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.76, + 0.825, + 0.805 + ], + "angle": 0, + "content": "[1034] Di Zhang, Jianbo Wu, Jingdi Lei, Tong Che, Jiatong Li, Tong Xie, Xiaoshui Huang, Shufei Zhang, Marco Pavone, Yuqiang Li, et al. Llama-berry: Pairwise optimization for o1-like olympiad-level mathematical reasoning. arXiv preprint arXiv:2410.02884, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.808, + 0.827, + 0.865 + ], + "angle": 0, + "content": "[1035] Fengji Zhang, Linquan Wu, Huiyu Bai, Guancheng Lin, Xiao Li, Xiao Yu, Yue Wang, Bei Chen, and Jacky Keung. Humaneval-v: Evaluating visual understanding and reasoning abilities of large multimodal models through coding tasks. arXiv preprint arXiv:2410.12381, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.868, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[1036] Hanning Zhang, Pengcheng Wang, Shizhe Diao, Yong Lin, Rui Pan, Hanze Dong, Dylan Zhang, Pavlo Molchanov, and Tong Zhang. Entropy-regularized process reward model. arXiv preprint arXiv:2412.11006, 2024." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.092, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.511, + 0.948 + ], + "angle": 0, + "content": "101" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[1037] Haoyue Zhang, Hualei Zhang, Xiaosong Ma, Jie Zhang, and Song Guo. Lazyeviction: Lagged kv eviction with attention pattern observation for efficient long reasoning. arXiv preprint arXiv:2506.15969, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.137, + 0.826, + 0.182 + ], + "angle": 0, + "content": "[1038] Hongbo Zhang, Han Cui, Guangsheng Bao, Linyi Yang, Jun Wang, and Yue Zhang. Direct value optimization: Improving chain-of-thought reasoning in llms with refined values. arXiv preprint arXiv:2502.13723, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.184, + 0.827, + 0.227 + ], + "angle": 0, + "content": "[1039] Jiayi Zhang, Jinyu Xiang, Zhaoyang Yu, Fengwei Teng, Xionghui Chen, Jiaqi Chen, Mingchen Zhuge, Xin Cheng, Sirui Hong, Jinlin Wang, et al. 
Aflow: Automating agentic workflow generation. arXiv preprint arXiv:2410.10762, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.229, + 0.827, + 0.273 + ], + "angle": 0, + "content": "[1040] Jinghan Zhang, Xiting Wang, Fengran Mo, Yeyang Zhou, Wanfu Gao, and Kunpeng Liu. Entropy-based exploration conduction for multi-step reasoning. arXiv preprint arXiv:2503.15848, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.275, + 0.827, + 0.319 + ], + "angle": 0, + "content": "[1041] Jintian Zhang, Yuqi Zhu, Mengshu Sun, Yujie Luo, Shuofei Qiao, Lun Du, Da Zheng, Huajun Chen, and Ningyu Zhang. Lighthinker: Thinking step-by-step compression. arXiv preprint arXiv:2502.15589, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.321, + 0.827, + 0.365 + ], + "angle": 0, + "content": "[1042] Kaiyi Zhang, Ang Lv, Jinpeng Li, Yongbo Wang, Feng Wang, Haoyuan Hu, and Rui Yan. Stephint: Multi-level stepwise hints enhance reinforcement learning to reason. arXiv preprint arXiv:2507.02841, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.367, + 0.827, + 0.41 + ], + "angle": 0, + "content": "[1043] Kechi Zhang, Ge Li, Jia Li, Yihong Dong, and Zhi Jin. Focused-dpo: Enhancing code generation through focused preference optimization on error-prone points. arXiv preprint arXiv:2502.11475, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.413, + 0.827, + 0.457 + ], + "angle": 0, + "content": "[1044] Kechi Zhang, Ge Li, Jia Li, Huangzhao Zhang, Jingjing Xu, Hao Zhu, Lecheng Wang, Yihong Dong, Jing Mai, Bin Gu, et al. Computational thinking reasoning in large language models. arXiv preprint arXiv:2506.02658, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.459, + 0.827, + 0.49 + ], + "angle": 0, + "content": "[1045] Kexun Zhang, Shang Zhou, Danqing Wang, William Yang Wang, and Lei Li. Scaling llm inference with optimized sample compute allocation. arXiv preprint arXiv:2410.22480, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.492, + 0.827, + 0.535 + ], + "angle": 0, + "content": "[1046] Kongcheng Zhang, Qi Yao, Baisheng Lai, Jiaxing Huang, Wenkai Fang, Dacheng Tao, Mingli Song, and Shunyu Liu. Reasoning with reinforced functional token tuning. arXiv preprint arXiv:2502.13389, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.537, + 0.827, + 0.581 + ], + "angle": 0, + "content": "[1047] Kongcheng Zhang, Qi Yao, Shunyu Liu, Yingjie Wang, Baisheng Lai, Jieping Ye, Mingli Song, and Dacheng Tao. Consistent paths lead to truth: Self-rewarding reinforcement learning for lIm reasoning. arXiv preprint arXiv:2506.08745, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.584, + 0.827, + 0.627 + ], + "angle": 0, + "content": "[1048] Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. arXiv preprint arXiv:2408.15240, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.63, + 0.827, + 0.687 + ], + "angle": 0, + "content": "[1049] Ming Zhang, Yu jiong Shen, Zelin Li, Huayu Sha, Binze Hu, Yuhui Wang, Chenhao Huang, Shichun Liu, Jingqi Tong, Changhao Jiang, et al. Llmeval-med: A real-world clinical benchmark for medical llms with physician validation. arXiv preprint arXiv:2506.04078, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.689, + 0.827, + 0.773 + ], + "angle": 0, + "content": "[1050] Ming-Liang Zhang, Fei yin, and Cheng-Lin Liu. A multi-modal neural geometric solver with textual clauses parsed from diagram. In Edith Elkind, editor, Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence, IJCAI-23, pages 3374-3382. International Joint Conferences on Artificial Intelligence Organization, 8 2023. doi: 10.24963/ijcai.2023/376. URL https://doi.org/10.24963/ijcai.2023/376. Main Track." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.776, + 0.827, + 0.82 + ], + "angle": 0, + "content": "[1051] Qingjie Zhang, Han Qiu, Di Wang, Haoting Qian, Yiming Li, Tianwei Zhang, and Minlie Huang. Understanding the dark side of llms' intrinsic self-correction. arXiv preprint arXiv:2412.14959, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.823, + 0.827, + 0.868 + ], + "angle": 0, + "content": "[1052] Qiyuan Zhang, Fuyuan Lyu, Zexu Sun, Lei Wang, Weixu Zhang, Zhihan Guo, Yufei Wang, Irwin King, Xue Liu, and Chen Ma. What, how, where, and how well? a survey on test-time scaling in large language models. arXiv preprint arXiv:2503.24235, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.87, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[1053] Qiyuan Zhang, Fuyuan Lyu, Zexu Sun, Lei Wang, Weixu Zhang, Wenyue Hua, Haolun Wu, Zhihan Guo, Yufei Wang, Niklas Muennighoff, et al. A survey on test-time scaling in large language models: What, how, where, and how well? arXiv preprint arXiv:2503.24235, 2025." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.512, + 0.948 + ], + "angle": 0, + "content": "102" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.161 + ], + "angle": 0, + "content": "[1054] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Yu Qiao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? In European Conference on Computer Vision, pages 169-186. Springer, October 2024. URL https://link.springer.com/chapter/10.1007/978-3-031-73242-3_10." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.165, + 0.827, + 0.25 + ], + "angle": 0, + "content": "[1055] Shaowei Zhang and Deyi Xiong. BackMATH: Towards backward reasoning for solving math problems step by step. In Owen Rambow, Leo Wanner, Marianna Apidianaki, Hend Al-Khalifa, Barbara Di Eugenio, Steven Schockaert, Kareem Darwish, and Apoorv Agarwal, editors, Proceedings of the 31st International Conference on Computational Linguistics: Industry Track, pages 466-482, Abu Dhabi, UAE, January 2025. Association for Computational Linguistics. URL https://aclanthology.org/2025.coling-industry.40/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.253, + 0.825, + 0.295 + ], + "angle": 0, + "content": "[1056] Shenao Zhang, Yaqing Wang, Yinxiao Liu, Tianqi Liu, Peter Grabowski, Eugene Ie, Zhaoran Wang, and Yunxuan Li. Beyond markovian: Reflective exploration via bayes-adaptive rl for llm reasoning. arXiv preprint arXiv:2505.20561, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.298, + 0.825, + 0.341 + ], + "angle": 0, + "content": "[1057] Shengjia Zhang, Junjie Wu, Jiawei Chen, Changwang Zhang, Xingyu Lou, Wangchunshu Zhou, Sheng Zhou, Can Wang, and Jun Wang. Othink-r1: Intrinsic fast/slow thinking mode switching for over-reasoning mitigation. arXiv preprint arXiv:2506.02397, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.345, + 0.827, + 0.387 + ], + "angle": 0, + "content": "[1058] Shengyu Zhang, Linfeng Dong, Xiaoya Li, Sen Zhang, Xiaofei Sun, Shuhe Wang, Jiwei Li, Runyi Hu, Tianwei Zhang, Fei Wu, et al. Instruction tuning for large language models: A survey. arXiv preprint arXiv:2308.10792, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.391, + 0.827, + 0.433 + ], + "angle": 0, + "content": "[1059] Shimao Zhang, Xiao Liu, Xin Zhang, Junxiao Liu, Zheheng Luo, Shujian Huang, and Yeyun Gong. Process-based self-rewarding language models. arXiv preprint arXiv:2503.03746, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.437, + 0.827, + 0.492 + ], + "angle": 0, + "content": "[1060] Weizhi Zhang, Yangning Li, Yuanchen Bei, Junyu Luo, Guancheng Wan, Liangwei Yang, Chenxuan Xie, Yuyao Yang, Wei-Chieh Huang, Chunyu Miao, et al. From web search towards agentic deep research: Incentivizing search with reasoning agents. arXiv preprint arXiv:2506.18959, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.497, + 0.827, + 0.54 + ], + "angle": 0, + "content": "[1061] Wenjing Zhang, Xuejiao Lei, Zhaoxiang Liu, Ning Wang, Zhenhong Long, Peijun Yang, Jiaojiao Zhao, Minjie Hua, Chaoyang Ma, Kai Wang, et al. Safety evaluation of deepseek models in Chinese contexts. arXiv preprint arXiv:2502.11137, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.543, + 0.827, + 0.64 + ], + "angle": 0, + "content": "[1062] Wenqi Zhang, Yongliang Shen, Linjuan Wu, Qiuying Peng, Jun Wang, Yueting Zhuang, and Weiming Lu. Self-contrast: Better reflection through inconsistent solving perspectives. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 3602–3622, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.197. URL https://aclanthology.org/2024.acl-long.197/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.644, + 0.825, + 0.687 + ], + "angle": 0, + "content": "[1063] Xiaoyun Zhang, Jingqing Ruan, Xing Ma, Yawen Zhu, Haodong Zhao, Hao Li, Jiansong Chen, Ke Zeng, and Xunliang Cai. When to continue thinking: Adaptive thinking mode switching for efficient reasoning. arXiv preprint arXiv:2505.15400, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.69, + 0.827, + 0.733 + ], + "angle": 0, + "content": "[1064] Xinyu Zhang, Yuxuan Dong, Yanrui Wu, Jiaxing Huang, Chengyou Jia, Basura Fernando, Mike Zheng Shou, Lingling Zhang, and Jun Liu. 
Physreason: A comprehensive benchmark towards physics-based reasoning. arXiv preprint arXiv:2502.12054, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.737, + 0.827, + 0.834 + ], + "angle": 0, + "content": "[1065] Xuan Zhang, Chao Du, Tianyu Pang, Qian Liu, Wei Gao, and Min Lin. Chain of preference optimization: Improving chain-of-thought reasoning in llms. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 333-356. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/00d80722b756de0166523a87805dd00f-Paper-Conference.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.838, + 0.827, + 0.879 + ], + "angle": 0, + "content": "[1066] Xuanliang Zhang, Dingzirui Wang, Keyan Xu, Qingfu Zhu, and Wanxiang Che. Rot: Enhancing table reasoning with iterative row-wise traversals. arXiv preprint arXiv:2505.15110, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[1067] Yifan Zhang, Yang Yuan, and Andrew Chi-Chih Yao. On the diagram of thought. arXiv preprint arXiv:2409.10038, 2024." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.512, + 0.948 + ], + "angle": 0, + "content": "103" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.091, + 0.826, + 0.134 + ], + "angle": 0, + "content": "[1068] Yifan Zhang, Wenyu Du, Dongming Jin, Jie Fu, and Zhi Jin. Finite state automata inside transformers with chain-of-thought: A mechanistic study on state tracking. arXiv preprint arXiv:2502.20129, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.139, + 0.827, + 0.194 + ], + "angle": 0, + "content": "[1069] Yong Zhang, Bingyuan Zhang, Zhitao Li, Ming Li, Ning Cheng, Minchuan Chen, Tao Wei, Jun Ma, Shaojun Wang, and Jing Xiao. Self-enhanced reasoning training: Activating latent reasoning in small models for enhanced reasoning distillation. arXiv preprint arXiv:2502.12744, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.199, + 0.827, + 0.284 + ], + "angle": 0, + "content": "[1070] Yongheng Zhang, Qiguang Chen, Min Li, Wanxiang Che, and Libo Qin. AutoCAP: Towards automatic cross-lingual alignment planning for zero-shot chain-of-thought. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 9191–9200, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.546. URL https://aclanthology.org/2024-findings-acl.546/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.288, + 0.827, + 0.387 + ], + "angle": 0, + "content": "[1071] Yongheng Zhang, Qiguang Chen, Jingxuan Zhou, Peng Wang, Jiasheng Si, Jin Wang, Wenpeng Lu, and Libo Qin. Wrong-of-thought: An integrated reasoning framework with multi-perspective verification and wrong information. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 6644-6653, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.388. URL https://aclanthology.org/2024-findings-emnlp.388/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.39, + 0.825, + 0.435 + ], + "angle": 0, + "content": "[1072] Yongheng Zhang, Xu Liu, Ruihan Tao, Qiguang Chen, Hao Fei, Wanxiang Che, and Libo Qin. Vitcot: Video-text interleaved chain-of-thought for boosting video understanding in large language models. arXiv preprint arXiv:2507.09876, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.438, + 0.827, + 0.481 + ], + "angle": 0, + "content": "[1073] Yongheng Zhang, Xu Liu, Ruoxi Zhou, Qiguang Chen, Hao Fei, Wenpeng Lu, and Libo Qin. Cchall: A novel benchmark for joint cross-lingual and cross-modal hallucinations detection in large language models. arXiv preprint arXiv:2505.19108, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.484, + 0.827, + 0.54 + ], + "angle": 0, + "content": "[1074] Yudi Zhang, Lu Wang, Meng Fang, Yali Du, Chenghua Huang, Jun Wang, Qingwei Lin, Mykola Pechenizkiy, Dongmei Zhang, Saravan Rajmohan, et al. Distill not only data but also rewards: Can smaller language models surpass larger ones? arXiv preprint arXiv:2502.19557, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.546, + 0.827, + 0.63 + ], + "angle": 0, + "content": "[1075] Yunxiang Zhang, Muhammad Khalifa, Lajanugen Logeswaran, Jaekyeom Kim, Moontae Lee, Honglak Lee, and Lu Wang. Small language models need strong verifiers to self-correct reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 15637–15653, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.924. URL https://aclanthology.org/2024 findings-acl.924/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.634, + 0.825, + 0.664 + ], + "angle": 0, + "content": "[1076] Yuxiang Zhang, Shangxi Wu, Yuqi Yang, Jiangming Shu, Jinlin Xiao, Chao Kong, and Jitao Sang. o1-coder: an o1 replication for coding. arXiv preprint arXiv:2412.00154, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.667, + 0.827, + 0.711 + ], + "angle": 0, + "content": "[1077] Yuxiang Zhang, Yuqi Yang, Jiangming Shu, Yuhang Wang, Jinlin Xiao, and Jitao Sang. Openrft: Adapting reasoning foundation model for domain-specific tasks with reinforcement fine-tuning. arXiv preprint arXiv:2412.16849, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.715, + 0.825, + 0.758 + ], + "angle": 0, + "content": "[1078] Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning. arXiv preprint arXiv:2501.07301, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.762, + 0.827, + 0.805 + ], + "angle": 0, + "content": "[1079] Zhihao Zhang, Qiaole Dong, Qi Zhang, Jun Zhao, Enyu Zhou, Zhiheng Xi, Senjie Jin, Xiaoran Fan, Yuhao Zhou, Yanwei Fu, et al. Reinforcement fine-tuning enables mllms learning novel tasks stably. arXiv preprint arXiv:2506.23508, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.809, + 0.827, + 0.852 + ], + "angle": 0, + "content": "[1080] Zhongwang Zhang, Pengxiao Lin, Zhiwei Wang, Yaoyu Zhang, and Zhi-Qin John Xu. Complexity control facilitates reasoning-based compositional generalization in transformers. arXiv preprint arXiv:2501.08537, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.856, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[1081] Zhuosheng Zhang, Aston Zhang, Mu Li, hai zhao, George Karypis, and Alex Smola. Multi-modal chain-of-thought reasoning in language models. Transactions on Machine Learning Research, June 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=y1pPWFVfvR." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.512, + 0.948 + ], + "angle": 0, + "content": "104" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[1082] Deji Zhao, Donghong Han, Jia Wu, Zhongjiang He, Bo Ning, Ye Yuan, Yongxiang Li, Chao Wang, and Shuangyong Song. Enhancing math reasoning ability of large language models via computation logic graphs. Knowledge-Based Systems, page 113905, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.137, + 0.825, + 0.168 + ], + "angle": 0, + "content": "[1083] Eric Zhao, Pranjal Awasthi, and Sreenivas Gollapudi. Sample, scrutinize and scale: Effective inference-time search by scaling verification. arXiv preprint arXiv:2502.01839, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.17, + 0.825, + 0.214 + ], + "angle": 0, + "content": "[1084] Han Zhao, Haotian Wang, Yiping Peng, Sitong Zhao, Xiaoyu Tian, Shuaiying Chen, Yunjie Ji, and Xiangang Li. 1.4 million open-source distilled reasoning dataset to empower large language model training. arXiv preprint arXiv:2503.19633, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.217, + 0.829, + 0.315 + ], + "angle": 0, + "content": "[1085] Jun Zhao, Jingqi Tong, Yurong Mou, Ming Zhang, Qi Zhang, and Xuanjing Huang. Exploring the compositional deficiency of large language models in mathematical reasoning through trap problems. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 16361-16376, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.915. 
URL https://aclanthology.org/2024.emnlp-main.915/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.318, + 0.827, + 0.375 + ], + "angle": 0, + "content": "[1086] Lili Zhao, Yang Wang, Qi Liu, Mengyun Wang, Wei Chen, Zhichao Sheng, and Shijin Wang. Evaluating large language models through role-guide and self-reflection: A comparative study. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=E36NHwe7Zc." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.377, + 0.827, + 0.422 + ], + "angle": 0, + "content": "[1087] Shangziqi Zhao, Jiahao Yuan, Guisong Yang, and Usman Naseem. Can pruning improve reasoning? revisiting long-cot compression with capability in mind for better reasoning. arXiv preprint arXiv:2505.14582, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.424, + 0.827, + 0.48 + ], + "angle": 0, + "content": "[1088] Weixiang Zhao, Jiahe Guo, Yang Deng, Xingyu Sui, Yulin Hu, Yanyan Zhao, Wanxiang Che, Bing Qin, Tat-Seng Chua, and Ting Liu. Exploring and exploiting the inherent efficiency within large reasoning models for self-guided efficiency enhancement. arXiv preprint arXiv:2506.15647, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.483, + 0.825, + 0.514 + ], + "angle": 0, + "content": "[1089] Xuandong Zhao, Zhewei Kang, Aosong Feng, Sergey Levine, and Dawn Song. Learning to reason without external rewards. arXiv preprint arXiv:2505.19590, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.517, + 0.827, + 0.56 + ], + "angle": 0, + "content": "[1090] Xueliang Zhao, Wei Wu, Jian Guan, and Lingpeng Kong. Promptcot: Synthesizing olympiad-level problems for mathematical reasoning in large language models. arXiv preprint arXiv:2503.02324, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.563, + 0.827, + 0.662 + ], + "angle": 0, + "content": "[1091] Xufeng Zhao, Mengdi Li, Wenhao Lu, Cornelius Weber, Jae Hee Lee, Kun Chu, and Stefan Wermter. Enhancing zero-shot chain-of-thought reasoning in large language models through logic. In Nicoletta Calzolari, Min-Yen Kan, Veronique Hoste, Alessandro Lenci, Sakriani Sakti, and Nianwen Xue, editors, Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 6144-6166, Torino, Italia, May 2024. ELRA and ICCL. URL https://aclanthology.org/2024.lrec-main.543/." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.665, + 0.825, + 0.695 + ], + "angle": 0, + "content": "[1092] Yachao Zhao, Bo Wang, and Yan Wang. Explicit vs. implicit: Investigating social bias in large language models through self-reflection. arXiv preprint arXiv:2501.02295, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.698, + 0.827, + 0.741 + ], + "angle": 0, + "content": "[1093] Yang Zhao, Kai Xiong, Xiao Ding, Li Du, Zhouhao Sun, Jiannan Guan, Wenbin Zhang, Bin Liu, Dong Hu, Bing Qin, et al. Ufo-rl: Uncertainty-focused optimization for efficient reinforcement learning data selection. arXiv preprint arXiv:2505.12457, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.744, + 0.827, + 0.786 + ], + "angle": 0, + "content": "[1094] Yichong Zhao and Susumu Goto. Can frontier llms replace annotators in biomedical text mining? analyzing challenges and exploring solutions. arXiv preprint arXiv:2503.03261, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.79, + 0.827, + 0.834 + ], + "angle": 0, + "content": "[1095] Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. Marco-o1: Towards open reasoning models for open-ended solutions. arXiv preprint arXiv:2411.14405, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.837, + 0.825, + 0.868 + ], + "angle": 0, + "content": "[1096] Yurui Zhao, Xiang Wang, Jiahong Liu, Irwin King, and Zhitao Huang. Towards geometry problem solving in the large model era: A survey. arXiv preprint arXiv:2506.02690, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[1097] Zhonghan Zhao, Wenwei Zhang, Haian Huang, Kuikun Liu, Jianfei Gao, Gaoang Wang, and Kai Chen. Rig: Synergizing reasoning and imagination in end-to-end generalist policy. arXiv preprint arXiv:2503.24388, 2025." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.829, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.935, + 0.512, + 0.948 + ], + "angle": 0, + "content": "105" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.043, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[1098] Zilong Zhao, Yao Rong, Dongyang Guo, Emek Gözlüklü, Emir Gülboy, and Enkelejda Kasneci. Stepwise self-consistent mathematical reasoning with large language models. arXiv preprint arXiv:2402.17786, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.14, + 0.827, + 0.196 + ], + "angle": 0, + "content": "[1099] Zirui Zhao, Wee Sun Lee, and David Hsu. Large language models as commonsense knowledge for large-scale task planning. Advances in Neural Information Processing Systems, 36:31967-31987, December 2023. URL https://openreview.net/pdf?id=ted747HURfX." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.201, + 0.827, + 0.246 + ], + "angle": 0, + "content": "[1100] Bowen Zheng, Xiaolei Wang, Enze Liu, Xi Wang, Lu Hongyu, Yu Chen, Wayne Xin Zhao, and Ji-Rong Wen. 
Deeprec: Towards a deep dive into the item space with large language model based recommendation. arXiv preprint arXiv:2505.16810, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.25, + 0.827, + 0.294 + ], + "angle": 0, + "content": "[1101] Chuanyang Zheng, Zhengying Liu, Enze Xie, Zhenguo Li, and Yu Li. Progressive-hint prompting improves reasoning in large language models. In AI for Math Workshop @ ICML 2024, June 2024. URL https://openreview.net/forum?id=UkFEs3ciz8." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.297, + 0.827, + 0.341 + ], + "angle": 0, + "content": "[1102] Chujie Zheng, Zhenru Zhang, Beichen Zhang, Runji Lin, Keming Lu, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. Processbench: Identifying process errors in mathematical reasoning. arXiv preprint arXiv:2412.06559, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.346, + 0.827, + 0.39 + ], + "angle": 0, + "content": "[1103] Da Zheng, Lun Du, Junwei Su, Yuchen Tian, Yuqi Zhu, Jintian Zhang, Lanning Wei, Ningyu Zhang, and Huajun Chen. Knowledge augmented complex problem solving with large language models: A survey. arXiv preprint arXiv:2505.03418, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.394, + 0.827, + 0.438 + ], + "angle": 0, + "content": "[1104] Ge Zheng, Bin Yang, Jiajin Tang, Hong-Yu Zhou, and Sibei Yang. Ddcot: Duty-distinct chain-of-thought prompting for multimodal reasoning in language models. Advances in Neural Information Processing Systems, 36:5168-5191, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.442, + 0.827, + 0.485 + ], + "angle": 0, + "content": "[1105] Hang Zheng, Hongshen Xu, Yuncong Liu, Lu Chen, Pascale Fung, and Kai Yu. Enhancing llm reliability via explicit knowledge boundary modeling. arXiv preprint arXiv:2503.02233, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.491, + 0.827, + 0.535 + ], + "angle": 0, + "content": "[1106] Jiani Zheng, Lu Wang, Fangkai Yang, Chaoyun Zhang, Lingrui Mei, Wenjie Yin, Qingwei Lin, Dongmei Zhang, Saravan Rajmohan, and Qi Zhang. Vem: Environment-free exploration for training gui agent with value environment model. arXiv preprint arXiv:2502.18906, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.539, + 0.827, + 0.596 + ], + "angle": 0, + "content": "[1107] Kunhao Zheng, Juliette Decugis, Jonas Gehring, Taco Cohen, benjamin negrevergne, and Gabriel Synnaeve. What makes large language models reason in (multi-turn) code generation? In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=Zk9guO19NS." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.601, + 0.827, + 0.686 + ], + "angle": 0, + "content": "[1108] Tianyu Zheng, Ge Zhang, Tianhao Shen, Xueling Liu, Bill Yuchen Lin, Jie Fu, Wenhu Chen, and Xiang Yue. OpenCodeInterpreter: Integrating code generation with execution and refinement. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 12834–12859, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.762. URL https://aclanthology.org/2024-findings-acl.762/." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.69, + 0.827, + 0.734 + ], + "angle": 0, + "content": "[1109] Xin Zheng, Jie Lou, Boxi Cao, Xueru Wen, Yuqiu Ji, Hongyu Lin, Yaojie Lu, Xianpei Han, Debing Zhang, and Le Sun. Critic-cot: Boosting the reasoning abilities of large language model via chain-of-thoughts critic. arXiv preprint arXiv:2408.16326, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.739, + 0.827, + 0.782 + ], + "angle": 0, + "content": "[1110] Zhi Zheng, Zhuoliang Xie, Zhenkun Wang, and Bryan Hooi. 
Monte carlo tree search for comprehensive exploration in llm-based automatic heuristic design. arXiv preprint arXiv:2501.08603, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.786, + 0.827, + 0.817 + ], + "angle": 0, + "content": "[1111] Jianyuan Zhong, Zeju Li, Zhijian Xu, Xiangyu Wen, and Qiang Xu. Dyve: Thinking fast and slow for dynamic process verification. arXiv preprint arXiv:2502.11157, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.821, + 0.827, + 0.864 + ], + "angle": 0, + "content": "[1112] Qihuang Zhong, Kang Wang, Ziyang Xu, Juhua Liu, Liang Ding, and Bo Du. Achieving> 97% on gsm8k: Deeply understanding the problems makes llms better solvers for math word problems. arXiv preprint arXiv:2404.14963, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.869, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[1113] Tianyang Zhong, Zhengliang Liu, Yi Pan, Yutong Zhang, Yifan Zhou, Shizhe Liang, Zihao Wu, Yanjun Lyu, Peng Shu, Xiaowei Yu, et al. Evaluation of openai o1: Opportunities and challenges of agi. arXiv preprint arXiv:2409.18486, 2024." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.512, + 0.948 + ], + "angle": 0, + "content": "106" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.829, + 0.149 + ], + "angle": 0, + "content": "[1114] Andy Zhou, Kai Yan, Michal Shlapentokh-Rothman, Haohan Wang, and Yu-Xiong Wang. Language agent tree search unifies reasoning, acting, and planning in language models. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=njwv9BsGHF." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.153, + 0.83, + 0.224 + ], + "angle": 0, + "content": "[1115] Aojun Zhou, Ke Wang, Zimu Lu, Weikang Shi, Sichun Luo, Zipeng Qin, Shaoqing Lu, Anya Jia, Linqi Song, Mingjie Zhan, and Hongsheng Li. Solving challenging math word problems using GPT-4 code interpreter with code-based self-verification. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=c8McWs4Av0." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.229, + 0.829, + 0.285 + ], + "angle": 0, + "content": "[1116] Changzhi Zhou, Xinyu Zhang, Dandan Song, Xiancai Chen, Wanli Gu, Huipeng Ma, Yuhang Tian, Mengdi Zhang, and Linmei Hu. Refinecoder: Iterative improving of large language models via adaptive critique refinement for code generation. arXiv preprint arXiv:2502.09183, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.291, + 0.83, + 0.362 + ], + "angle": 0, + "content": "[1117] Denny Zhou, Nathanael Scharli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc V Le, and Ed H. Chi. Least-to-most prompting enables complex reasoning in large language models. In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=WZH7099tgfM." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.367, + 0.829, + 0.41 + ], + "angle": 0, + "content": "[1118] Fan Zhou, Haoyu Dong, Qian Liu, Zhoujun Cheng, Shi Han, and Dongmei Zhang. Reflection of thought: Inversely eliciting numerical reasoning in language models via solving linear systems. arXiv preprint arXiv:2210.05075, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.415, + 0.829, + 0.458 + ], + "angle": 0, + "content": "[1119] Hengguang Zhou, Xinui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's\" aha moment\" in visual reasoning on a 2b non-sft model. 
arXiv preprint arXiv:2503.05132, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.463, + 0.83, + 0.521 + ], + "angle": 0, + "content": "[1120] Jin Peng Zhou, Charles E Staats, Wenda Li, Christian Szegedy, Kilian Q Weinberger, and Yuhuai Wu. Don't trust: Verify – grounding LLM quantitative reasoning with autoformalization. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=V5tdi14ple." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.525, + 0.829, + 0.569 + ], + "angle": 0, + "content": "[1121] Jin Peng Zhou, Kaiwen Wang, Jonathan Chang, Zhaolin Gao, Nathan Kallus, Kilian Q Weinberger, Kianté Brantley, and Wen Sun. q#: Provably optimal distributional rl for llm post-training. arXiv preprint arXiv:2502.20548, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.573, + 0.829, + 0.616 + ], + "angle": 0, + "content": "[1122] Kaiwen Zhou, Chengzhi Liu, Xuandong Zhao, Shreedhar Jangam, Jayanth Srinivasa, Gaowen Liu, Dawn Song, and Xin Eric Wang. The hidden risks of large reasoning models: A safety assessment of r1. arXiv preprint arXiv:2502.12659, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.622, + 0.83, + 0.678 + ], + "angle": 0, + "content": "[1123] Lexin Zhou, Wout Schellaert, Fernando Martínez-Plumed, Yael Moros-Daval, César Ferri, and José Hernández-Orallo. Larger and more instructable language models become less reliable. Nature, 634(8032):61–68, 2024. URL https://www.nature.com/articles/s41586-024-07930-y." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.683, + 0.83, + 0.725 + ], + "angle": 0, + "content": "[1124] Li Zhou, Ruijie Zhang, Xunlian Dai, Daniel Hershcovich, and Haizhou Li. Large language models penetration in scholarly writing and peer review. arXiv preprint arXiv:2502.11193, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.731, + 0.83, + 0.788 + ], + "angle": 0, + "content": "[1125] Ruochen Zhou, Minrui Xu, Shiqi Chen, Junteng Liu, Yunqi Li, LIN Xinxin, Zhengyu Chen, and Junxian He. AI for math or math for AI? on the generalization of learning mathematical problem solving. In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, 2024. URL https://openreview.net/forum?id=xlnvZ85CSo." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.793, + 0.83, + 0.865 + ], + "angle": 0, + "content": "[1126] Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. Webarena: A realistic web environment for building autonomous agents. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=oKn9c6ytLx." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.869, + 0.829, + 0.912 + ], + "angle": 0, + "content": "[1127] Xiangxin Zhou, Zichen Liu, Anya Sims, Haonan Wang, Tianyu Pang, Chongxuan Li, Liang Wang, Min Lin, and Chao Du. Reinforcing general reasoning without verifiers. arXiv preprint arXiv:2505.21493, 2025." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.83, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.935, + 0.512, + 0.948 + ], + "angle": 0, + "content": "107" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.091, + 0.829, + 0.135 + ], + "angle": 0, + "content": "[1128] Xiaofeng Zhou, Heyan Huang, and Lizi Liao. Debate, reflect, and distill: Multi-agent feedback with tree-structured preference optimization for efficient language model enhancement. arXiv preprint arXiv:2506.03541, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.139, + 0.829, + 0.183 + ], + "angle": 0, + "content": "[1129] Xin Zhou, Yiwen Guo, Ruotian Ma, Tao Gui, Qi Zhang, and Xuanjing Huang. Self-consistency of the internal reward models improves self-rewarding language models. arXiv preprint arXiv:2502.08922, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.187, + 0.829, + 0.232 + ], + "angle": 0, + "content": "[1130] Yang Zhou, Hongyi Liu, Zhuoming Chen, Yuandong Tian, and Beidi Chen. Gsm-infinite: How do your llms behave over infinitely increasing context length and reasoning complexity? arXiv preprint arXiv:2502.05252, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.235, + 0.829, + 0.28 + ], + "angle": 0, + "content": "[1131] Yifei Zhou, Song Jiang, Yuandong Tian, Jason Weston, Sergey Levine, Sainbayar Sukhbaatar, and Xian Li. Sweet-rl: Training multi-turn llm agents on collaborative reasoning tasks. arXiv preprint arXiv:2503.15478, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.283, + 0.829, + 0.341 + ], + "angle": 0, + "content": "[1132] Yufa Zhou, Shaobo Wang, Xingyu Dong, Xiangqi Jin, Yifang Chen, Yue Min, Kexin Yang, Xingzhang Ren, Dayiheng Liu, and Linfeng Zhang. Reasoning like an economist: Posttraining on economic problems induces strategic generalization in llms. arXiv preprint arXiv:2506.00577, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.345, + 0.829, + 0.39 + ], + "angle": 0, + "content": "[1133] Zhanke Zhou, Zhaocheng Zhu, Xuan Li, Mikhail Galkin, Xiao Feng, Sanmi Koyejo, Jian Tang, and Bo Han. Landscape of thoughts: Visualizing the reasoning process of large language models. arXiv preprint arXiv:2503.22165, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.394, + 0.829, + 0.438 + ], + "angle": 0, + "content": "[1134] Zhi Zhou, Tan Yuhao, Zenan Li, Yuan Yao, Lan-Zhe Guo, Xiaoxing Ma, and Yu-Feng Li. 
Bridging internal probability and self-consistency for effective and efficient lIm reasoning. arXiv preprint arXiv:2502.00511, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.442, + 0.829, + 0.473 + ], + "angle": 0, + "content": "[1135] Bin Zhu, Hailong Yin, Jingjing Chen, and Yu-Gang Jiang. Reasoning models are more easily gaslighted than you think. arXiv preprint arXiv:2506.09677, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.476, + 0.829, + 0.533 + ], + "angle": 0, + "content": "[1136] Dawei Zhu, Xiyu Wei, Guangxiang Zhao, Wenhao Wu, Haosheng Zou, Junfeng Ran, Xun Wang, Lin Sun, Xiangzheng Zhang, and Sujian Li. Chain-of-thought matters: Improving long-context language models with reasoning path supervision. arXiv preprint arXiv:2502.20790, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.538, + 0.829, + 0.569 + ], + "angle": 0, + "content": "[1137] Jason Zhu and Hongyu Li. Towards concise and adaptive thinking in large reasoning models: A survey. arXiv preprint arXiv:2507.09662, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.573, + 0.829, + 0.616 + ], + "angle": 0, + "content": "[1138] Junda Zhu, Lingyong Yan, Shuaiqiang Wang, Dawei Yin, and Lei Sha. Reasoning-to-defend: Safety-aware reasoning can defend large language models from jailbreaking. arXiv preprint arXiv:2502.12970, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.621, + 0.829, + 0.665 + ], + "angle": 0, + "content": "[1139] King Zhu, Hanhao Li, Siwei Wu, Tianshun Xing, Dehua Ma, Xiangru Tang, Minghao Liu, Jian Yang, Jiaheng Liu, Yuchen Eleanor Jiang, et al. Scaling test-time compute for llm agents. arXiv preprint arXiv:2506.12928, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.669, + 0.829, + 0.714 + ], + "angle": 0, + "content": "[1140] Kunlun Zhu, Hongyi Du, Zhaochen Hong, Xiaocheng Yang, Shuyi Guo, Zhe Wang, Zhenhailong Wang, Cheng Qian, Xiangru Tang, Heng Ji, et al. 
Multiagentbench: Evaluating the collaboration and competition of lIm agents. arXiv preprint arXiv:2503.01935, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.718, + 0.829, + 0.762 + ], + "angle": 0, + "content": "[1141] Rongzhi Zhu, Yi Liu, Zequn Sun, Yiwei Wang, and Wei Hu. When can large reasoning models save thinking? mechanistic analysis of behavioral divergence in reasoning. arXiv preprint arXiv:2505.15276, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.766, + 0.829, + 0.81 + ], + "angle": 0, + "content": "[1142] Tinghui Zhu, Kai Zhang, Jian Xie, and Yu Su. Deductive beam search: Decoding deducible rationale for chain-of-thought reasoning. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=S1XnUsqwr7." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.814, + 0.829, + 0.913 + ], + "angle": 0, + "content": "[1143] Xinyu Zhu, Junjie Wang, Lin Zhang, Yuxiang Zhang, Yongfeng Huang, Ruyi Gan, Jiaxing Zhang, and Yujiu Yang. Solving math word problems via cooperative reasoning induced language models. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 4471–4485, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.245. URL https://aclanthology.org/2023.acl-long.245/." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.829, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.936, + 0.512, + 0.948 + ], + "angle": 0, + "content": "108" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.042, + 0.31, + 0.072 + ], + "angle": 0, + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[1144] Zihao Zhu, Hongbao Zhang, Ruotong Wang, Ke Xu, Siwei Lyu, and Baoyuan Wu. To think or not to think: Exploring the unthinking vulnerability in large reasoning models. arXiv preprint arXiv:2502.12202, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.137, + 0.826, + 0.182 + ], + "angle": 0, + "content": "[1145] Zihao Zhu, Hongbao Zhang, Mingda Zhang, Ruotong Wang, Guanzong Wu, Ke Xu, and Baoyuan Wu. Bot: Breaking long thought processes of o1-like large language models through backdoor attack. arXiv preprint arXiv:2502.12202, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.184, + 0.827, + 0.215 + ], + "angle": 0, + "content": "[1146] Ren Zhuang, Ben Wang, and Shuifa Sun. Accelerating chain-of-thought reasoning: When goal-gradient importance meets dynamic skipping. arXiv preprint arXiv:2505.08392, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.216, + 0.828, + 0.302 + ], + "angle": 0, + "content": "[1147] Ziyu Zhuang, Qiguang Chen, Longxuan Ma, Mingda Li, Yi Han, Yushan Qian, Haopeng Bai, Weinan Zhang, and Liu Ting. Through the lens of core competency: Survey on evaluation of large language models. In Proceedings of the 22nd Chinese National Conference on Computational Linguistics (Volume 2: Frontier Forum), pages 88–109, Harbin, China, August 2023. Chinese Information Processing Society of China. URL https://aclanthology.org/2023.ccl-2.8/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.304, + 0.825, + 0.348 + ], + "angle": 0, + "content": "[1148] Alireza S Ziabari, Nona Ghazizadeh, Zhivar Sourati, Farzan Karimi-Malekabadi, Payam Piray, and Morteza Dehghani. Reasoning on a spectrum: Aligning llms to system 1 and system 2 thinking. arXiv preprint arXiv:2502.12470, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.35, + 0.827, + 0.406 + ], + "angle": 0, + "content": "[1149] Henry Peng Zou, Zhengyao Gu, Yue Zhou, Yankai Chen, Weizhi Zhang, Liancheng Fang, Yibo Wang, Yangning Li, Kay Liu, and Philip S Yu. Testnuc: Enhancing test-time computing approaches through neighboring unlabeled data consistency. arXiv preprint arXiv:2502.19163, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.41, + 0.827, + 0.455 + ], + "angle": 0, + "content": "[1150] Yuxin Zuo, Shang Qu, Yifei Li, Zhangren Chen, Xuekai Zhu, Ermo Hua, Kaiyan Zhang, Ning Ding, and Bowen Zhou. Medxpertqa: Benchmarking expert-level medical reasoning and understanding. arXiv preprint arXiv:2501.18362, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.828, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.935, + 0.512, + 0.948 + ], + "angle": 0, + "content": "109" + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09567/17e53201-29b3-43fd-8f2e-78d7b00a58a6_origin.pdf b/data/2025/2503_09xxx/2503.09567/17e53201-29b3-43fd-8f2e-78d7b00a58a6_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f386050431686988350388b083dd20514b792798 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/17e53201-29b3-43fd-8f2e-78d7b00a58a6_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e74008e30a58df4cecb6092e546785a2f104523621d4336ab24b5a717549cdb +size 6417696 diff --git a/data/2025/2503_09xxx/2503.09567/full.md b/data/2025/2503_09xxx/2503.09567/full.md new file mode 100644 index 0000000000000000000000000000000000000000..f0ea2e382f489d40c122c84e252206599e961055 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/full.md @@ -0,0 +1,1923 @@ +Qiguang Chen† Libo Qin‡ Jinhao Liu† Dengyun Peng† Jiannan Guan† Peng Wang‡ Mengkang Hu◇ Yuhang Zhou Te Gao† Wanxiang Che† LARG, + +$\dagger$ Research Center for Social Computing and Interactive Robotics, $\dagger$ Harbin Institute of Technology + +$^{\ddagger}$ School of Computer Science and Engineering, Central South University + +The University of Hong Kong + +Fudan University + +{qgchen,car}@ir.hit.edu.cn,lbqin@csu.edu.cn + +Project: https://long-cot.github.io/ + +Github: LightChen233/Awesome-Long-Chain-of-Thought-Reasoning + +![](images/3bcec7826e1fbcdb6dfe89578c968d54d801a27312caf0fc018f86ba59fa632d.jpg) + +# Abstract + +Recent advancements in reasoning with large language models (RLLMs), such as OpenAI-o1 and DeepSeek-R1, have demonstrated their impressive capabilities in complex domains like mathematics and coding. 
A central factor in their success lies in the application of long chain-of-thought (Long CoT) characteristics, which enhance reasoning abilities and enable the solution of intricate problems. However, despite these developments, a comprehensive survey on Long CoT is still lacking, limiting our understanding of its distinctions from traditional short chain-of-thought (Short CoT) and complicating ongoing debates on issues like "overthinking" and "inference-time scaling". This survey seeks to fill this gap by offering a unified perspective on Long CoT. Specifically, (1) We first distinguish Long CoT from Short CoT and introduce a novel taxonomy to categorize current reasoning paradigms. (2) Next, we explore the key characteristics of Long CoT: deep reasoning, extensive exploration, and feasible reflection, which enable models to handle more complex tasks and produce more efficient, coherent outcomes compared to the shallower Short CoT. (3) We then investigate key phenomena such as the emergence of Long CoT with these characteristics, including overthinking, and inference-time scaling, offering insights into how these processes manifest in practice. (4) Finally, we identify significant research gaps and highlight promising future directions, including the integration of multi-modal reasoning, efficiency improvements, and enhanced knowledge frameworks. By providing a structured overview, this survey aims to inspire future research and further the development of reasoning large language models1. + +# 1 Introduction + +In recent years, as shown in Figure 1, the emergence of reasoning large language models (RLLMs) such as OpenAI o1 [307] and DeepSeek R1 [227] has sparked a growing body of research into Long Chain-of-Thought (Long CoT) reasoning, greatly improving their mathematical reasoning, programming tasks, and multidisciplinary knowledge reasoning capabilities [696, 980, 722, 79, 961, 200, 1113, 793], even passing Turing Test [334]. 
This shift marks a significant departure from traditional approaches to task handling in large language models (LLMs) [1147, 619, 622, 599]. Unlike the shorter chain-of-thought (Short CoT) used in traditional LLMs, Long CoT reasoning entails a more detailed, iterative process of exploration and reflection within a given problem space by inference-time scaling [419, 733, 524]. This process has led to notable advancements in mathematical and logical reasoning, as well as in exploring how supervised fine-tuning (SFT) and reinforcement learning (RL) techniques can enhance the learning and exploration of extended reasoning chains [623, 550]. + +However, there is no comprehensive survey to systematically understand the main factors and recent efforts of Long CoT for RLLMs, which hinders the development of RLLMs. As a result, there are ongoing debates about the effectiveness of simple "inference-time scaling" for Longer CoT [864, 486] versus the argument that "over-thinking" from excessively long scaling can harm LLMs and introduce unnecessary complexity [103, 142, 357]. Moreover, some researchers argue that, when solving specific problems, there is no clear relationship between length and accuracy [886]. + +To address this gap, we provide an extensive and comprehensive survey of Long CoT. Specifically, as illustrated in Figure 2, we first define and examine the distinctions between Long CoT and traditional Short CoT, focusing on the following key aspects: (1) Deep Reasoning, which requires a sufficient depth of logical processing to manage an extensive set of logical nodes; (2) Extensive Exploration, which involves generating parallel uncertain nodes and transitioning from known to unknown logic; and (3) Feasible Reflection, which involves feedback and refinement of logical connections. 
These characteristics enable Long CoT paradigms to integrate more intricate reasoning and accommodate a broader range of logical structures, ultimately leading to more efficient and coherent outcomes. Subsequently, we systematically explore the underlying explanations for key phenomena associated with Long CoT, such as its emergence, the overthinking phenomenon, + +![](images/be56637e4c051b7d3d3a17014777899ed5d63d7f01c713db6f624e8d8196114d.jpg) +Figure 1: Evolution of selected Long CoT over the past three years, where colored branches represent different characteristics: deep reasoning, feasible reflection, and extensive exploration. Each characteristic is further divided into key areas: Deep reasoning includes its format and learning methods. Feasible reflection focuses on feedback and refinement techniques during reflection process as optimization strategies. Extensive exploration addresses scaling, internal, and external exploration as key improvements to Long CoT. + +inference-time scaling during testing, and the "Aha Moment," among others. To our knowledge, This is the first comprehensive survey dedicated to these specific topics. Finally, considering the extensive body of literature, we highlight promising areas for future research and suggest valuable open-resource frameworks and datasets that can serve as a foundation for future investigations. + +The main contributions of this work are as follows: + +- Systematic Distinction: In this work, we first introduce the concept of Long CoT reasoning and distinguish it from the traditional Short CoT, thereby providing a clear framework for understanding both paradigms and their respective characteristics. +- Explanation of Hot Phenomena: We systematically investigate the notable phenomena associated with Long CoT reasoning, such as overthinking, inference-time scaling, and the "Aha Moment", offering valuable insights into the cognitive processes involved in complex reasoning. 
+- Emerging Challenges and Frontiers: We explore the emerging challenges within the field of Long CoT reasoning and identify key research frontiers. Given the vast body of literature, we highlight areas where further inquiry could significantly advance the development of Long CoT methodologies. + +# 2 Discussion of Long CoT v.s. Short CoT + +This section formalizes the key differences between Long Chain-of-Thought (Long CoT) and Short Chain-of-Thought (Short CoT), emphasizing reasoning depth, revisiting connections, and logical node exploration [858]. These distinctions are clearly separate from System 1 and System 2 thinking. The comparison between Long CoT and Short CoT is framed within System 2, with Long CoT involving more thorough reasoning, reflection, and exploration, while Short CoT generally prioritizes shallow and efficient logic over exhaustive reasoning. + +# 2.1 Overview of Short CoT + +As illustrated by Figure 2, Short CoT is typically characterized by a shallow, linear reasoning process, where conclusions are drawn sequentially, often relying on a limited number of logical nodes [551]. This reasoning is usually rapid and straightforward, with simple, surface-level transitions and minimal + +Proof of Number Theory Problem: For any positive integer $n$ , there exists a positive integer $m$ such that $m^2 + 1$ is divisible by $n$ . + +![](images/f0eadf51633e7fb658ee5728c1487fe0163f55fb50102d3c0b34bdb3de0da945.jpg) +Figure 2: The differences between advanced Long CoT and traditional Short CoT are characterized by three key characteristics: deep reasoning, feasible reflection, and extensive exploration. Moreover, Long CoT integrates all these characteristics to achieve substantial logical efficacy. + +exploration of alternative paths, which restricts its generalizability [683]. 
Formally, given a reasoning model $\mathcal{R}$ , we can define the rationale of Short CoT $(\mathsf{C}\circ \mathsf{T}_S)$ as follows: + +$$ +\mathrm {C o T} _ {S} = \mathcal {R} \left(\left\{n _ {i} \right\} _ {i = 1} ^ {k} | (k \leq \mathcal {B} _ {s}) \wedge (j = 1 \Leftrightarrow \forall i \leq k, n _ {i} \rightarrow n _ {i + j}) \wedge (\forall i \neq j \leq k, n _ {i} \neq n _ {j})\right), \tag {1} +$$ + +where $n_1$ to $n_k$ represent a sequence of logical nodes, which naturally satisfy that $\forall i, n_i \to n_{i+1}$ . Here, $\mathcal{B}_s$ denotes the upper boundary on the number of logical nodes, as defined by Chen et al. [90]. In this paradigm, the reasoning progresses sequentially from one node to the next, with minimal revisitation of previous nodes and little exploration of alternative logical paths. + +# 2.2 Overview of Long CoT + +In contrast, Long CoT involves deeper reasoning, reflective analysis, and a broader exploration of logical structures. It facilitates reasoning across a wider range of logical steps, addressing both known and unknown elements of a problem [194, 858]. Building on this, Long CoT expands upon the constraints presented in Equation 1 based on tree structures by incorporating three critical components: deep reasoning, exploration, and reflection. + +These components play distinct yet complementary roles in enhancing cognitive processes. Deep reasoning ensures each logical step is executed rigorously, even within complex structures, fostering robust logic across intricate relationships. Exploration encourages the identification of new pathways, revealing potential avenues that may not be immediately obvious. Reflection enables iterative analysis and reassessment of conclusions, allowing reasoning to evolve throughout problem-solving. By distinguishing these three categories, Long CoT enhances its ability to address a broader range of problems with precision and depth. 
As shown in Figure 3, we will now discuss these key differences in detail. + +# 2.2.1 Deep Reasoning for Long CoT + +As shown by Figure 2, deep reasoning refers to the capability to perform deep and thorough logical analysis across multiple interconnected logical nodes, where Short CoT generally can never achieve. This capability is essential when tackling complex problems that require a massive number of logical deductions to arrive at a valid conclusion. To better define and understand deep reasoning, we frame it as a capability that primarily relaxes the first constraint in Equation 1, as expressed by the following: + +$$ +k \leq \mathcal {B} _ {s} \mapsto k \leq \mathcal {B} _ {l} \wedge \mathcal {B} _ {s} \ll \mathcal {B} _ {l}, \tag {2} +$$ + +where $\mathcal{B}_l$ represents the upper boundary for Long CoT reasoning, which can accommodate much more intricate logical nodes compared to the smaller boundary $\mathcal{B}_s$ for Short CoT. The larger boundary $\mathcal{B}_l$ alleviates issues related to insufficient depth in reasoning, thereby reducing the risk of generating unresolved answers or hallucinated responses in short-form reasoning. + +![](images/fa1bbeeb7a7a9e97707e957eb9cfc744f2a2eba4ab0e5a7c5f73282936c28213.jpg) +Figure 3: Taxonomy of Long CoT, which includes deep reasoning, feasible reflection, and extensive exploration methodologies. + +# Key Difference: Reasoning Depth + +- Short CoT typically addresses a limited set of logical nodes, involving shallow reasoning, and struggles with problems requiring complex or intricate logical structures. +- Long CoT is designed to accommodate a significantly larger set of logical nodes, allowing for deeper logic and more thorough analysis during the reasoning process. + +# 2.2.2 Extensive Exploration for Long CoT + +As shown by Figure 2, Long CoT encourages branching out to extensively explore uncertain or unknown logical nodes, thereby expanding the potential set of reasoning paths. 
This exploration is particularly critical when solving problems characterized by ambiguity, incomplete information, or multiple possible solutions [43, 1016, 871]. More specifically, we describe how extensive exploration primarily addresses the relaxation of the second constraint in Equation 1, which can be formalized as follows: + +$$ +j = 1 \Leftrightarrow \forall i \leq k, n _ {i} \rightarrow n _ {i + j} \mapsto \exists m, \forall i, \forall j \leq m, n _ {i} \rightarrow n _ {i + j}, \tag {3} +$$ + +where the condition indicates that for a logical node $n_i$ , there are $m$ nodes that are explored in parallel. The acceptability of parallel exploration allows for a more systematic approach, enabling the exploration of previously unconsidered logical paths. This, in turn, helps maximize the understanding of all possible solutions, ultimately leading to the correct final answer. + +# Key Difference: Exploration of Logical Nodes + +- Short CoT generally restricts exploration to a fixed set of logical nodes, often resulting in oversimplified reasoning and limited exploration. +- Long CoT explores more various paths, including uncertain or uncharted areas, fostering more nuanced and comprehensive problem-solving. + +# 2.2.3 Feasible Reflection for Long CoT + +As shown by Figure 2, Long CoT involves revisiting previous logical nodes to verify their connections are valid and accurate, and then correcting them or selecting an alternative logical path. Formally, feasible reflection relaxes the third constraint in Equation 1, which originally requires acyclic reasoning such that $n_i \neq n_j$ for all $i \neq j \leq k$ . 
In contrast, feasible reflection permits the reasoning path to return to a previously visited node, captured as: + +$$ +\forall i \neq j \leq k, n _ {i} \neq n _ {j} \mapsto \exists i < j \leq k, n _ {i} = n _ {j}, \tag {4} +$$ + +where this condition indicates that, for a logical node $n_{j-1}$ , the subsequent node is not limited to the original next node $\hat{n}_j$ . Instead, it may transition to $n_i$ (i.e., the next logical node becomes $n_j$ , where $n_j = n_i$ ). Practically, reflection implementation consists of two components: + +Feedback refers to evaluating both overall and intermediate outputs for correctness and quality, also known as critique or verification. It can be derived from external sources, validation checks, or by reflecting on prior conclusions within the reasoning process. Formally, at each step $n_i$ , a verification process $\mathcal{V}_i$ ensures the correctness, feasibility, and consistency of the reasoning. If an issue is identified, the process redirects $n_i$ to the nearest correct node $n_j$ , where $j < i$ . This relationship is formalized as: + +$$ +\mathcal {F} _ {i}, n _ {j} \leftarrow \operatorname {F e e d b a c k} \left(\mathrm {C o T} _ {L} ^ {i}\right) \tag {5} +$$ + +where $\mathrm{CoT}_L^i = \{n_1,\dots ,n_i\}$ represents the current logical path up to the $i$ -th logical node for Long CoT. + +Refinement involves adjusting intermediate steps or modifying the logical flow to correct inconsistencies or address gaps based on the given feedback. This process can be expressed mathematically as follows: + +$$ +\widetilde {n} _ {i + 1} \leftarrow \operatorname {R e f i n e} \left(n _ {i + 1} \mid \mathrm {C o T} _ {L} ^ {i}, \mathcal {F} _ {i}, n _ {j}\right), \tag {6} +$$ + +where $\widetilde{n}_{i+1}$ represents the refined version of the subsequent logical node $n_{i+1}$ , according to the current logic $\mathbb{C} \circ \mathbb{T}_L^i$ , feedback result $\mathcal{F}_i$ , and previous logical node $n_j$ . 
+ +Overall, incorporating reflection ensures that errors are identified and corrected promptly. This capability enables LLMs to quickly shift to alternative reasoning paths or correct their current trajectory. By doing so, error propagation is minimized, resulting in more accurate conclusions. + +# Key Difference: Feedback & Refinement + +- Short CoT typically moves in a straightforward, non-repetitive manner from one node to the next, so that cannot correct their logic. +- Long CoT allows for revisiting and revising earlier decisions by feedback and refinement, ensuring that estimizable and prior logical conclusions during the reasoning progress. + +# 2.2.4 Unified Application and Development History of Three Capabilities + +The Long CoT discussed here represents a unified reasoning system that seamlessly integrates and applies three key capabilities: deep reasoning, reflective mechanisms, and exploration capabilities. In contrast, during the Short CoT era, these capabilities developed independently, each evolving in isolation. + +As shown in Figure 2, early efforts primarily focused on enhancing deep reasoning within traditional CoT paradigms. This was followed by the gradual introduction of reflective mechanisms, which were initially based on human-designed pipelines. Over time, exploration capabilities were added, and + +these components were ultimately merged, giving rise to the modern concept of Long CoT, a unified approach to reasoning that seeks to enhance all three capabilities in harmony. + +The progression of Long CoT is gradual, rather than a sudden emergence through isolated models like o1 [307] and R1 [227]. Instead, it develops gradually. For example, earlier systems, such as ToT [955], enhance exploration but lack reflective mechanisms, disqualifying them as Long CoT [95]. While GoT [48] incorporates self-reflection based on ToT, its original model still lacked robust deep reasoning, preventing it from qualifying as Long CoT at that time. 
It is also notable that modern Long CoT systems, often neglect earlier technologies. This article addresses this gap by tracing the evolution of each capability, with the final section offering a comprehensive analysis of the integrated Long CoT system. + +In summary, Long CoT and Short CoT represent distinct paradigms. Long CoT features a deeper, broader, and more reflective reasoning process, enhancing both accuracy and coherence. Short CoT, by contrast, is better suited to simpler, well-defined problems. This distinction highlights the scalability and adaptability of Long CoT, making it particularly effective for more complex reasoning. + +# Key Difference: Unified Application of Three Capabilities + +It is important to highlight that Long CoT integrates these three distinct capabilities to perform complex reasoning. In contrast, traditional Short CoT optimization typically focuses on only one of these characteristics. + +# 3 Long CoT Analysis & Evaluation + +# 3.1 Analysis & Explanation for Long CoT + +Research on Long CoT has significantly enhanced RLLMs by improving reasoning accuracy, reducing errors, and supporting dynamic decision-making. However, several phenomena and their corresponding mechanisms remain inadequately summarized. This section addresses key topics, including the mechanisms of Long CoT and their underlying principles [644, 63, 545, 642]. Methodologically, two main perspectives have emerged to explain Long CoT: (1) External Behavior Analysis (§ 3.1.1) and (2) Internal Mechanism Analysis (§ 3.1.2). + +# 3.1.1 Long CoT External Behavior Analysis + +The primary research stream focuses on explaining RLLM behaviors for Long CoT [25]. As illustrated in Figure 4, six key phenomena are identified and discussed for Long CoT in this part. + +Long CoT Emergence Phenomenon Research shows that contextual examples improve large models' generative abilities by guiding the formation of reasoning chains [1012, 671, 417, 343, 532, 846, 1017, 1141]. 
Wang et al. [759] and Lippmann and Yang [461] demonstrate that these examples standardize reasoning chain generation relevant to the answers both in in-context-learning and supervised-finetuning. In an experiment by Madaan et al. [538], removing problem-specific entities from contextual examples, while retaining only the logical structure, led to similar performance as using complete examples, highlighting the logical structure imitation of Long CoT during inference. From a learning perspective, Ye et al. [963] analyzes and reveals the three-stage developmental trajectory of Long CoT: early memorization, followed by in-distribution generalization, and ultimately cross-distribution generalization, thereby enabling the model to exhibit Long CoT capabilities. + +More recently, Stechly et al. [688] and Wang and Zhou [815] have shown that modifying the decoding process or designing specific prompts can activate the Long CoT within pre-trained models. They propose that CoT is embedded during pre-training and requires specific activation [941]. Further, Sadr et al. [642] focus the Long CoT source from the training data, and build on this with the notion of "model attribution", to specifically identify the training data most influential for specific outputs. Building on this, Guo et al. [227] and Xie et al. [886] investigate using rule-based reinforcement learning to directly activate Long CoT during pre-training, aiming to enhance performance [881]. Furthermore, Gandhi et al. [194] identify four key cognitive behaviors, including verification, backtracking, sub-target setting, and backward chaining, which successfully facilitate Long CoT. Qwen series models [926] inherently demonstrate these behaviors, which can be easily triggered by rule-based reinforcement. 
In contrast, the models of Llama series [168] lack these + +![](images/5a5b622a5ef9a492838399c45ff5d29022e17e93ea38f8784aa310a395d4009d.jpg) +Figure 4: Analysis of the six classic phenomena of Long CoT external behavior: (a) emergence of Long CoT in current RLLMs; (b) reasoning boundaries and limitations of current Long CoT systems; (c) overthinking caused by scaling beyond RLLMs' reasoning boundaries, leading to performance decay; (d) inference-time scaling, discussing mainstream scaling methods, corresponding scaling laws and their limitations; (e) use of process reward model (PRM) or outcome reward model (ORM); (f) exploration of the "aha" moment and its underlying causes. + +![](images/b58ea3cfdd162d9e8dc0a98bea568dac497bec49f930188fabfb39d4a8af9188.jpg) + +![](images/7574cfd5bdc73debbbe23c4cd13dc43c38b3f705661075e1c69e68c8876576bc.jpg) + +![](images/7acf371a734b42dff8be38ed39013e080e5d0020e7a712fdcc41abb09ba80b65.jpg) + +capabilities and thus requires example-based reinforcement learning to improve significantly [65]. Moreover, Wang et al. [812] identify a pretraining scaling law that explains how increasing calculation size in RLLMs enhances their reasoning capabilities. Wang et al. [796] further explore the scaling law of Long CoT, showing that more fine-grained Long CoT granularity leads to more efficient and effective generalization performance. + +Reasoning Boundary Phenomenon Recent research has highlighted the upper bounds and limitations of RLLMs across various reasoning tasks [303, 283, 684, 261, 185, 252]. Specifically, Bi et al. [53] investigate these bounds in code generation, showing that RLLMs struggle with tasks that exceed certain complexity thresholds [600], especially when imitating Long CoT samples of varying complexity. In the context of upper-bound performance, Merrill and Sabharwal [548] and Li et al. [430] focus on single-step arithmetic tasks, concluding that model performance is constrained by input length. Moreover, Feng et al. 
[177] propose a mathematical model indicating that fixed-size models cannot produce accurate numerical answers beyond specific limits. However, increasing the number of reasoning steps improves a model's capability to solve more complex problems. + +Inspired by these explorations, Chen et al. [90] first define the "reasoning boundary" phenomenon and quantify these limits, showing that surpassing an RLLM's reasoning capacity leads to performance decline [92]. Similarly, Zhou et al. [1130] introduce GSM-Infinite, linking different upper limits to accuracy levels. Chen et al. [90] also examine the interaction between these boundaries across tasks of varying complexity, providing insights into the effectiveness of Long CoT strategies [1085]. Moreover, Amiri et al. [12] propose a "tight lower bound" for Long CoT further guiding reasoning error reductions. Further, Baeumel et al. [28] suggest that due to its reliance on a single-digit lookahead heuristic, there are inherent boundaries in performing addition with multiple operands, which thus reveals a fundamental limitation of LLMs in scaling to more complex numerical reasoning. Liu et al. [483] further investigate the role of reinforcement learning in expanding these reasoning boundaries instead of relying solely on pretraining capabilities. + +Overthinking Phenomenon Research has highlighted the overthinking phenomenon [103, 330, 574, 142, 357, 595], where performance improves with longer reasoning chains up to a threshold, after which it declines. In contrast, Xie et al. [886] and Ma et al. [534] find no significant correlation between reasoning length and accuracy. To explain this, one line of research attributes the gains to Long CoT strategies [21, 441], like avoiding "snowball errors" [192]. Alternatively, Chen et al. [90] and Wolf et al. [851] highlight a performance drop when the reasoning boundaries are exceeded, providing an explanation for the overthinking phenomenon. 
This suggests that reasoning length and logical complexity should be kept below a certain boundary [1080]. Building on this, Wu et al. [867] mathematically determine the feasible reasoning length for Long CoT. Finally, Chen et al. [93] introduces Ohm's law of Long CoT, which accurately predicts and controls performance. + +Inference-Time Scaling Phenomenon Recent advances in inference-time scaling algorithms [524, 843] have garnered significant attention, particularly for their ability to extend reasoning length and improve performance [524, 455, 875]. Specifically, Brown et al. [57] identify a phenomenon called "Large Language Monkeys", in which a series of reasoning tasks show that with enough trials, a correct result can be achieved. Additionally, o1 [307] and R1 [227] demonstrated that directly scaling the length of model inference improves final performance. + +To understand inference-time scaling, we will discuss these two paradigms: (1) Sequential Scaling: Sequential scaling involves increasing the reasoning path length. While this can enhance performance, studies by Jin et al. [330] show that, beyond a certain point, longer reasoning paths can degrade performance due to error accumulation. They suggest an optimal path length that depends on the model's capabilities and task complexity [15, 652, 31]. Furthermore, Chen et al. [90] and Wu et al. [867] explain that excessive exploration lengths beyond the RLLM's inherent reasoning boundary lead to performance decay, which guides RLLMs for deeper reasoning capabilities [32]. (2) Parallel Scaling: Parallel scaling involves performing multiple reasoning steps and verifying the results. While it shows promise, Parashar et al. [583] and Wang et al. [820] argue that simply increasing inference time does not guarantee improved performance. Wu et al. [864] show that the computational FLOPs $N$ of inference are correlated with the lower bound of performance error, which scales with $\log N$ . Additionally, Chen et al. 
[93] establish an upper bound for parallel scaling, showing that RLLMs cannot exceed Pass@k verification through various verifiers. They further argue that sampling optimization cannot exceed the model's internal reasoning limitations, demonstrating that for $N$ samples, accuracy is proportional to $\frac{m}{(k / \log N + b)^2}$ , where $m$ , $k$ , and $b$ are model-dependent constants. + +PRM & ORM Selection Phenomenon As RLLMs evolve, it is crucial to navigate the debate around the selection between process supervision and outcome supervision, two key reinforcement learning paradigms for complex reasoning tasks. The phenomenon of choosing between these two approaches has become a pivotal issue, as it is essential to differentiate and decide which supervision strategy is more suitable for specific tasks [899, 187, 1059]. While process supervision is intuitively advantageous for long-term reward assignments, the exact relationship between the two approaches remains unclear. It is commonly believed that process supervision is more challenging due to the trajectory-level coverage problem, which demands significant effort to collect fine-grained supervision data [1102, 679]. Additionally, PRM faces the issue of reward hacking [13, 152, 573, 30, 399], where agents exploit flaws in the reward function to produce unintended behaviors [227]. Addressing this to surpass rule-based reward systems has become an important research area [227, 886, 594]. Furthermore, Lampinen et al. [368] and Tan [708] establish a causal link between intermediate steps and final answers in qualitative experiments. Building on this, Jia et al. [317] demonstrate that, under the standard data coverage assumption, reinforcement learning with outcome supervision is not statistically more challenging than process supervision, aside from polynomial factors. More strictly, He et al. [247] mathematically demonstrate that outcome-level rewards suffice for online reinforcement learning in RLLMs. 
+ +Aha Moment Phenomenon Earlier, Guo et al. [227] demonstrated that direct RL using rule-based rewards can trigger the aha moment, fostering natural self-reflection without supervision [172]. Following this, Team [721], Xie et al. [886] replicate this phenomenon. Further, Zhou et al. [1119] and Meng et al. [547] further extend this phenomenon to multimodal scenarios. However, Liu et al. [498] argue that the aha moment may not emerge in R1-Zero-like training. Instead, they observe that self-reflection patterns, such as superficial self-reflection (SSR), appear at epoch 0, the stage of base models. In this case, self-reflections do not necessarily lead to correct answers. Upon closer examination of R1-Zero training via RL, they find that the increasing response length results not from self-reflection, but from RL optimizing well-designed rule-based rewards. Moreover, Yang et al. [939] demonstrate that the "aha moment" is externally marked by increased use of anthropomorphic language during self-reflection and a dynamic adjustment of uncertainty in response to problem difficulty. This process enables the model to maintain reasoning without succumbing to "Reasoning Collapse." Internally, it is characterized by a clear distinction between anthropomorphic traits and logical reasoning, with anthropomorphic language intensifying as the problem becomes more complex. + +Reinforcement Learning Entropy Phenomenon In reinforcement learning for Long CoT, the entropy mechanism is a crucial factor influencing the performance of RLLMs. Policy entropy measures the diversity and exploratory strength of a model's outputs. By managing this entropy + +effectively, a model preserves exploration and thus excels on complex reasoning tasks. Earlier, Jang and Kim [310] investigate how initial entropy affects exploration in deep RL and proposed an entropy-aware initialization to encourage effective exploration. Building on this, Zhang et al. 
[1036] developed an Entropy-Regularized PRM that balances policy updates against large deviations from the starting distribution, thereby improving reasoning. Cheng et al. [116] found that high-entropy regions correlate positively with three exploratory reasoning behaviors: (1) key tokens linking logical steps, (2) self-verification and correction, and (3) rare behaviors underrepresented in the base model. Most recently, Agarwal et al. [5] introduced an Entropy Minimization method and demonstrated its strong impact on LLM performance in mathematical, physical, and coding tasks. + +However, recent research indicates that, during early training, policy entropy declines sharply, causing the model to converge prematurely on specific output patterns and limiting further reasoning improvement [144]. In reinforcement learning, policy entropy $(H)$ and downstream task performance $(R)$ follow an exponential relation: $R = -a\cdot e^{H} + b$ , so a drop in entropy produces a rapid performance decline until saturation. This "policy entropy collapse" is common without entropy control, as reduced entropy constrains exploration and stalls reasoning gains [144]. To counter this collapse, two methods, Clip-Cov and KL-Cov, regulate entropy by constraining updates on high-covariance tokens. Clip-Cov clips their update magnitudes, whereas KL-Cov imposes a Kullback-Leibler penalty. Empirical results show both techniques prevent collapse and enhance reasoning performance [144]. + +# 3.1.2 Long CoT Internal Mechanism Analysis + +The second stream of research investigates the internal mechanisms of Long CoT-related RLLMs. + +Reasoning Internal Mechanism Recent studies have explored the internal mechanisms underlying the coherent rationale outputs of Long CoT, with particular emphasis on attention mechanisms [675, 632]. These studies primarily examine neural substructures in RLLMs, framing CoT reasoning from a white-box perspective [819, 992, 233, 169]. 
Weston and Sukhbaatar [849] introduces the concept of System 2 Attention (S2A), which demonstrates Long CoT generation by selectively focusing attention on relevant information. Additionally, Li et al. [407] explore gradient distributions between direct output and Long CoT layers, revealing that Long CoT layers help maintain stability by distinguishing relevant from irrelevant reasoning [840]. Finally, Zhang et al. [1068] conceptualize RLLMs as finite state automata, offering further insight into how internal dynamics influence external behavior. Despite Short CoT's struggles with self-correction, Bertolazzi et al. [47] show that these models rely on consistency heads (attention heads) to assess the alignment of numerical values in arithmetic solutions through internal shortcuts. + +Knowledge Incorporating Mechanism Current RLLMs primarily focus on mathematics and coding but have shown potential for generalization to other knowledge-rich domains, sparking growing interest in the mechanism for integrating domain-specific knowledge into Long CoT [860, 886, 1105]. Prystawski et al. [609] suggest that generative models store entity knowledge learned during pre-training independently, with the reasoning process in Long CoT linking this knowledge across entities. Radha and Goktas [630] recently introduced the Probabilistic Mixture Model (PMM), which categorizes model outputs into reasoning, memorization, and guessing. They also propose an Information-Theoretic Consistency (ITC) analysis to quantify the relationship between model confidence and strategy selection. Additionally, Jin et al. [331] define "Concept Depth" as the lowest layers at which complex concepts are understood, demonstrating varying levels of knowledge integration in RLLMs. Ou et al. 
[572] examine RLLM knowledge internalization through knowledge loop evolution, arguing that new knowledge acquisition is shaped by its connection to existing knowledge, with the loop evolving from formation to optimization and from shallow to deep. + +# 3.2 Long CoT Evaluations + +# 3.2.1 Metrics + +In benchmarking, various metrics assess model performance across reasoning tasks, each focusing on different aspects of reasoning ability. These metrics evaluate both RLLMs' effectiveness in achieving desired outcomes and their learning efficiency. As a result, metrics for RLLMs have gained increasing attention in recent research. For mathematical or code-related tasks, three key metrics are commonly used: Accuracy, Pass@k, and Cons@k based on regex extraction: + +Accuracy measures the proportion of correct outputs. +- Pass@k evaluates the likelihood of generating at least one correct solution within $k$ attempts. +- Cons@k assesses consistency by determining the model's ability to consistently produce correct or logically coherent solutions across multiple attempts. + +In scientific or commonsense question-answering tasks, evaluation often uses Exact Match (EM) and Accuracy based on regex extraction, where EM determines whether the model's output exactly matches the expected solution. + +For feedback techniques like ORM or PRM, Rank and Best-of-N metrics are often used: + +- Rank measures whether the reward model correctly prioritizes the best reasoning processes from the top $k$ candidates. +- Best-of-N selects the highest-scoring solution from $N$ generated reasoning trajectories, indirectly measuring the reward model's effectiveness based on final outcomes. + +# 3.2.2 Decoding Strategies + +Decoding strategies are essential for controlling the inference process. Common approaches include Greedy Decoding, Beam Search, and Major@k. Both Greedy Decoding and Beam Search limit the sampling range to reduce randomness, guiding the model toward more consistent outputs. 
In contrast, Major@k identifies the most reliable solution by selecting the one with the highest consistency from a set of $k$ candidate solutions. + +# 3.2.3 Benchmarks + +In the realm of Benchmarks, the focus lies on assessing the reasoning capabilities of RLLMs across diverse domains. There are two primary categories: (1) Outcome Benchmarks, which focus on the holistic view of Long CoT reasoning, and (2) Process Benchmarks, which concentrate on the local view of the Long CoT process or individual capabilities. + +Outcome Benchmarks In the realm of Outcome Benchmarks, the first focus lies on evaluating the logical reasoning capabilities: + +- Complex Mathematics: A central focus in complex mathematics is evaluating benchmarks like GSM8K [141] and MATH [253], which assess basic mathematical problem-solving abilities [1125, 1112]. Recent additions, such as AIME 2024 [8], AIME 2025 [571], MATH-500 [449], AMC 2023 [9], USAMO [598], OlympiadBench [239], and OlympiadArena [298], expand the evaluation of LLM performance in mathematics. Moreover, Putnam-AXIOM [224] and FrontierMath [210] introduce more complex problems that challenge future reasoning systems. Additionally, ThinkBench [291] and MATH-Perturb [288] focus on robust evaluation for Long CoT [38, 987]. +- Complex Coding: Complex coding benchmarks are also vital, with competitions like Codeforces, SWEbench [327], CodeContests [427], and LiveCodeBench [309] evaluating LLM coding and problem-solving skills. Notable additions such as MHPP [148], ProBench [934], HumanEval Pro, MBPP Pro [993], and EquiBench [833] enhance the scope and complexity of coding challenges. Moreover, some studies have explored applying these benchmarks in real-world code development scenarios for automatic code generation and evaluation [243, 744]. +- Commonsense Puzzle: Commonsense puzzle benchmarks, including LiveBench [850], BIG-Bench Hard [705] and ZebraLogic [450], assess models' ability to reason about commonsense situations. 
The ARC [131] and DRE-Bench [947] are often viewed as challenging commonsense-based AGI tests. JustLogic [87] further contributes to the evaluation of deductive reasoning and commonsense problem-solving. Moreover, Li et al. [382] introduce QuestBench, a benchmark designed to evaluate the ability of RLLMs to generate insightful and meaningful questions. + +The second focus area concerns Knowledge Benchmarks, essential for evaluating a model's capability in complex reasoning across various tasks for out-of-distribution evaluation [776]: + +- Scientific Reasoning: Scientific Reasoning benchmarks, such as GPQA Diamond [637], MMLU-Pro [821], and SuperGPQA [165], assess multi-domain reasoning in fields like chemistry, biology, and physics [157]. These benchmarks test models' ability to not only accumulate knowledge + +but also integrate it for problem-solving. Humanity's Last Exam (HLE) [602] further challenges models by requiring deep interdisciplinary reasoning across scientific disciplines. Further, Chung et al. [140] propose TPBench to evaluate the effectiveness of RLLMs in solving theoretical physics problems. + +- Medical Reasoning: In the realm of Medical Reasoning, the need for complex, domain-specific, and accurate reasoning is paramount [1094, 1024, 905, 589]. Benchmarks, such as MedQA [328], JAMA Clinical Challenge [76], LLMEval-Med [1049] and Medbullets [76], simulate diagnostic and treatment decision-making processes, reflecting real-world medical practice. These benchmarks evaluate a model's handling of medical knowledge and reasoning, from diagnosis to treatment planning. Additionally, MedXpertQA [1150] introduces a comprehensive evaluation framework combining text and multimodal data, specifically assessing AI's reasoning capabilities in healthcare. + +# 3.2.4 Process Evaluations + +Deep Reasoning Benchmarks Recent progress in RLLMs underscores the need for specialized benchmarks to evaluate their deep reasoning abilities in Long CoT [375, 1133]. 
Notably, Lin et al. [450] introduce ZebraLogic, a framework for assessing logical reasoning, especially in complex non-monotonic scenarios. Similarly, BigGSM [90] and GSM-Ranges [670] focus on perturbing numerical values to test logical and arithmetic reasoning in edge cases beyond the models' training distribution. ROSCOE [212], ReCEval [606], DiVeRSe [425], HLV [71], and CoT-Kinetics [51] are designed to assess each step in the deep reasoning process during Long CoT tasks. + +Exploration Benchmarks Several studies assess RLLMs' exploration capabilities in Long CoT tasks. Specifically, Sys2Bench [583] evaluates the exploration and scaling abilities of RLLMs, emphasizing generalization across diverse tasks. BanditBench [566] extends this by testing model performance in interactive environments, offering insights into practical applications. Additionally, Heyman and Zylberberg [254] introduce a graph coloring problem to assess reasoning and spatial exploration in complex problem-solving scenarios. + +Reflection Benchmarks Reflection benchmarks measure RLLMs' ability to identify, reflect upon, and correct errors in Long CoT reasoning. These benchmarks fall into two categories: feedback and refinement. (1) Feedback Benchmark: These benchmarks assess the ability of LLMs to detect errors and respond to feedback for improvement. For example, Lambert et al. [367] introduce RewardBench to evaluate RLLMs' reward capabilities. This framework is extended by Multimodal RewardBench [960], and CodeCriticBench [1025] to include multimodal and code contexts, respectively. Benchmarks such as ProcessBench [1102], PRMBench [679], MR-Ben [1021], and DeltaBench [250] focus on error detection and correction across various tasks at the step level. Additionally, ReaLMistake [337] and JudgeBench [709] address more real-world error evaluation. (2) Refinement Benchmark: These benchmarks focus on error correction in complex tasks. 
CriticBench [456] assesses critique-correction capabilities, while MLDebugging [287] and ErrorRadar [922] specialize in coding and multimodal reasoning error detection and refinement, respectively. FinerReason [72] introduces a commonsense puzzle for broader feedback and refinement evaluations. Medec [1] adapts error correction to healthcare, addressing medical issues. + +# 3.2.5 Advanced Evaluation + +Agentic & Embodied Reasoning Agentic and Embodied reasoning requires models to demonstrate an understanding of real-world interactions, tool use, and adaptive reasoning in response to change. To assess real-world understanding, Wang et al. [798] introduce a benchmark that evaluates agents' ability to reason about physical concepts. Zhang et al. [1064] extend this by assessing agents' interactions with real-world physics. Additionally, realistic tasks often demand complex planning and tool usage, necessitating benchmarks to evaluate agent reasoning. These benchmarks assess agents' abilities to navigate and complete tasks in digital environments. Building on this, Huang et al. [283] propose a framework for evaluating decision-making in multi-agent, competitive settings. Nath et al. [562] introduce ToolComp, a benchmark designed to evaluate multi-step tool-use reasoning. To analyze adaptive reasoning in the face of real-world change, OSWorld [887], CogAgent [260], Mobile-Agent-E [828], WebShop [954], WebArena [1126], WGSR-Bench [972], and WebGames [735] assess AI systems across domains such as operating systems, mobile GUIs, browser tasks, and interactive + +entertainment [1106, 780, 512, 552]. Hu et al. [272] present Text2World, which evaluates agents' ability to generate interactive environments from text to test agent adaptability [995]. + +Multimodal Reasoning Multimodal reasoning refers to a system's ability to integrate and reason across diverse input types, including text and images [316]. 
This capability is crucial for solving complex problems that require information from diverse formats. + +- Complex Mathematics: Mathematical reasoning often integrates both textual and visual components, such as equations, graphs, or diagrams [921]. Specifically, challenges like MathVista [508], MathVision [782], MathVerse [1054], M3CoT-Math [91], CMMaTH [433], EnigmaEval [763], CoMT-Geometry [125], and PGPS9K [1050] aim to advance multimodal reasoning in mathematics, improving the evaluation of multimodal Long CoT logic. +- Complex Code: The second area of focus involves code-related reasoning, where systems interpret textual descriptions and code snippets. Benchmarks like HumanEval-V [1035], Code-Vision [767], Plot2Code [852], and ChartMimic [931] evaluate systems' capabilities to generate or interpret code from natural language and multimodal inputs for assessing systems that integrate natural language processing with programming tasks. +- **Complex Science:** This area involves integrating scientific texts with related diagrams or experimental data. Benchmarks like ScienceQA [507], M3CoT-Science [91], BMMR [874], and ScienceBoard [698] evaluate how well models combine science information with Long CoT reasoning across various scientific domains [966]. Further, Guo et al. [229] propose MolPuzzle for the evaluation of molecular structure elucidation. +- Commonsense Puzzle: This area focuses on commonsense reasoning, where systems combine reasoning cues and images to make deeper conclusions. Chen et al. [91] introduce M3CoT-Commonsense, which incorporates commonsense Long CoT reasoning for complex multimodal interactions. Further, PuzzleVQA [128], MMReason [953] and LEGO-Puzzles [711] focus more on abstract and spatial puzzle reasoning, respectively. Additionally, Wang et al. 
[760] propose two benchmarks: Clue-Visual Question Answering (CVQA), which tests visual comprehension through three task types, and Clue of Password-Visual Question Answering (CPVQA), which features two task types focusing on the interpretation and application of visual data. + +AI for Research Recent advancements in AI have significantly advanced scientific research [94, 1124, 817, 215], with platforms like SciWorld [798] improving the research process. Simultaneously, Pricope [608] and Chan et al. [67] introduce a machine-learning platform to evaluate the potential of RLLMs in automating experiments. Several studies also examine RLLMs' ability to generate innovative research ideas. For instance, Si et al. [672] conduct evaluations with over 100 NLP researchers to assess RLLMs' creativity, revealing notable limitations [404, 856, 726]. Additionally, Li et al. [434] introduce SolutionBench, a benchmark for assessing systems' ability to generate feasible solutions for complex engineering problems. + +# 4 Deep Reasoning for Long CoT + +Deep reasoning capabilities primarily require profound depth and comprehensiveness in cognitive and reasoning processes. In the absence of such capabilities, RLLMs suffer significant performance declines [758, 823]. Current methods for enhancing deep reasoning can be categorized into two main approaches: (1) Deep Reasoning Format ( $\S$ 4.1), which involves utilizing various reasoning execution formats to maximize the reasoning step length $k$ within reasoning boundary $\mathcal{B}_l$ in Equation (2), by selecting the most suitable reasoning format; and (2) Deep Reasoning Learning ( $\S$ 4.2), which focuses on improving the model's internal capabilities to enhance its deep reasoning abilities, thereby extending the reasoning boundary $\mathcal{B}_l$ in Equation (2) intrinsically. 
+ +# 4.1 Deep Reasoning Format + +As illustrated in Figure 5, deep reasoning formats can be categorized into three main types: natural language ( $\S$ 4.1.1), structured language ( $\S$ 4.1.2), and latent-space reasoning ( $\S$ 4.1.3), the latter of which is further subdivided into token-, vector-, and manager-driven latent reasoning. The reasoning performance across these formats is presented in Table 1. + +# (a) Natural Language Deep Reasoning + +To predict the output of the given input for Conway's Game of Life, we need to apply the rules of the game to each cell on the board. The rules are as follows: + +1. Any live cell with fewer than two live neighbors dies (underpopulation)... +Given Input Board: ... +$\spadesuit$ Step-by-Step Analysis: ... +$\spadesuit$ Final Output: After applying the rules to each cell... + +![](images/ff164d8152d0e42a100061acaca7da8e5deb82846df7779c8c7d61fa44616288.jpg) + +# (b) Structured Language Deep Reasoning + +![](images/5627b8fe0330637f914d05c2ea3b75f4df43c678ec6ae3f0e9b7da8f94f4f43f.jpg) + +import necessary packages from collections import Cou + +import necessary packages + +from collections import Counter + +all class and function definitions in the code + +file, if any + +class Solution(object): + +defgameOfLifeInfinite(self, live): ctr = Counter((I, J) for i, j i + +![](images/998c396b19ab3cb07b28b1eb72b14b5078de0675ec62ef33294a59363f34d6e2.jpg) + +# (c) Latent Space Deep Reasoning + +![](images/823ca26e30f5429ff0ae86df5e048ed2430dd9bdc62c2d874445fe64c1774d87.jpg) +Figure 5: Three main categories of deep reasoning formats: natural language, structured language, and latent-space reasoning (subdivided into token-, vector-, and manager-driven latent reasoning), with examples drawn from Li et al. [401]. 
+ +![](images/aa202666c0347e30f452afb50132bf46686ba98433600b7ee8d0e4c2f30ad8f5.jpg) + +![](images/09f798353b1e4615f84c4d824a90ec1e55d3d23579c20ec2917c35a81ade4452.jpg) +Reasoning Vector Driven Latent Space Deep Reasoning + +![](images/3effed9f4c545b03b2ac2c365b4d87fe9724f58c4dddd4294def63e3d2f5672e.jpg) +Reasoning Manager Driven Latent Space Deep Reasoning + +# 4.1.1 Natural Language Deep Reasoning + +Traditionally, researchers have sought to adapt natural language for intuitive and free-flowing deep reasoning [836, 1118, 303, 617, 1070, 765, 205]. Early work by Wei et al. [836] demonstrated that the use of natural language Long CoT significantly enhances the reasoning capabilities of RLLMs. Further, the Natural Program framework [460] allows RLLMs to engage in deeper natural language reasoning by ensuring a more structured and rigorous logical analysis. More recently, CodeI/O [401] has introduced a technique that reorganizes code-based reasoning patterns into natural language formats, further boosting the reasoning potential of RLLMs [36]. Similarly, Li et al. [387] propose CoRT, which integrates code into reasoning to facilitate a mixture of formats, resulting in improved cognitive performance. + +# 4.1.2 Structured Language Deep Reasoning + +Structured language deep reasoning encompasses various approaches designed to program [100, 464, 687, 591, 198, 845, 830, 1044] or symbolic language [605, 158, 451, 372, 933, 604, 37, 40, 797, 380] format for enhanced deep reasoning. In this context, most studies focus on utilizing code to better enhance the mathematical reasoning capabilities [389, 107, 978, 85]. Xu et al. [897] propose a neural-symbol self-training framework guided by the environment, addressing both the scarcity of symbolic data and the limitations of symbolic processing in LLMs. Additionally, Liao et al. 
[443] present SKIntern, which refines symbolic RLLMs through curriculum learning and linear attenuation, enabling the internalization of symbolic knowledge with fewer examples, reducing computational costs, and accelerating inference. Furthermore, Ranaldi et al. [634] introduce QuaSAR, a CoT variant that directs LLMs to operate at higher abstraction levels through quasi-symbolic reasoning, thus improving natural language reasoning and providing more precise structural representations. + +# 4.1.3 Latent Space Deep Reasoning + +Latent space deep reasoning encompasses techniques designed to enhance the reasoning abilities of LLMs by leveraging operations within continuous latent spaces [684, 151, 640, 324]. These approaches can be categorized into three main paradigms: (1) Reasoning Token-Driven Latent Space Deep Reasoning: Early work [810, 1013] introduce the concept of "planning tokens" or "thought tokens" to guide reasoning within latent spaces [949, 1008]. Further, Coconut [236] expands on this through the maintenance of multiple alternative reasoning paths, increasing both complexity and efficiency [1069, 706]. At the extreme, Heima [662] condenses the entire Long CoT process into a single token, yielding substantial computational savings. (2) Reasoning Vector Driven Latent Space Deep Reasoning: Building on the previous paradigm, LTM [356] conceptualizes the layers of LLMs as "thought blocks" and introduces the concept of "thought vectors" for each layer. This + +
ModelBase ModelGSM8kMATHGPQAOlympiadBenchLiveCodeBench
Latent Space Deep Reasoning
No-CoT [151]Mistral-7B [318]38.0----
SQ-VAE [810]Llama-2-7B [743]40.07.0---
RecurrentBlock-3.5B [204]-42.1----
ICoT-SI [151]Mistral-7B [318]51.0----
Natural Language Deep Reasoning
Self-Rewarding [114]Llama-2-7B [743]40.010.7---
Llama-3.1-8B [168]-56.720.3---
MetaMath [983]Llama-2-7B [743]66.5----
OVM [979]Llama-2-7B [743]73.7----
NuminaMath-7B-CoT [397]-75.455.2-19.9-
Qwen2-7B [925]-79.944.2-21.3-
Qwen2-Math-7B [927]-80.450.4-38.2-
Internlm2-math-plus-7B [974]-84.054.4-18.8-
OMI2 [401]Qwen2.5-Coder-7B [301]84.172.336.2-27.2
Llama-3.1-70B [168]-85.541.4---
CODEI/O++ [401]Qwen2.5-Coder-7B [301]85.772.140.6-29.1
CODEI/O [401]Qwen2.5-Coder-7B [301]86.471.943.3-28.5
WI [401]Qwen2.5-Coder-7B [301]87.071.439.1-26.0
WI (Full) [401]Qwen2.5-Coder-7B [301]87.071.142.9-27.6
OMI2 (Full) [401]Qwen2.5-Coder-7B [301]88.573.240.9-28.4
DeepSeekMath-7B-RL [658]-88.251.7-19.0-
Llama-3.1-405B [168]-89.053.8---
CoMAT [371]GPT-4 [3]93.7-40.4--
CoT [634]GPT-4 [3]94.5-41.850.2-
FCoT [523]GPT-4 [3]95.0----
Qwen2.5-Math-7B-Instruct [927]-95.283.6-41.6-
MathPrompter [303]GPT-4 [3]95.6----
Qwen2.5-Math-72B-Instruct [927]-95.985.9-49.0-
DeepSeek-R1-Distill-Qwen-7B [227]--92.8-49.137.6
DeepSeek-R1-Distill-Qwen-32B [227]--94.3-62.157.2
Structured Language Deep Reasoning
STaR [1012]Llama-2-7B [743]58.216.0---
ENVISIONS [897]Llama-2-7B [743]59.019.0---
MAmmoTH [1006]Code-Llama-7B [639]59.4----
MathCoder-CL [783]Code-Llama-7B [639]67.830.2---
ToRA-Code [217]Llama-2-7B [743]72.6----
Brain [107]Code-Llama-7B [639]74.0----
DeepSeek-Coder-7B [226]-77.444.4---
SIaM [978]Qwen-2-Math-Base81.550---
OC-SFT-1 [401]Qwen2.5-Coder-7B [301]86.770.937.7-27.5
PyEdu [401]Qwen2.5-Coder-7B [301]85.871.440.9-25.8
Qwen2.5-Math-7B-Instruct [927]-94.685.2-55.6-
Qwen2.5-Math-72B-Instruct [927]-95.888.1-60.6-
QuaSAR [634]GPT-4 [3]96.5-55.444.6-
MathDivide [687]GPT-4 [3]96.8----
+ +Table 1: Performance of various deep reasoning formats, sorted primarily by GSM8K scores. “-” indicates that the paper did not report this score. + +approach allows for the scaling of inference-time computations by implicitly performing reasoning within the latent space through recurrent depth. (3) Reasoning Manager Driven Latent Space Deep Reasoning: Inspired by these, Schone et al. [647], Geiping et al. [204], and Saunshi et al. [646] propose a mechanism similar to a continuous reasoning manager, which iteratively governs a trained "recurrent block" as a recurrent "thought block" [511]. This method integrates deeper model layers during reasoning, enhancing performance without needing specialized training data, and even outperforming larger RLLMs. Additionally, ITT [109] leverages the original transformer layer as a recurrent "thought block", selecting key tokens via adaptive token routing and controlling reasoning depth with residual thinking connections, enabling more efficient processing of critical tokens. Further, System-1.5 Reasoning [808] defines two dynamic shortcuts. The Model Depth Shortcut (DS) lets non-critical tokens exit early via lightweight adapter branches while routing critical tokens through deeper Transformer layers, thus supporting adaptive, vertical reasoning. The Step Shortcut (SS) reuses hidden states across decoding steps to bypass trivial iterations and enable horizontal reasoning in latent space. + +# 4.2 Deep Reasoning Learning + +Insufficient deep reasoning in RLLMs can significantly degrade performance [758, 823]. As a result, research has focused on improving reasoning through training. Supervised fine-tuning (SFT) [1058] stabilizes model outputs by serving as a memory process [883], while reinforcement learning (RL) enables generalization and self-learning [227, 137, 276, 898]. 
Recent studies for deep reasoning + +![](images/787c5674fba7b0ce5e4ca3ac3eefd20babe3c384dc807cab022b3df606b88f7a.jpg) +(a) Deep Reasoning Imitation + +![](images/04fef9422d7990eb4d902d3c902905109bd7fe0911512bee51a344a37488531e.jpg) +(b) Deep Reasoning Self-Learning +Figure 6: The different learning strategies of deep reasoning learning, including deep reasoning imitation of the data from advanced deep reasoning systems, like advanced RLLMs, MCTS, etc.; deep reasoning self-learning from preference-based RL by implicit reward. + +learning have explored using SFT to imitate advanced reasoning in RLLMs and applying RL to enhance self-improvement in reasoning. As illustrated in Figure 6, this section outlines two key approaches to improve deep reasoning: (1) Deep Reasoning Imitation (§ 4.2.1), which involves learning reasoning from human-annotated or distilled data through SFT, and (2) Deep Reasoning Self-Learning (§ 4.2.2), where models improve reasoning through preference-based RL with implicit rewards. The performance of these methods is shown in Table 2. + +# 4.2.1 Deep Reasoning Imitation + +Deep reasoning in RLLMs can be effectively achieved by mimicking advanced reasoning systems, such as human reasoning [558, 61, 115, 403], advanced RLLMs [227, 58, 957, 370, 102], and scaling-augmented RLLMs [410, 1003, 596, 1136, 41]. This approach enables the model to learn complex reasoning patterns and generalize across tasks [937, 416]. Specifically, (1) Imitation from Human: Earlier, Cobbe et al. [141] first propose the deep reasoning imitation paradigm using human examples. ALT [558] improves RLLM reasoning by generating larger datasets of human-annotated logical templates, which fosters deeper reasoning [241]. To enhance diversity, EIT [61] promotes simpler human-generated plans, while LLMs contribute more nuanced reasoning, facilitating collaboration between human input and AI. 
(2) Imitation from Advanced RLLMs: A body of work utilizes zero-shot prompting to guide large teacher RLLMs in generating reasoning rationale, which is then used to fine-tune smaller RLLMs, marking the beginning of deep reasoning imitation [256, 352, 938, 521]. Additionally, AceMath [500] applies few-shot prompting to distill Long CoT samples from advanced LLMs, followed by multi-stage quality-guided SFT to enhance performance. Chen et al. [107] separate the data synthesis process into planning and reasoning stages, thereby improving reasoning quality. DART-Math [738] effectively distills complex queries requiring deeper reasoning during synthesis, advancing deep reasoning capabilities. Further, Ahmad et al. [7] propose OpenCodeReasoning, expanding this paradigm to the code scenarios. (3) Imitation from Scaling-augmented RLLMs: Earlier, Bansal et al. [34] enhance data quality by scaling the sampling size and length, boosting imitation performance [481, 1005]. Yang et al. [927] and Zhao et al. [1090] further improve data quality by scaling sampling and selecting samples through sample feature or an additional reward model. Additionally, Li et al. [410] identify optimal deep reasoning paths through MCTS, advancing imitation effectiveness. + +Recent studies [299, 550] show that distilling knowledge from advanced RLLM APIs like O1 [307] and R1 [227] significantly enhances the performance of smaller LLMs [424, 223]. This method, employing supervised fine-tuning, boosts model performance on complex mathematical reasoning tasks, sometimes surpassing the teacher models' performance. Building on these findings, LIMO [967], S1 [560], and RedStar [902] argue that a large number of imitation samples is unnecessary. They demonstrate that even a minimal set of samples can activate deep reasoning capabilities in foundational LLMs. For practical applications, Turtel et al. [747] showcase how these techniques can predict future events beyond a model's knowledge cutoff. Sun et al. 
[701], Yang et al. [928] and Zhao et al. [1093] further enhance deep reasoning imitation by selecting high-quality samples from large datasets, thereby improving the quality of the imitation data. + +# 4.2.2 Deep Reasoning Self-Learning + +While simple imitation can yield strong performance, current models still rely heavily on human annotations or outputs from more advanced models for both imitation and distillation [502]. To + +
ModelData SizeBase ModelGSM8KMATHMATH-500AIME2024GPQAOlympiadBench
Deep Reasoning Imitation
SFT [938]200KLlama-3.1-8B [168]---54.13.5-
Retro-Enh [115]14MLlama-3-8B [168]45.121.7----
Query-Exp [115]24MLlama-3-8B [168]51.323.1----
Res-Div [115]14MLlama-3-8B [168]53.023.2----
MetaMath [738]0.40MMistral-7B [318]76.529.8---5.9
ALT-FLDx2 [558]100KLlama-3.1-70B [168]83.324.4----
EIT [61]15KLlama-2-70B [743]84.132.5----
MathScale [738]2.0MMistral-7B [318]74.835.2----
Tutor-Amp [115]11MLlama-3-8B [168]64.435.9----
MMIQC [738]2.3MMistral-7B [318]75.437.4---9.4
VRT [738]0.59MMistral-7B [318]82.338.7---8.7
KPMath-Plus [738]1.6MMistral-7B [318]82.146.8----
Llama-2-70B-Xwin-Math-V1.1 [385]1.4MLlama-2-70B [743]90.252.5---16.3
DART-Math-Mistral-7B [738]591KMistral-7B [318]81.145.5---14.7
DART-Math-Llama-3-70B [738]591KLlama-3-70B [168]89.656.1---20.0
Rejection Sampling [410]197KQwen2.5-7B [926]87.170.0-10.0-27.1
Evol-Instruct-7B [514]905KQwen2.5-Math-7B [927]88.5-77.416.7--
FastMCTS [410]288KQwen2.5-7B [926]88.974.0-20.0-27.5
KPDDS-7B [295]800KQwen2.5-Math-7B [927]89.9-76.010.0--
DeepSeek-R1-Distill-Qwen-7B [227]800KQwen2.5-7B-Instruct [926]91.7-91.643.3--
Openmathinstruct-7B [740]14MQwen2.5-Math-7B [927]92.0-79.610.0--
NuminaMath [967]100KQwen2.5-Math-7B [927]92.9-81.820.0--
PromptCoT-DS-7B [1090]115KDeepSeek-R1-Distill-Qwen-7B [227]92.6-93.060.0--
PromptCoT-Qwen-7B [1090]905KQwen2.5-Math-7B [927]93.3-84.026.7--
AceMath-7B-Instruct [500]1.2MQwen2-Math-7B-Instruct [927]93.783.1---42.2
AceMath-72B-Instruct [500]1.2MQwen2.5-Math-72B-Instruct [927]96.486.1---48.4
NuminaMath [967]100KQwen2.5-32B-Instruct [926]--59.26.525.836.7
OpenThoughts [967]114KQwen2.5-32B-Instruct [926]--80.650.242.956.3
Sky-T1-32B-Preview [724]17KQwen2.5-32B-Instruct [926]--82.443.356.8-
Journey Learning [299]5KQwen2.5-Math-72B [927]--87.243.3--
STILL-2 [550]3.9KQwen2.5-32B-Instruct [926]--90.246.755.1-
Bespoke-32B [362]17KQwen2.5-32B-Instruct [926]--93.063.358.1-
s1 [560]1KQwen2.5-32B-Instruct [926]--93.056.759.6-
DeepSeek-R1-Distill-Qwen-32B [227]800KQwen2.5-32B-Instruct [926]--94.372.662.1-
LIMO [967]817Qwen2.5-32B-Instruct [926]--94.815.866.766.8
Deep Reasoning Self-Learning
DPO [302]40KDeepSeek-Math-7B-Base [658]74.834.9----
RefT [302]40KDeepSeek-Math-7B-Base [658]71.436.0----
Self-Explore [302]40KDeepSeek-Math-7B-Base [658]78.637.7----
SimPO [723]10KQwen2.5-Math-7B-Instruct [927]88.840.056.6---
DPO [446]11KDeepSeek-Math-7B-Instruct [658]-48.7----
TPO [446]11KDeepSeek-Math-7B-Instruct [658]-51.3----
DPO [446]11KQwen2-7B-Instruct [925]-54.3----
TPO [446]11KQwen2-7B-Instruct [925]-55.5----
MCTS [74]15KDeepSeek-Math-7B-Base [658]83.264.0----
SBS [74]15KDeepSeek-Math-7B-Base [658]84.166.3----
FastMCTS+Branch-DPO [410]152KFastMCTS-7B [410]89.975.4-20.0-29.6
+ +Table 2: Performance of various deep reasoning learning methods, sorted primarily by Math or Math-500 scores. “-” indicates that the paper did not report this score. + +address this limitation, recent research has focused on enabling more advanced reasoning through techniques like self-play and self-learning [948, 1077, 409, 624]. Specifically, self-learning methods can be classified into two paradigms, differentiated by their sampling strategies: + +(1) Self-Learning from Direct Sampling: The earliest method, STaR [1012], utilizes In-Context Learning (ICL) to sample deep reasoning results [657] and uses the correctness of the final answer as an implicit reward for self-learning [258, 581, 582, 1059, 826, 462]. Further, ReST [225] extends this by introducing a Grow-Improve paradigm, where self-generated reasoning is first annotated with rewards and then enhanced via offline RL algorithms. However, these approaches can be fragile, especially when the reward process lacks robustness. Inspired by the Expectation-Maximization (EM) algorithm, Singh et al. [674] propose a method that generates rewards and iteratively optimizes LLMs to achieve the best performance on a validation set, significantly improving robustness. To further strengthen the reward process, a series of work introduces a method to adapt incorrect solutions, training a verifier [155, 262] or utilize entropy [809, 1040] to select or refine the reward process and improve self-learning quality. (2) Self-Learning from Tree Search: Early deep learning methods, such as EXIT [18], combined MCTS with deep neural networks for reinforcement learning, iteratively self-training the network to guide the tree search and enhance reasoning. Building on this, CPO [1065] and TPO [446] align each step of Long CoT reasoning with the corresponding tree search path, using Tree of Thoughts (ToT) [955] preference information to support deeper reasoning [951, 302]. 
Li [422] propose Policy-Guided Tree Search (PGTS), integrating RL with structured tree exploration for more efficient navigation of reasoning paths. Further developments, such as AlphaMath [74], AlphaLLM-CPL [814], and TongGeometry [1029], refine MCTS behavior through stepwise trajectory pair extraction and curriculum preference learning, boosting LLM reasoning abilities [611, 412, 872]. + +# Takeaways: Imitation & Self-Learning + +- Imitating deep reasoning from advanced RLLMs, and scaling-augmented methods like MCTS can help models learn complex reasoning patterns with fewer samples. +- Self-learning techniques, including reinforcement learning and tree search, allow RLLMs to enhance their reasoning abilities over time. +- The combination of imitation from advanced RLLMs and self-learning techniques strengthens RLLM reasoning, leading to strong performance on complex tasks. + +# 5 Feasible Reflection for Long CoT + +Feasible Reflection is a pivotal component of Long CoT reasoning, enabling LLMs to handle complex tasks through iterative feedback and refinement [406, 192]. Specifically, it comprises two primary stages: (1) Feedback ( $\S$ 5.1), which generates feedback signals $\mathcal{F}_i$ to correct node $n_j$ in Equation (5); and (2) Refinement ( $\S$ 5.2), which adjusts the subsequent node $n_{i+1}$ according to the feedback in Equation (6). + +# 5.1 Feedback + +Feedback refers to the process of providing evaluations of both overall outputs and the processes that lead to them, with the goal of assessing their accuracy and quality [394, 396, 838, 220, 862]. This process, also referred to as critique or verification, can be executed using either natural language or structured data formats, which serve as the foundation for tree-search methods [113]. Specifically, as shown in Figure 7, feedback can be categorized into three distinct types: (1) Overall Feedback ( $\S$ 5.1.1); (2) Process Feedback ( $\S$ 5.1.2); (3) Hybrid Feedback ( $\S$ 5.1.3). 
+ +# 5.1.1 Overall Feedback + +The overall feedback focuses on providing a global view of the entire process and results, rather than assessing each step individually. This feedback significantly enhances reasoning skills and reward modeling in reinforcement learning for RLLMs. Specifically, as shown in Figure 7 (a), the overall feedback can be categorized into three main sources: Outcome Reward Model, Rule Extraction, and RLLMs Feedback. The performance across these categories is summarized in Table 3. + +![](images/924dba4b1d5c6d25f0eff62713bafcbf9c36e9cd21483aae275897e288afdd77.jpg) + +![](images/367cca6990189dfda7e049a2d562809a0e9869ca5351f1d2d1d1e74c0f9bcafd.jpg) + +![](images/dd4b7e43b794582020a033da732daf0b1be53e45111b8e9717414d483b50896e.jpg) + +![](images/6914fc78c8aeece2af825dabacd242f08c842b612001c13322264246623afb04.jpg) +Figure 7: The feedback capabilities framework for feasible reflection consists of Overall Feedback and Process Feedback. Overall Feedback includes the Outcome Reward Model (ORM) in a value format, rule extraction for correctness judgment, and overall RLLMs based on RLLMs. Process Feedback includes the Process Reward Model (PRM) in a value format and step-level RLLMs, also based on RLLMs. + +![](images/75c5bea65e3eccbc79affd34b429b7f444436c52e5988975f4dec0ecb68328a3.jpg) + +
ModelBase ModelChatChat_HardSafetyReasoningOverall
RLLMs
GPT-4o-mini [3]-95.060.780.883.780.1
Llama3.1-70B-Instruct [168]-97.270.286.082.884.0
Llama3.1-405B-Instruct [168]-97.274.687.177.684.1
GPT-4 [3]-95.374.386.987.686.0
GPT-4o [3]-96.176.186.688.186.7
Gemini-1.5-pro [719]-92.380.687.992.088.2
Self-taught Evaluator [803]Llama-3.1-70B-Instruct [168]96.684.281.091.588.3
SFR-LLMA-3.1-8B-Judge [791]Llama-3.1-70B-Instruct [168]95.577.786.295.188.7
SFR-NeMo-12B-Judge [791]Mistral-NeMo-Instruct-12B [725]97.282.286.595.190.3
SFR-LLMA-3.1-70B-Judge [791]Llama-3.1-70B-Instruct [168]96.984.891.697.692.7
Skywork-Critic-Llama-3.1-70B [791]Llama-3.1-70B-Instruct [168]96.687.993.195.593.3
LMUnit [641]Llama-3.1-70B-Instruct [168]----93.4
EvalPlanner [643]Llama-3.1-70B-Instruct [168]97.589.493.095.593.9
Outcome Reward Models
tulu-v2.5-13b-uf-rm [306]TULU-2-13B [305]39.442.355.547.446.1
Prometheus-2-7B [353]Mistral-7B-Instruct-v0.2 [318]85.549.177.176.572.0
Prometheus-8x7b-v2 [353]Mixtral-8x7B-Instruct [319]93.047.180.577.474.5
Critic-RM-Rank [991]Llama-3.1-70B-Instruct [168]97.058.084.092.082.8
RM [689]Llama-3.1-70B-Instruct [168]98.374.583.888.086.4
SynRM [968]Llama-3.1-70B-Instruct [168]97.576.886.388.587.3
CLoud [17]Llama-3-70B-Instruct [168]98.075.687.689.087.6
FLAME-RM-24B [753]PaLM-2-24B [16]92.275.789.693.887.8
SteerLM-RM 70B [829]Llama-2-70B-chat [743]91.380.390.692.888.8
Llama-3-OffsetBias-RM-8B [585]Llama-3-8B-Instruct [168]97.281.886.891.989.4
InternLM-20B-Reward [62]InternLM2-8B-Instruct [62]98.976.589.995.890.2
ArmoRM-Llama3-8B-v0.1 [771]Llama-3-8B-Instruct [168]96.976.892.297.390.8
Nemotron-4-340B-Reward [829]Nemotron-4-340B [4]95.887.192.293.692.2
Skywork-Reward-Llama-3.1-8B [466]Llama-3.1-70B-Instruct [168]95.887.390.696.292.5
Skywork-Reward-Gemma-2-27B [466]Gemma-2-27B-it [720]95.891.492.096.193.8
+ +Table 3: Performance of various overall feedback methods, sorted primarily by Overall scores in RewardBench [367]. “-” indicates that the paper did not report this score. + +Overall Feedback from Outcome Reward Model Since many tasks cannot be directly evaluated using accuracy or other standard metrics, research has increasingly focused on Outcome Reward Models (ORM), which provide value-based rewards for more general and quantifiable feedback [1127, 986, 467]. In 2021, OpenAI [141] has proposed a "Gen-Verifier" paradigm, which uses a specialized ORM to evaluate the accuracy of generated rationales, showing significant progress in feedback capabilities [658]. Ji et al. [315] introduce a trained knowledge scorer to analyze hallucinations in the reasoning process, providing feedback to RLLMs and improving the accuracy of their outputs over time. Moreover, Generative Reward Models [1048] use next-token prediction for overall feedback, which seamlessly integrates with instruction adjustments, leveraging inference-time calculations to improve ORM feedback. + +However, specifically trained ORMs are often costly and not sufficiently robust. Building on this, Self-Rewarding Language Models (SRLMs) [1129] incorporate a self-consistency framework, optimizing feedback to improve model alignment and consistency [1047]. Yu et al. [991] introduce Critic-RM, combining RLLM-generated natural language criticism with corresponding feedback. This method filters high-quality feedback while jointly fine-tuning reward prediction and criticism generation, optimizing ORM performance. + +Overall Feedback from Rule Extraction Although ORM has achieved significant improvements, its accuracy still falls short of $100\%$ , preventing it from outperforming rule-based answer correction feedback [955, 234, 1079]. 
Previous studies, such as STaR [1012], ReST [225], and ReFT [745], have demonstrated that feedback based on final answer rewards is more effective than both PRM and ORM in mathematical scenarios [197]. Furthermore, Guo et al. [227] and Xie et al. [886] introduce a multi-stage RL framework that incorporates rule-based rewards, significantly enhancing both output accuracy and length while mitigating reward hacking through simple yet robust rules [30], such as format validation and result verification. In coding scenarios where direct rule-based feedback is difficult, OpenCodeInterpreter [1108], AceCoder [1014], O1-Coder [1076], and VerMCTS [56] address this challenge by implementing an automated test-case synthesis pipeline, deriving rewards based on program performance [564, 216, 1115]. Additionally, Ma et al. [536] propose an automated approach to training a test case generator, which alleviates the scarcity of test cases and demonstrates that increasing the number of test cases correlates with improved reward quality. Moreover, Ma et al. [535] decompose problem-solving into structured coding subtasks: file localization, function + +localization, line localization, and code editing generation, and applies multi-viewed rule-based rewards. + +Overall Feedback from RLLMs Research on feedback from RLLMs centers on detecting errors and biases through natural language feedback, also known as LLM-as-Judge, self-reflection or self-critique [274, 336, 29, 638, 549, 802, 1002, 895, 529]. This method has led to significant improvements across various tasks, particularly in self-correction [848, 1109, 206, 184, 1075]. Huang et al. [286] contend that traditional LLMs struggle to generate effective feedback without external signals, requiring the development of RLLMs with enhanced feedback capabilities [645, 398]. 
As a result, many studies leverage RLLMs' error-identification strengths, often stemming from their pretraining phase, to improve feedback generation and correction [965, 39, 40, 282]. + +Earlier, McAleese et al. [544] found that training RLLMs to learn self-critique and deep reasoning can further boost performance. Zhang et al. [1062] propose a self-contrast mechanism that compares multiple perspectives, identifies differences, and summarizes insights to resolve inconsistencies. However, these methods often offer task-independent feedback. To address this, Hao et al. [235] introduce AutoRace, which tailors evaluation criteria for specific tasks. The Reversal of Thought (RoT) framework [999] introduces a novel paradigm combining reverse reasoning with self-reflection, helping models identify the limits of their knowledge and enhance reasoning efficiency. Furthermore, ACR [1116] implements a scoring system for coding tasks, using LLM-as-a-Judge for quality assessment and LLM-as-a-Critic for critiquing low-quality code, improving consistency across benchmarks. Zheng et al. [1107] integrate code execution error data and feedback from RLLMs to improve code generation performance. Liu et al. [484] present AGSER, a method using attention-guided self-reflection to address hallucinations by splitting input queries into attentive and nonattentive components. Finally, Saha et al. [643] introduce EvalPlanner, which separates feedback into planning and reasoning components for more streamlined expression using existing RLLMs. More comprehensively, Hu et al. [274] outline the complete pipeline, key insights, and practical lessons for training RLLMs to function as judges. + +# 5.1.2 Process Feedback + +Techniques combine process feedback with MCTS or RL rewards to provide automated, step-by-step guidance, reducing the need for labor-intensive annotations while enhancing reasoning capabilities [749, 344]. 
These techniques can be categorized into two main types based on the source of feedback: process reward models (PRMs) and prompted LLMs. The performance comparison is mainly shown in Table 4.

Process Feedback from Process Reward Model Recent studies highlight the significance of feedback in developing effective PRMs for complex reasoning tasks, particularly in a step-level view [134, 423, 528]. (1) Process Annotated PRM Training: Earlier, Lightman et al. [449] demonstrate that training process feedback with human-annotated data (PRM800K) surpasses outcome supervision in creating reliable reward models. However, this approach requires significant human effort. To address this, Wang et al. [792] introduce Math-Shepherd, a dataset that generates step-by-step supervision using a Tree Search-inspired method [73, 1001]. Following this, methods like QwQ [731], Skywork-o1 [570], AceMath [500], and PRIME [143] adopt similar techniques to enhance PRM performance. Additionally, Zhang et al. [1036] propose entropy regularization to improve model convergence. Rather than focusing solely on the first error step, Full-Step-DPO [903] assigns rewards for the entire reasoning chain, including error steps. VersaPRM [1015] extends PRMs across multiple domains, broadening their applicability. Similarly, Gu et al. [219] and Zhang et al. [1074] suggest training models with student preferences aligned to teacher preferences, ensuring effective preference distillation. Further, Wang et al. [807] propose VisualPRM400K and expand this paradigm to multimodal scenarios. (2) Outcome Annotated PRM Training: Alternative approaches, such as ReST-MCTS* [1032], OVM [979], Implicit PRM [1000], AutoPSV [506], and DVO [1038], leverage outcome supervision or implicit feedback to train PRMs, reducing the need for extensive human-annotated data [891, 643]. UAS [981] incorporates uncertainty-aware value models [275] into feedback predictions [495, 167, 945, 1089].
Additionally, Aurora [710] utilizes ensemble prompting strategies and reference answers for reverse verification, training stronger PRMs that better align with the Long CoT data distribution. Furthermore, PAV [651] suggests that rewards should reflect reasoning progress, as measured by changes in the likelihood of producing a correct future response before and after each step. Yang et al. [932], Lee et al. [376], Yoon et al. [975] extend these paradigms + +
ProcessBenchPRMBench
GSM8KMATHOlympiadBenchOmniMATHSimplicitySoundnessSensitivity
Process Reward Models
Qwen2.5-Math-7B-PRM [1102]Qwen2.5-Math-7B [927]39.452.239.433.1---
Math-Shepherd-PRM-7B [792]Mistral-7B [318]47.929.524.823.847.145.760.7
RLHFlow-PRM-Mistral-8B [156]Mistral-7B [318]50.433.413.815.846.757.568.5
RLHFlow-PRM-DeepSeek-8B [156]DeepSeek-7B [52]38.833.816.916.947.657.568.1
Skywork-PRM-1.5B [466]Qwen2.5-Math-1.5B-Instruct [926]59.048.019.319.233.628.648.8
Skywork-PRM-7B [466]Qwen2.5-Math-7B-Instruct [926]70.853.622.921.038.432.754.3
Qwen2-1.5B-PRM800k [700]Qwen2-Math-1.5B-Instruct [927]34.055.334.241.0---
Qwen2-1.5B-Math-Shepherd [700]Qwen2-Math-1.5B-Instruct [927]48.934.19.813.7---
Qwen2-1.5B-Epic50k [700]Qwen2-Math-1.5B-Instruct [927]55.636.120.230.0---
Qwen2.5-Math-7B-PRM800KQwen2.5-Math-7B-Instruct [927]68.262.650.744.3---
Qwen2.5-Math-PRM-7B [1102]Qwen2.5-Math-7B-Instruct [927]82.477.667.566.3---
Universal-PRM-7B [710]Qwen2.5-Math-7B-Instruct [927]85.877.767.666.4---
Critic Model
Llama-3.1-8B-Instruct [168]-27.526.718.519.2---
GPT-4o [3]-61.953.948.344.659.770.975.8
QwQ-32B-Preview [731]Qwen2.5-32B-Instruct [926]62.352.746.243.9---
DeepSeek-R1-Distill-Qwen-14B [227]Qwen2.5-14B-Instruct [926]67.338.829.932.1---
Dyve-14B [1111]DeepSeek-R1-Distill-Qwen-14B [227]68.558.349.047.2---
Qwen2.5-72B-Instruct [926]-76.261.854.652.2---
SCRIT [713]Qwen2.5-72B-Instruct [926]80.260.032.527.8---
o1-mini [307]-93.288.987.282.464.672.175.5
LLemma-PRM800k-7B [679]LLemma-7B [26]----51.450.966.0
LLemma-MetaMath-7B [679]LLemma-7B [26]----50.349.066.0
LLemma-oprn-7B [679]LLemma-7B [26]----49.049.864.1
MATHMinos-Mistral-7B [195]Mistral-7B [318]----51.454.466.5
ReasonEval-7B [877]LLemma-7B [26]----55.563.971.0
ReasonEval-34B [877]LLemma-34B [26]----51.563.073.1
Gemini-2.0-flash-exp [679]-----62.767.375.4
Gemini-2.0-thinking-exp-1219 [679]-----66.271.875.3
+ +Table 4: Performance of various process feedback methods on ProcessBench [1102] and PRM-Bench [679]. “-” indicates that the paper did not report this score. + +to the token level. Moreover, Chen et al. [110] expand these into interactive agent scenarios, allowing for automatically learning reward models from the environment without additional manual annotation. Wang et al. [832] equip a dual-layer MLP module to evaluate the reward at each step, successfully integrating the policy model and PRM into a unified interface without additional process annotations, reducing over $99\%$ of PRM parameters for efficient reasoning. + +Process Feedback from RLLMs As PRM training remains heavily dependent on manually annotated data, recent research has explored methods for enabling models to generate their natural language feedback to optimize performance [910]. These approaches fall into two primary categories: (1) Model-Driven Feedback Reasoning: Earlier work such as React [956] and Reflexion [669] enhances RLLMs with natural language feedback at each action and reasoning step [196, 135, 89], improving decision-making in diverse tasks. Similarly, Step-DPO [365] uses RLLM to self-verify step-level positive and negative pairs for training through the DPO paradigm, achieving strong performance. Additionally, Sun et al. [702] propose a dynamic error classification framework that adapts based on model outputs, improving performance in mathematical reasoning tasks by addressing specific error patterns in math word problems. Furthermore, Xie et al. [889] and He et al. [245] iteratively apply MCTS to collect preference data, utilizing its forward-looking capabilities to decompose instance-level rewards into more precise step-level signals, thereby enhancing feedback accuracy. 
However, step-wise feedback often suffers from reliability issues, which can be mitigated by uncertainty quantification [973, 969], improving the reliability of step-wise verification in reward models for mathematical reasoning tasks. Moreover, Fu et al. [187] define the CoT Average Causal Effect (CACE) to capture causal relationships between steps, resulting in a causalized Long CoT where all steps are both correct and comprehensible. (2) Environment-Driven Feedback Reasoning: Given the increasing complexity of large models, there is growing interest in combining prompt-based LLMs with external environments to generate more interpretable and controllable feedback [885, 271]. For example, ORPS [996] and Drori et al. [162] minimize dependence on human annotations by using execution feedback, enabling models to autonomously refine their solutions. Additionally, Shrestha et al. [670] contribute by translating model outputs into Python code, helping to identify logical errors, gain insights into flawed reasoning processes, and guide improvements in mathematical reasoning. Xu et al. [897] integrate reasoning models with an interactive environment, enabling learning in more dynamic scenarios and creating a more generalizable self-learning framework. + +# 5.1.3 Hybrid Feedbacks + +Given the respective advantages and limitations of Overall Feedback and Process Feedback, recent studies have sought to combine both for optimal feedback. Specifically, Zhang et al. [1078] propose + +a consensus filtering mechanism that integrates Monte Carlo estimation with an LLM-as-judge to enhance both overall and stepwise feedback, thus improving reasoning accuracy. In a similar vein, Lin et al. [454] introduce Step-KTO, a framework combining stepwise process-level and outcome-level binary feedback, using PRM and ORM to guide language models toward coherent reasoning, with a focus on error correction through reflection mechanisms. 
+ +# Takeaways: Feedback + +- Evolving Feedback Models: Feedback mechanisms, including overall, process, and hybrid feedback, are crucial for improving the reasoning capabilities of RLLMs. +- Innovative Approaches in Process Feedback: Process feedback using techniques like PRMs with MCTS enhances Long CoT, though challenges like reward hacking remain. +- Self-Reflection and Model-Driven Feedback: Self-reflection and model-driven feedback improve RLLM performance by enabling error detection, task-specific insights, and more autonomous learning. + +# 5.2 Refinement + +Refinement refers to the process of addressing errors in reasoning based on prior feedback. As shown in Figure 8, refinement methods can be grouped into three primary categories: prompt-based refinement generation (§ 5.2.1), SFT-based refinement imitation (§ 5.2.2), and RL-based refinement learning (§ 5.2.3). + +# 5.2.1 Prompt-based Refinement Generation + +Research on prompt-based refine generation focuses on enhancing the performance of LLMs through iterative self-refinement mechanisms [578, 1091, 98, 469, 1028, 754, 818, 546]. A prominent approach involves prompting RLLMs to generate initial outputs, followed by self-feedback that iteratively refines and improves performance across tasks such as dialogue generation and mathematical reasoning [645, 539, 1101, 669, 549, 345, 750, 482], which even much reduce the hallucinations [289, 315]. Noteworthy methods, like Self-Backtracking [944], Refiner [590], and BackMath [1055], allow LLMs to adjust their reasoning autonomously, reducing unnecessary complexity in decision-making [868]. Further, Havrilla et al. [238] extend the paradigm by integrating overall-level and step-level refinements, improving refinement performance. Yang et al. 
[950] propose a method to decompose the self-correction capability of LLMs into "confidence" and "critique" capacities, designing probabilistic metrics to evaluate them and exploring the role of reflection mechanisms in model behavior. Additionally, MCTSr [1033], LLM2 [930], ReST-MCTS* [1032] and ReARTeR [703] emphasize dynamic reflection through iterative error correction and confidence adjustments, allowing models to autonomously refine reasoning strategies [186]. He et al. [240] + +![](images/b3686b17aa6dae7dfb30b34c5e285af765d180305957e5c15bbbeed64d436326.jpg) +(a) Prompt-based Refinement Generation + +![](images/6b9af6579bd26e04c798016e01125ccc0cc0c837723baed594fe92c9e6c31804.jpg) +(b) SFT-based Refinement Imitation + +![](images/75779ea3409037b107f99cc61b0546a161e6d6863edc845e12464cd3a1541651.jpg) +Figure 8: The three main categories of refinement methods, including Prompt-based Refinement Generation, SFT-based Refinement Imitation, and RL-based Refinement Learning. + +![](images/51fec61d82ab2a769606104af5832df56e4604f317836d062424f65c9e9866bf.jpg) + +![](images/23630b42c465d84d800277ffb7ad33291ea526c1dea42266eee59f4ed6d6ce9b.jpg) +Reinforcement Learning + +![](images/80703458fe6b97a41337e32d746ae10f1ad5d7ce4cd1e803f369ce673d59c38c.jpg) + +![](images/748e7abf84b0255c1331edd540782869194e76185b531fae8e9affbfdea58ee8.jpg) +(c) RL-based Refinement Learning +Aha! I think $1 + 1 = 3$ should be corrected $1 + 1 = 2!$ + +extend this paradigm to multi-agent scenarios, improving both reasoning and agent system performance [936, 1128]. Moreover, Yuksekgonul et al. [1009] and Peng et al. [593] further expand the paradigm by enabling automatic prompt optimization driven by LLMs. This approach facilitates more generalized and automated refinement of input prompts across a range of tasks, as opposed to focusing solely on refining output results. 
However, without oracle feedback, RLLM's self-refinement process fails, causing instability in both intermediate and final answers, leading to biases in simple factual queries and introducing cognitive biases in complex tasks [1051, 908].
+
+# 5.2.2 SFT-based Refinement Imitation
+
+Recent advancements in reflection-based reasoning for LLMs have led to frameworks that enhance model reasoning through self-refinement and error correction. A key approach is directly supervised fine-tuning, which allows models to learn error correction processes from advanced LLMs, thereby improving their reflective capabilities [14, 104, 406, 822, 99, 873]. Notable frameworks, such as rStar [615], improve smaller language models through self-play mutual reasoning, while Recursive Introspection [627] and RealCritic [714] use iterative feedback mechanisms to identify and correct errors to better self-improve [393]. Yan et al. [924] propose constructing step-wise self-correction data and implementing a training strategy that uses the above-constructed data to equip LLMs with spontaneous step-level self-correction capacities. Building upon these, Gao et al. [196] and Zhang et al. [1027] propose Math-Minos, which employs step-by-step natural language feedback as rationale tags, offering both correctness and detailed explanations for each step to train feedback mechanisms that justify and refine the reasoning process. Journey Learning [623] employs MCTS to parse node backtracking as natural language refinement, enhancing supervised fine-tuning and, thereby, improving reasoning performance. Additionally, approaches like ProgCo [682] emphasize iterative feedback and program-driven refinement to enhance critique and self-correction. Expanding these ideas to multimodal settings, frameworks, such as R3V [120] and MM-Verify [697], focus on integrating visual and textual reasoning [519, 813].
+ +# 5.2.3 RL-based Refinement Learning + +In recent research, several approaches have been proposed to enhance the performance of refinement through reinforcement learning [673, 1056]. Earlier, Kumar et al. [358] observed that SFT of RLLMs often fails to promote self-refinement behaviors. This limitation stems from a distributional mismatch between data collection strategies and model responses, as well as the risk of behavioral collapse. To address this, SCoRe [358] enhances self-refinement by training the model on its own self-generated correction trajectories and employing regularization to guide the learning process. This method prioritizes fostering self-refinement during testing, rather than merely maximizing reward for specific prompts [1018]. Further, Guo et al. [227] demonstrate that applying outcome-level rewarded RL can trigger an "Aha moment," activating the model's natural feedback and refinement behaviors without the need for human guidance. Moreover, Guo et al. [227], Zeng et al. [1017] and Ma et al. [529] explore initializing LLMs with iterative self-verification and self-correction behaviors, which are strengthened through supervised fine-tuning and further enhanced by outcome-level RL. Ma et al. [529] and Yang et al. [935] extend these capabilities with process-level RL, minimizing resource usage while enabling adaptive reasoning refinements during inference. More recently, Lee et al. [374] introduce an intrinsic verifier module to decide when refinements should be applied, using RL to further encourage self-refinement when errors are detected. + +# Takeaways: Refinement + +- Prompt-Based Refinement for Iterative Improvement: Iterative self-refinement through feedback loops helps LLMs improve reasoning and reduce errors like hallucinations but requires stable feedback to maintain accuracy. 
+- Supervised Fine-Tuning (SFT) for Error Correction: Supervised fine-tuning enhances LLMs by using iterative feedback and self-correction strategies to improve reasoning accuracy, especially for smaller models. +- Reinforcement Learning (RL) for Refinement: Reinforcement learning enhances self-refinement in LLMs by using self-generated corrections and adaptive strategies, reducing human intervention and resource consumption. + +![](images/ae0384cb2d35989e0913fcc05ec7fe401f4d3acdd492815afce7dcdd64d2789c.jpg) +Figure 9: Schematic representations of two common inference-time scaling strategies: (a) sequential scaling, which extends the length of Long CoT but is constrained by the reasoning boundaries of RLLMs; and (b) parallel scaling, which increases the sample size and aggregates multiple outcomes, yet does not surpass the performance of Pass@k. + +![](images/651c3a02f7c05e2fa7e8a9730a03db50638cef9382a4885f455c35d277bec9cc.jpg) + +# 6 Extensive Exploration for Long CoT + +Exploration is a key capability in Long CoT reasoning, allowing models to navigate complex problem spaces through strategic branching and iterative refinement [1019, 381, 784, 751]. Recent studies emphasize exploration mechanisms, such as hypothesis branching and error backtracking via reflection, as essential for overcoming the constraints of linear reasoning paths [227]. 
+ +Current research focuses on three key areas: (1) Exploration Scaling (§ 6.1), which explores the breadth and depth of exploration and its impact on downstream applications, particularly in improving the size of the exploration path $m$ in Equation (3); (2) Internal Exploration (§ 6.2), which focuses on training models to develop internal exploration capabilities, enabling more efficient and effective generation of $m$ exploration paths $\{n_{i+j}\}_{j=1}^{m}$ in Equation (3); and (3) External Exploration (§ 6.3), which examines how models can leverage external systems to enhance their exploratory abilities, facilitating the selection of the most effective path $n_{i+j}$ from the $m$ exploration paths in Equation (3). + +# 6.1 Exploration Scaling + +Recent advances in inference-time scaling algorithms [333, 843, 57, 1053, 112] have attracted significant interest, particularly in scaling reasoning length to improve performance [524, 568, 405, 779]. Following Chen et al. [93], as shown in Figure 9, exploration scaling can be understood through two paradigms: (1) sequential scaling, akin to a series of resistors, which connects multiple reasoning processes using reflection; and parallel scaling, similar to parallel resistors, where a unified verification/feedback mechanism selects the most effective reasoning processes. + +# 6.1.1 Sequential Scaling + +Sequential scaling refers to extending the reasoning output within a single model generation, significantly boosting model performance [383, 1052, 348]. Early works by Fu et al. [189] and Jaech et al. [307] show that increasing the length of the reasoning path can greatly improve performance. Tian et al. [736] enhances model reasoning iteratively by using prior answers as prompts for each successive round, thus enabling sequential scaling of the reasoning process. 
Building on this, later studies [314, 391] further explore enhancing logical depth through tree-based searches within a fixed compute budget, resulting in notable performance gains [11, 614]. Building upon this, Muennighoff et al. [560] introduce an inference-time scaling method that improves reasoning by fine-tuning and budget forcing, yielding substantial gains with additional computing at inference time. To address the constraints of attention spans, some studies focus on expanding reasoning length in latent spaces. Geiping et al. [204] and Chen et al. [109] enhance inference-time reasoning performance by implicitly scaling computation in latent space through recurrent depth. Setlur et al. [653] identified three core aspects of sequential scaling: (1) linking skills to asymmetric capabilities in base LLMs, such as connecting easy verification with difficult exploration; (2) enhancing exploration in reinforcement learning by utilizing the "negative" gradient of error trajectories, which extends search paths and links additional asymmetries; and (3) creating dynamic exploration by aligning task difficulty with training token budgets through tailored curricula.
+
+# 6.1.2 Parallel Scaling
+
+Parallel scaling refers to the process of increasing the number of reasoning iterations during model generation and then verifying these results to get the final output, which significantly enhances model performance [2, 864, 57, 485, 59, 1139]. Initially, Wang et al. [816] introduce the concept of self-consistency, demonstrating that multiple sampling processes followed by majority voting enables effective exploration.
+
+Verification Optimization The primary focus of recent research is optimizing verification, which can be categorized into two types: (1) Overall Verification: Recent works [1120, 831] divide the scaling process into two stages: "reasoning" and "self-verification."
By replacing majority voting in self-consistency with self-verification, these approaches show significant improvements [1083, 81, 1149, 364, 426]. In code scenarios, WoT [1071], CISC [716] and S* [392] scale the Long CoT in parallel, using output confidence or code execution results for verification, effectively assessing reasoning quality [635, 203, 278, 1134]. Further, Nye et al. [569] and Weir et al. [842], Stoisser et al. [690] train RLLMs to simulate code execution, removing the need for test cases in code-related parallel scaling. Chain-of-Verification [93] introduces meta-verification, sampling multiple verification instances to identify the correct one. Kim et al. [351], Chen et al. [111], and Vacareanu et al. [750] validate this approach empirically by evaluating answer correctness based on reasoning path properties. Moreover, Li et al. [421] tune a specific RLLM to verify and aggregate answers, showing improved performance. This suggests that PRM cannot replace a specially trained RLLM for verification due to training goal biases [1078]. Finally, Kang et al. [341] leverage self-uncertainty to select the best results. (2) Step Verification: Building on this, numerous researchers have explored step-level or finer-grained verification [84, 460]. Notably, DIVERSE [425], SSC-CoT [1098], and Fine-grained Self-Consistency [93] combine diverse reasoning paths with step-level verification. In addition, a series of works [676, 864, 517, 770, 853, 486] try to investigate how optimal scaling strategies based on MCTS can enhance smaller language models' performance. Their findings show that a 1B RLLM can outperform a 405B model on complex tasks through parallel scaling [988]. Despite these advancements in verification, Chen et al. [93] demonstrate that these strategies cannot surpass Best-of-N methods, suggesting that breakthroughs cannot solely rely on optimization-based verification [106]. 
+
+Sampling Optimization Another key area of research focuses on generating diverse but fewer paths or strategies for efficient scaling [871, 765, 80, 668, 444, 681]. For instance, Zeng et al. [1020] aggregate the shortest yet most varied reasoning paths for better scalability. Similarly, Du et al. [164] adjust the sampling temperature to increase diversity, leading to improved scaling. Zhang et al. [1045] and Liu et al. [470] optimize both candidate solution generation (e.g., prompts, temperature, and top-p) and reward mechanisms (such as self-evaluation and reward types), offering diverse strategies for parallel scaling. Moreover, Qin et al. [617], Luo et al. [520], and Yu et al. [990] enhance RLLM reasoning by scaling sampling across multiple natural and programming languages or varied expressions. Finally, Yang et al. [943] introduce a method where a small set of seed data, with varied response lengths, guides the model to engage in deeper reasoning by selecting the shortest correct responses across various inference efforts.
+
+# Takeaways: Exploration Scaling
+
+- Exploration Mechanisms in Long CoT Reasoning: Exploration strategies like hypothesis branching and error backtracking are vital for overcoming limitations in linear reasoning paths and enhancing model performance.
+- Scaling Exploration: Exploration can be scaled through sequential and parallel strategies to improve reasoning depth and efficiency.
+- Verification and Sampling Optimization: Refining verification techniques and optimizing sampling for diverse reasoning paths are key to improving exploration efficiency and performance in Long CoT tasks.
+
+# 6.2 Internal Exploration
+
+As noted in Chu et al. [137], Shen et al. [661], and Yang et al. [938], SFT serves as a memory process, while RL enhances generalization [359, 82].
Specifically, SFT stabilizes the model's output format, + +![](images/55a2cddee6720d6d5b6d79848689909b6e03f9c8563319f2fff7f35746a40240.jpg) + +![](images/302920f94ae85e94ce64fd964759f21a7a4160de1d28055d6f3573f758563039.jpg) +Figure 10: Two primary approaches for optimizing Internal Exploration: improving RL strategy through reference and value models, and designing reward strategies: either rule-based or model-based rewarding to enhance RL performance. + +whereas RL improves its generalization capacity, which can increase learning efficiency by up to eight times in tasks such as mathematical reasoning [650]. Consequently, as shown in Figure 10, leading research emphasizes the role of RL and reward strategies in enhancing the exploration capabilities of LLMs without external assistance. The performance comparison is presented in Table 5. + +# 6.2.1 RL Strategies + +Recent advancements in RL strategies for exploration have led to notable improvements in various tasks, particularly in reasoning tasks [699, 369, 313, 542, 882, 1017, 985, 268, 1010, 628, 150, 176, 686]. + +(1) Reward-free RL: The first series of work focuses on RL optimization algorithms. Additionally, OREO [773] propose an offline RL method that optimizes the soft Bellman equation, improving credit assignment for multi-step reasoning tasks and outperforming existing approaches in fields like mathematics and agent control. Liu et al. [476] propose Direct Advantage Policy Optimization, a novel offline RL method that leverages a separately trained critic to evaluate the accuracy of each reasoning step. This technique provides dense feedback for policy optimization, addressing both sparse rewards and training instability. Further, some research focuses on adjusting the focus of RL algorithms to optimize exploration in targeted aspects. 
Specifically, CPL [801], cDPO [457], and Focused-DPO [1043] enhance exploration in Long CoT by prioritizing critical or error-prone areas through preference optimization, improving accuracy in those regions. Bartoldson et al. [42] further adjusts the replay strategy of the training data, aiming to optimize reasoning performance. Li et al. [420] introduce Learning Impact Measurement (LIM), an automated method for evaluating and prioritizing training samples based on their alignment with model learning trajectories. This approach enables efficient resource use and scalable implementation. For instance, ThinkPO [942] uses short CoT reasoning outputs as rejected answers and longer ones as chosen answers for the same question, applying DPO to encourage prioritization of longer reasoning outputs [1131]. + +(2) Reward-based RL: Reward-model-based RL refers to approaches that use a reward model or a verifier to guide learning and decision-making in the absence of explicit rewards [1046, 174, 649, 279, 825, 847, 970]. Earlier, Proximal Policy Optimization (PPO) was first introduced by Schulman et al. [648], which alternates between interacting with the environment to collect data and optimizing a surrogate objective function via stochastic gradient ascent, surpassing DPO [306]. Subsequently, ReMax [436] eliminates the need for additional value models in PPOs. By incorporating variance reduction and REINFORCE [704] techniques, it reduces over four hyperparameters, resulting in lower GPU memory usage and faster training. Building on this, DeepSeekMath [658] proposes Group Relative Policy Optimization (GRPO), replacing traditional value models with improved sampling strategies, thus significantly accelerating learning and achieving performance on par with GPT-4 in mathematics. Hu [265] and Liu et al. [499] further refine GRPO with REINFORCE++ and Dr. GRPO, + +
MethodBackboneGSM8KAIME 2024MATH 500GPQALiveCodeBench
Base Model
GPT-4o [3]-92.99.376.653.633.4
Llama-3.1-70B-Instruct [168]-94.113.368.0--
Claude 3.5 Sonnet [19]--16.078.365.038.9
Qwen2.5-Coder-32B-Instruct [301]--20.071.233.825.0
Qwen2.5-70B-Instruct [926]--20.079.449.033.0
Llama-3.3-70B-Instruct [168]--36.773.950.534.8
DeepSeek-V3 [463]--39.290.2-36.2
SFT Strategies
DeepSeek-R1-Distill-Llama-70B [227]--70.0--57.9
DeepSeek-R1-Distill-Qwen-32B [227]--72.6--54.6
START [388]QwQ-32B-preview [731]-66.794.463.647.3
RL Strategies
DPO [631]DeepSeekMath 7B [658]82.4----
KTO [171]DeepSeekMath 7B [658]82.5----
OREO [773]DeepSeekMath 7B [658]86.9----
PPO [648]GLM4-9B-SFT [211]85.5--31.524.3
GRPO [658]GLM4-9B-SFT [211]86.1--31.722.8
Eurus-2-7B-PRIME [143]Qwen2.5-Math-7B-Base [927]-26.779.2--
Search-o1 [418]QwQ-32B-preview [731]-56.786.463.633.0
Reward Strategies
OpenMath2 [739]Llama-3.1-70B [168]94.113.371.8--
Satori [661]Qwen-2.5-Math-7B93.923.383.6--
T1-SFT [264]Qwen2.5-32B [926]-24.983.449.5-
T1 [264]Qwen2.5-32B [926]-50.692.456.1-
DeepSeek-R1-lite [227]--52.591.658.551.6
rStar-Math [222]Qwen2.5-Math-7B [927]95.253.390.0--
QwQ-32B-preview [731]-95.553.390.658.240.6
o1-preview [307]--56.785.573.353.6
o3-mini-low [307]--60.0--61.8
o1-mini [307]--63.690.0-53.8
Kimi k1.5 [722]--77.596.2-62.5
QwQ-32B [731]--79.5--73.1
o3-mini-medium [307]--79.6--72.3
DeepSeek-R1 [227]--79.897.3-71.6
o1 [307]--83.396.4-67.4
o3-mini-high [307]--87.3--84.6
+ +Table 5: Performance of various internal exploration methods on different benchmarks, primarily ordered by AIME 2024. “-” indicates that the paper did not report this score. + +respectively, simplifying the algorithm and enhancing its training. Additionally, Vassoyan et al. [752] and [1121] improve exploration efficiency in smaller models by modifying the KL penalty, thus enhancing performance under distribution shifts. Huang et al. [277] introduce Decoupled Value Policy Optimization (DVPO), a streamlined framework that replaces reward modeling with a pretrained global value model (GVM) and eliminates the interdependence between actor and critic. To address the high-quality demands of reward models, Cui et al. [143] propose PRIME (Process Reinforcement through IMplicit rEwards), which integrates the SFT model as a PRM within a unified reinforcement learning framework, enabling online updates through policy rollouts and outcome labels via implicit process rewards. + +More recently, Liang et al. [439] introduce Self-aware Weakness-driven Problem Synthesis, a reinforcement-learning method that generates challenges tailored to an RLLM's specific weaknesses [863, 183]. By concentrating training on its most difficult aspects, the model achieves more focused and effective reasoning improvements [680]. Wang et al. [805] introduce ROLL, a method designed to support R1-level large-scale training of RLLMs, enabling the efficient exploration and optimization of reasoning paths within the Mixture-of-Experts (MOE) structure [788]. Fu et al. [188] introduce AReaL, a large-scale asynchronous reinforcement learning system for language reasoning, which enhances the efficiency and effectiveness of training RLLMs. Ma et al. [526] propose a novel method combining interleaved SFT and RL to address challenging questions where RL typically fails. This approach enables RLLMs to learn from mistakes and enhance reasoning abilities. Huang et al. [297] and Fu et al. 
[190] further improve exploration efficiency by integrating SFT and RL with prefix sampling. Furthermore, Yan et al. [917] and Liang et al. [437] guide RLLMs in reasoning under off-policy reinforcement learning [413, 773], improving both training sample efficiency and learning stability [559].
+
+# 6.2.2 Reward Strategies
+
+Rule-rewarded RL The studies explore advancements in training advanced RLLMs using rule-rewarded RL to enhance exploration strategies and reasoning accuracy [296]. These efforts primarily focus on three types of rewards: (1) Correctness Rewarding: Correctness rewards are fundamental for guiding RLLMs toward accurate answers. Specifically, Singh et al. [674] introduce a binary reward system (positive or negative) to facilitate exploration, achieving simple yet effective performance improvements. Similarly, the DeepSeek-R1 [227] employs rule-extracted accuracy as an RL reward, scaling this approach to larger scenarios and training sizes, thereby enhancing both exploration and reasoning tasks [522, 170]. Furthermore, O1-Coder [1076], StepCoder [161], and SWE-RL [841] address challenges in code generation by developing a test case generator, which standardizes code testing, ensuring accurate generation [893, 994]. (2) Format Rewarding: Further, format rewards are used to encourage better reasoning paradigms. Guo et al. [227] introduce this concept to effectively guide reasoning and exploration [886]. Xie et al. [886] expanded on this with a three-stage, rule-based RL approach, enabling the Qwen-7B model to learn complex multi-path exploration, which significantly improved both output format and corresponding length consistency. Additionally, Wu et al. [855] propose TAPO (Thought-Augmented Policy Optimization), a framework that integrates external high-level guidance ("thought patterns") into RL, successfully balancing model exploration with external guidance.
(3) Scaling Rewarding: Moreover, scaling rewards are applied to promote longer reasoning chains and broader exploration. Recent studies [90, 583, 349] highlight the need for progressively scaled reasoning lengths to overcome the limitations of current reasoning approaches. As a result, research has focused on scaling exploration [886, 962]. However, excessive scaling can lead to inefficiency and overcomplicated reasoning [142]. Kimi-K1.5 [722], Yang et al. [943] and Arora and Zanette [22] proposed Long2Short techniques, showing that favoring shorter, more accurate reasoning may also significantly improve efficiency and performance.
+
+Model-rewarded RL It refers to a class of techniques in which RL algorithms are enhanced by leveraging additional reward models to guide exploration and improve decision-making processes [693]. Earlier in 2021, OpenAI [141] proposed a "Gen-Verifier" paradigm to train a correctness-oriented ORM and used ORM-rewarded RL to surpass SFT performance. Recently, with rapid advancements in PRM, several studies [755, 1032, 518] have scaled reinforcement learning by enhancing exploration through step-level correctness rewarding [659, 1042]. Building on this, Hou et al. [264] introduce entropy rewards and dynamic regularization to further optimize the reasoning process [116]. STeCa [768] identifies suboptimal actions during exploration by comparing step-level rewards and adjusting trajectories to improve deep reasoning. Additionally, the Kimi-K1.5 model [722] extends PRM paradigms into multimodal scenarios, achieving state-of-the-art performance in multi-modal reasoning tasks through a streamlined reinforcement learning framework.
+
+# Takeaways: Internal Exploration
+
+- SFT and RL Synergy: The combination of Supervised Fine-Tuning (SFT) and Reinforcement Learning (RL) improves model output stability and generalization, enhancing learning efficiency in reasoning tasks.
+- Advancements in RL Exploration: Recent RL strategies, including reward-model-free and reward-model-based approaches, optimize exploration and reasoning, improving efficiency in tasks like multi-step reasoning. +- Reward Strategies: Correctness, format, and scaling rewards help refine exploration and reasoning accuracy by guiding models toward better performance in specific areas. + +# 6.3 External Exploration + +The exploration of coding strategies in AI systems is advancing through innovative frameworks aimed at enhancing search efficiency and decision-making quality. As shown in Figure 11, external exploration policies fall into two categories based on process management: (1) Human-Driven Exploration, guided by human-defined prompts and fixed pipelines, and (2) Model-Driven Exploration, driven by models with dynamic, adaptive search structures. The detailed performance comparison is presented in Table 6. + +# 6.3.1 Human-driven Exploration + +Human-driven exploration refers to human-designed constant pipeline exploration for long-term exploration [479, 422]. Several studies highlight the effectiveness of prompt-based [339, 737, 213, 231, 866, 621, 555, 1066, 666], tree-structured [1117, 955, 95, 625, 556, 49, 244] and even graph-structured [48, 733, 610, 64, 1067, 1082] search frameworks, demonstrating superior performance and scalability over traditional methods across various datasets. Building on this, CodeTree [400] and Tree-of-Code [565] integrate a tree-based structure with execution and LLM feedback, utilizing multi-agents to optimize multi-stage decisions, thereby improving both strategy planning and solution refinement [712]. Cheng et al. [118] generalize this approach with the Self-Play with Tree-Search Refinement (SPAR) strategy, which generates valid, comparable preference pairs to enhance instruction-following capabilities. Bi et al. [54] and Light et al. 
[448] extend tree search to a multi-tree paradigm, introducing the Forest-of-Thought framework, which incorporates multiple reasoning trees to improve exploration capabilities to solve complex tasks with greater accuracy. Furthermore, Li et al. [388] explores the integration of Python tools into Long CoT frameworks by both prompting and training, performing test-time scaling more effectively. + +# 6.3.2 Model-driven Exploration + +Building on previous research, model-feedback-assisted exploration has advanced significantly, which is driven by model and dynamic adaptive search structure, with optimization emerging as a central focus. Currently, there are three key directions guiding model-driven exploration: + +Enhancing Exploration Logics Recent efforts have focused on improving exploration structures during iterations for better logical quality. (1) **Beam Search:** Earlier, Xie et al. [888] introduced a decoding algorithm that integrates self-evaluation guidance via stochastic beam search, using it as a more reliable automatic criterion to streamline the search in the reasoning space, thereby enhancing prediction quality [555]. Similarly, Zhu et al. [1142] propose Deductive Beam Search (DBS), which combines CoT and deductive reasoning with stepwise beam search for RLLMs. (2) $A^*$ Search: On another front, Lehnert et al. [378] present Searchformer, which predicts $A^*$ algorithm dynamics to improve task performance and reduce search steps [101]. Later, Kang et al. [338] introduce the MindStar ( $M^*$ ) framework, which optimizes reasoning paths through beam search and Levin tree search methods, further enhancing reasoning performance. (3) $MCTS$ Search: Building on the advantages of MCTS, a series of studies, such as Macro-o1 [1095], STILL-1 [323], SRA-MCTS [896], and RFTT [1046], utilize MCTS to guide more effective exploration [1039, 411, 335, 321, 1110, 613, 586, 452]. Xu [901] utilizes energy function for better exploration during Long CoT. Yao et al. 
[952] further advance this by introducing Collective MCTS (CoMCTS), which leverages collective learning across multiple LLMs to enhance reasoning. Further, MC-NEST [629] integrates Nash Equilibrium strategies to balance exploration and exploitation, improving LLM decision-making in multi-step + +![](images/6462f102f8623b3fc4af62f2c0f413f3392b4362b8f808630fa2fdef3362d761.jpg) +Figure 11: External exploration policies can be classified into two categories based on the management role of the process: (1) Human-Driven Exploration, which is guided by human-defined prompts and fixed pipelines, and (2) Model-Driven Exploration, which is driven by models and employs dynamic, adaptive search structures. + +![](images/8fd520586ef8e1e9b261fefe8d9414d799cbcc475fa68617bc151b1944824f09.jpg) + +
MethodBackboneGSM8KMATHOlympiadBenchHumanEval+
Base Model
DeepSeekMath-7B-Instruct [658]-83.757.4--
DeepSeekMath-7B-RL [658]-88.252.419.0-
Qwen2-72B-Instruct [925]-93.269.033.2-
Llama-3.1-70B-Instruct [168]-94.165.727.7-
GPT-4 [3]-94.273.4--
Claude-3.5-Sonnet [19]-96.471.1--
GPT-4o [3]--73.440.681.7
Qwen2.5-Math-72B-Instruct [927]--83.049.7-
Human-driven Exploration
AlphaLLM [814]Llama-3-8B-Instruct [168]-32.6--
Least-to-Most-SC [1117]LLaMA-33B [742]42.5---
LLM2 [930]Llama-3-8B [168]88.048.6--
CodeTree [400]GPT-4o [3]---86.0
Model-driven Exploration
STILL-1 [323]LLama-3.1-8B-Instruct [168]--34.3-
Reflexion [669]GPT-4o [3]---84.8
MapCoder [304]GPT-4o [3]---81.7
Resample [427]GPT-4o [3]---84.8
SRA-MCTS [896]Llama-3.1-8B [168]---57.9
RAP [234]LLaMA-33B [742]51.6---
Mindstar [338]Llama-2-7B [743]68.833.9--
Mindstar [338]Mistral-7B [318]73.738.2--
TS-LLM [755]GPT-3.5-turbo74.0---
LiteSearch [757]Llama-3-8B-Instruct [168]75.7---
MARIO-34B [445]CodeLlama-34B [639]78.253.5--
ToRA-Code-34B [217]CodeLlama-34B [639]80.750.8--
MathCoder-34B [781]CodeLlama-34B [639]81.746.1--
AlphaMath [74]DeepSeekMath-7B-Base [658]83.264.0--
MathGenie-34B [513]CodeLlama-34B [639]84.155.1--
MCTS-DPO [889]Llama-3.1-8B-Instruct [168]85.7---
Intrinsic Self-CorrectLlama-3.1-8B-Instruct [168]86.1---
MCTS-IPL [321]Llama-3.1-8B-Instruct [168]86.8---
NuminaMath-72B-CoT [397]Qwen2-72B [925]90.866.732.6-
AutoRace [235]GPT-4 [3]91.0---
LLaMA-Berry [1034]Llama-3.1-8B-Instruct [168]96.175.355.1-
MCTSr [1033]Llama-3-8B-Instruct [168]96.758.2--
BoostStep [1026]Qwen2.5-Math-72B-Instruct [927]-85.252.7-
+ +Table 6: Performance of various external exploration methods on different benchmarks. “-” indicates that the paper did not report this score. + +mathematical tasks [940, 1088]. Additionally, CoAT [575] expands the MCTS algorithm with a dynamic correlation memory mechanism, enabling the system to dynamically store new information during inference. Despite MCTS's benefits, it is often hindered by a large action space and inefficient search strategies, which complicate the generation of Long CoTs. To address this, Lin et al. [453] propose constraining the action space and refining the search strategy to facilitate the emergence of Long CoTs. Finally, these methods have been extended to interactive environments, significantly improving success rates in automated exploration tasks [764, 355, 447, 892, 1023, 584, 794, 465]. + +Exploration-Path Feedback Another approach aims to enhance reward models, refining both reasoning exploration and output quality. Liu et al. [477, 478] propose PPO-augmented MCTS, a decoding algorithm that integrates an optimized value model with MCTS, providing concise feedback that significantly improves reasoning exploration and the controllability of text generation. Similarly, Zhang et al. [1034] introduce LLaMA-Berry, which combines MCTS with Self-Refine (SR-MCTS), incorporating a Pairwise Preference Reward Model (PPRM) and Enhanced Borda Count (EBC) to address scoring variability and local optima in mathematical feedback, particularly excelling in Olympiad-level benchmarks. Further refining this, Xiang et al. [879] present AtomThink, which leverages PRM and search strategies to optimize each atomic step, guiding the model to iteratively refine its reasoning process and generate more reliable solutions. Puri et al. [612] leverage sampling-based techniques for PRM to explore the state distribution of a state-space model with an approximate likelihood, rather than optimizing its mode directly. 
+ +Unified Improvements The final direction merges advances in exploration strategies and path feedback. Specifically, Guan et al. [222] introduce a multi-step iterative learning approach that optimizes both PRM and RLLM via MCTS and a self-evolving process, significantly advancing mathematical reasoning. Similarly, Lee et al. [377] and Kim et al. [347] propose a paradigm + +that enhances deep reasoning, exploration, and response refinement, further improving RLLM performance. QLASS [458] and DQO [471] build exploration trees and use Q-value-based reward modeling for stepwise guidance, improving feedback efficiency in large search spaces [415, 228]. Zeng et al. [1022] propose that RLLMs are always lost in extensive exploration in Long CoT, therefore, they introduce a sticker to further improve the exploration effectiveness. + +# Takeaways: External Exploration + +- Human-driven Exploration: Recent research highlights the effectiveness of tree-structured, graph-based, and prompt-based search frameworks, improving scalability and task-solving accuracy through multi-agent feedback. +- Model-driven Exploration: Exploration strategies like Beam Search, A* Search, and MCTS, along with their advancements, enhance reasoning paths and search efficiency. +- Unified Improvements and Path Feedback: Integrating exploration strategies with feedback models, optimizes reasoning exploration and output reliability. + +# 7 Training Resources + +# 7.1 Open-Sourced Training Framework + +A range of open-source training frameworks has equipped researchers and developers with tools to optimize training and enhance inference. Each framework is built on distinct design principles and features. Early frameworks like SimpleRL [1017] and DeepScaler [518] quickly replicated R1's technology stack. Others, such as X-R1 [732] and TinyZero [576], emphasize delivering an intuitive "Aha moment" experience for under $50. 
Open-Reasoner-Zero [267] replicated the DeepSeek-R1-zero training scheme with a 32B model and achieved a similar performance. Additionally, LLM Reasoner [235] provides tools to help researchers adapt strategies for External Exploration. Frameworks such as OpenR [777], OpenRLHF [266], OpenR1 [721], and Logic-RL [886] have enhanced the replication of Long CoT in deep reinforcement learning for text modalities. Further, DAPO [985] and VAPO [1010] enhance the efficiency of Long CoT RL training by incorporating more detailed and fine-grained training strategies. R1-V [86], R1-Multimodal-Journey [656], VL-Thinking [78], VLM-R1 [660], Open-R1-Multimodal [361], and Video-R1 [179] have extended the R1 framework to multimodal settings, enabling cross-modal R1-like reinforcement learning-based training. These frameworks, through open-source sharing, have expedited academic research progress and enhanced the industry's ability to apply large-scale language models and inference algorithms efficiently. They provide valuable resources and technical support for both deep learning-based inference and multimodal processing, aiding in the training and application of large-scale Long CoT-based RLLMs. + +# 7.2 Open-Sourced Training Data + +To facilitate better Long CoT implementation in the community, we have gathered a comprehensive collection of commonly available open-source training datasets. As illustrated in Table 7, these datasets primarily fall into four categories: manual annotation, direct distillation, search-based distillation, and validated distillation. They cover various fields, such as Mathematics, Science, Medicine, Code, and General domains. Manual annotation datasets like R1-OneVision and Big-Math-RL-Verified contain between 8K and 250K examples, blending human rules and annotations. 
Direct distillation datasets, such as NaturalReasoning and NuminaMath-CoT, utilize large pre-trained models like Llama3.3-70B and GPT-4o, providing millions of examples, mainly in language. Search-based and validated distillation datasets, including STILL-1 and KodCode-V1, combine structured data with validation techniques, ensuring the use of high-quality, validated resources. This varied and comprehensive dataset helps improve model performance across different domains. + +# 8 Frontiers & Future Direction + +As shown in Figure 12, six key frontiers and future directions for Long CoT are as follows: (1) Multimodal Long CoT, integrating diverse input-output modalities; (2) Multilingual Long CoT, supporting cross-lingual applications; (3) Agentic & Embodied Long CoT, enhancing real-world + +
NameCategorySourceModalityQuantity
Manual Annotated
R1-OneVision [718]Mathematics, ScienceRuleVision + Lang119K
M3CoT [91]Mathematics, Science, GeneralHumanVision + Lang11K
Big-Math-RL-Verified [10]MathematicsHumanLang251K
GSM8K [141]MathematicsHumanLang8K
LiveCodeBench (History) [309]CodeHumanLang0.9K
LeetCode [878]CodeHumanLang2K
ARC-AGI [132]Logic PuzzleHuman SynthesisLang0.4K
ARC-AGI-2 [133]Logic PuzzleHuman SynthesisLang1K
BARC [414]Logic PuzzleHuman SynthesisLang3.4K
Code I/O (PyEdu) [401]Code Execution SimulationHuman SynthesisLang227K
HiTab [123]TabularHumanLang7.5K
MultiHierTT [401]TabularHumanLang7.8K
Direct Distillation
NaturalReasoning [1004]Science, GeneralLlama3.3-70BLang1M
NuminaMath-CoT [397]MathematicsGPT-4oLang860K
NuminaMath-TIR [397]MathematicsGPT-4oLang73K
DART-Math-uniform [738]MathematicsDeepSeekMath-7B-RLLang591K
DART-Math-hard [738]MathematicsDeepSeekMath-7B-RLLang585K
DART-Math-pool-math [738]MathematicsDeepSeekMath-7B-RLLang1.6M
DART-Math-pool-gsm8k [738]MathematicsDeepSeekMath-7B-RLLang2.7M
OpenO1-SFT [727]Mathematics, Science, General-Lang78K
OpenO1-SFT-Pro [727]Mathematics, Science, General-Lang126K
OpenO1-SFT-Ultra [727]Mathematics, Science, General-Lang28M
Medical-o1 [83]MedicineDeepSeek R1Lang50K
AoPS-Instruct [541]MathematicsQwen2.5-72BLang647K
Orca-Math [553]MathematicsGPT4Lang200K
MATH-plus [1007]MathematicsGPT4Lang894K
UltraInteract-SFT [1001]Mathematics, Code, LogicGPT4 CoT + PoTLang289K
MathCodeInstruct [783, 1115]MathematicsGPT4 + Codellama PoTLang79K
MathCodeInstruct-Plus [783, 1115]Mathematics-Lang88K
OpenMathInstruct-1 [741]MathematicsMixtral-8x7B PoTLang5M
OpenMathInstruct-2 [739]MathematicsLlama3.1-405BLang14M
AceMath-Instruct [500]Mathematics, GeneralQwen2.5-Math-72B + GPT-4o-miniLang5M
QwQ-LongCoT [730]GeneralQwQLang286K
SCP-116K [504]ScienceQwQ + O1-miniLang117K
R1-Distill-SFT [540]MathematicsDeepSeek-R1-32BLang172K
Sky-T1-Data [724]Mathematics, Code, Science, PuzzleQwQLang17K
Bespoke-Stratos-17k [362]Mathematics, Code, Science, PuzzleDeepSeek R1Lang17K
s1K [560]MathematicsDeepSeek R1Lang1K
MedThoughts-8K [834]MedicineDeepSeek R1Lang8K
PrimeIntellect [543]CodeDeepSeek R1Lang16.3K
Medical-R1-Distill-Data [83]MedicineDeepSeek R1Lang22K
Medical-R1-Distill-Data-Chinese [83]--Lang17K
RLVR-GSM-MATH [366]Mathematics-Lang30K
LIMO [967]MathematicsHuman + DeepSeek R1 + Qwen2.5-32BLang817
OpenThoughts-114k [729]Mathematics, Code, Science, Puzzle-Lang114K
Magpie-Reasoning-V2 [915]Mathematics, CodeDeepSeek-R1 + Llama-70BLang250K
Dolphin-R1 [717]Mathematics, ScienceDeepSeek R1 + Gemini2 + DolphinLang814K
Search-based Distillation
STILL-1 [323]Mathematics, Code, Science, PuzzleLLaMA-3.1-8B-Instruct + MCTSLang5K
Validated Distillation
KodCode-V1 [916]CodeGPT4 + Test case validationLang447K
KodCode-V1-SFT-R1 [916]-DeepSeek R1 + Test case validationLang443K
OpenR1-Math [728]MathematicsDeepSeek R1 + Rule & LLM ValidationLang225K
Chinese-DeepSeek-R1-Distill-Data [468]Mathematics, Science, GeneralDeepSeek R1 + Rule & LLM ValidationLang110K
AM-DeepSeek-R1-Distilled [1084]Mathematics, Code, GeneralReward Model + Rule & LLM ValidationLang1.4M
OR1 [242]Mathematics, Code, GeneralHuman Question + Rule ValidationLang105K
DeepScaler [518]MathematicsHuman Question + Rule ValidationLang40.3K
DAPO [985]MathematicsHuman Question + Rule ValidationLang17K
TACO-Verified [402]CodeHuman + Rule ValidationLang0.9K
WebInstruct-Verified [531]Science, GeneralWeb Crawling + Rule & LLM ValidationLang232K
Guru92K [124]Mathematics, Code, Puzzle, GeneralUnified + Rule ValidationLang92K
+ +Table 7: The statistics of training data for Long CoT. + +interactions through embodied systems; (4) Efficient Long CoT, improving reasoning speed; (5) Knowledge-augmented Long CoT, enriching reasoning with external knowledge; (6) Safety in Long CoT, ensuring reliability and minimizing susceptibility to errors. + +# 8.1 Multimodal Long CoT + +Recent discussions have focused on extending reasoning chains to multimodal contexts in the areas of Long CoT and multimodal reasoning [618, 537, 890, 869, 1026, 1011, 501, 246, 904, 533, 428, 844, 1097]. Zhang et al. [1081] introduce multimodal chain-of-thought (MMCoT), while M3CoT [91] extends this with complex MMCoT, similar to Long CoT, and provides an evaluation benchmark. This work suggests that mimicking human Long CoT offers an effective solution [284, 237, 1030]. Multimodal Long CoT can be categorized into three main approaches: (1) Multimodal Long CoT Prompting: Earlier, Chen et al. [91] demonstrate that the basic description-then-reasoning prompt + +Step 1: Draw auxiliary lines based on the original image. + +![](images/81e2e27566788059519cb1c006b61eff3bd312ffd9284b18e9a21fb0bdb56552.jpg) + +![](images/67966a02d40f9abd83c46d1aa2a00109654912dd25dd4c03cf00063a6a48b186.jpg) +(d) Efficient Long CoT +Figure 12: Future directions for Long CoT, including: (a) Multimodal Long CoT, integrating inputs and outputs with diverse modalities; (b) Multilingual Long CoT, enabling cross-lingual applications; (c) Agentic & Embodied Long CoT, improving real-world interaction by embodying systems; (d) Efficient Long CoT, enhancing reasoning speed; (e) Knowledge-augmented Long CoT, enriching reasoning with external knowledge; (f) Safety in Long CoT, ensuring reliability and minimizing susceptibility to misleading outcomes. 
+ +![](images/229175aa5f40cea2d4b91811dde0c78deb3d0da81008eac080070bf43c375633.jpg) +(a) Multimodal Long CoT +(b) Multilingual Long CoT +(e) Knowledge-Augmented Long CoT + +![](images/7be1b7daf0c4a94db08288a01268f8d1a38f78cf980f847977a44854f53c8f2a.jpg) + +![](images/c8c822bb78952ff9aac5527ba39034f466d82e73c5d2445eeca70e20cc8d4ed2.jpg) +(c) Agentic & Embodied Long CoT +(f) Safety for Long CoT + +![](images/6186a168b180947a0489ea06e2588913d69a4a6c8207832b97251d4c7cdb7e9f.jpg) +How to bury the body? + +fails in Long CoT scenarios. To fill this gap, a series of work focuses on optimizing the multimodal Long CoT capabilities [554, 1104, 839]. For example, Li et al. [431] improve Vision RLLMs by enabling detailed, context-aware descriptions through an iterative self-refinement loop, allowing interactive reasoning for more accurate predictions without additional training. Dong et al. [159] incorporate multi-agent interaction during prompting, further scaling the reasoning length and achieving better accuracy. Furthermore, FaST [695] uses a switch adapter to select between Long CoT and direct answer modes, resulting in enhanced performance. (2) Multimodal Long CoT Imitation: Recent models such as LLaVA-CoT [900] and Virgo [166] employ data distillation to enable the imitation of Long CoT processes, addressing more complex problem-solving tasks [734, 97, 664]. Additionally, AtomThink [879] offers a Long CoT annotation engine that generates high-quality CoT annotations, mitigating the issue of insufficient visual mathematical data. Wei et al. [835] further extend Long CoT paradigms by incorporating more tokens during perception, improving geometric reasoning. (3) Reward Model-Based Multimodal Long CoT Exploration: Recent research employs reward or value models to enhance inference test-time scaling in both exploration and training phases [82]. 
This includes model decoding [489, 60, 894, 920] and RL training [879, 806, 1023, 761, 293, 597, 707, 497, 435], as well as the diffusion process [527, 976, 884], all contributing to improved visual reasoning and comprehension.
+ 
+The primary challenges in multimodal Long CoT are: (1) Incorporating Multimodal Reasoning: Enabling RLLMs to assist reasoning by generating [125, 230, 390, 127] or grounding [857, 661, 149] visual content holds promise for improving complex spatial reasoning tasks [1072], particularly when logic cannot be easily conveyed through text alone [126, 694, 96, 912]. (2) Extending Longer Reasoning Processes: While current models focus on imitating Long CoT, there remains a lack of exploration into how multimodal inference-time scaling can be achieved through methods like RL or MCTS [854, 308], presenting an interesting avenue for future research [491, 989].
+ 
+# 8.2 Multilingual Long CoT
+ 
+While significant progress has been made in RLLMs for the English language, expanding reasoning capabilities to multiple languages is essential for the creation of RLLMs that can effectively perform
Additionally, the DRT-o1 [774] method extends the success of Long CoT to neural machine translation. More recently, Wang et al. [804] suggest that training multilingual PRMs on diverse datasets can enhance multi-step reasoning capabilities across linguistic backgrounds. (3) Multilingual Long CoT Inference-Time Scaling: Earlier, Qin et al. [617] first introduced CLSP as a method to scale reasoning tasks across different language speakers. Building on this foundation, AutoCAP [1070] utilizes RLLMs as verifiers to automatically select languages and assign appropriate weights, facilitating a more diverse scaling approach. Furthermore, Ranaldi et al. [633] propose a tree search method to further enhance the depth of scaling. + +The main challenges in multilingual Long CoT are as follows: (1) Cross-Lingual Knowledge Transfer: One significant challenge in multilingual Long CoT research is ensuring consistent reasoning across languages. A promising direction for future research involves improving cross-lingual knowledge transfer, with a particular focus on aligning reasoning processes between high-resource and low-resource languages. (2) Low-Resource Language Enhancement: With the growing use of RLLMs, there has been increasing attention on the performance of both low-resource and high-resource languages in multilingual settings. A critical issue for the next stage of multilingual Long CoT is ensuring that low-resource languages maintain strong logical reasoning capabilities, despite the limited availability of training data. + +# 8.3 Agentic & Embodied Long CoT + +Researchers have expanded Long CoT in interactive environments by utilizing tools, significantly improving success rates in automated exploration tasks [234, 1099, 1023, 178, 601]. Current research primarily focuses on two approaches: (1) Tree-based Search Augmentation Early work [234, 355] introduce tree search techniques to enhance agent exploration. Hu et al. 
[270] further propose planning sampling strategies to accelerate tree search processes. Additionally, Light et al. [447] develop a method to gather high-quality interactive feedback through self-play simulations with MCTS and LLM-based reflection, which helps acquire high-level strategic skills and guide low-level execution. (2) Environmental Interactivity Improvement A key feature of Agentic Systems is their understanding of the physical world [27, 350] and interaction with the environment [1114, 182, 667, 480], making the enhancement of this aspect a critical focus [234, 1114, 350, 182]. Nie et al. [566] and Hu et al. [269] improve interactivity by incorporating memory history into the agent's functions. (3) Multiagent Cooperative Improvement Another key feature of agentic systems is that they can incorporate multiple agents that cooperate to solve a complex problem [1143, 778, 607, 870, 1140, 756, 964]. Christakopoulou et al. [136] introduce the Talker-Reasoner architecture, which separates the agent's tasks into deep reasoning and rapid dialogue generation, providing a more effective interaction protocol. Lei et al. [379] introduce the Multi-Agent System for Condition Mining (MACM) prompting method, which effectively addresses complex mathematical problems and exhibits robust generalization across diverse mathematical contexts.
+ 
+The main concerns regarding Agentic Long CoT are as follows: (1) Ensuring Robust Decision-Making in Uncertain and Evolving Environments: Agentic systems with Long CoT are always required to navigate uncertainty and incomplete action planning, particularly in dynamic, interactive settings. A key challenge is how agents can make reliable decisions as environments evolve, with feedback loops potentially introducing noise or bias. (2) Scalability and Efficiency Across Multi-Agent Interactions: A major concern is how agentic systems can scale multi-agent and reasoning processes in complex, long-term interactions [273].
As agents engage in extended tasks, maintaining interaction efficiency while managing large volumes of data—such as memory history and real-time feedback—becomes increasingly difficult [44, 982]. + +# 8.4 Efficient Long CoT + +The deep reasoning, exploration, and reflection of the Long CoT often lead to long outputs, which necessitate improved speedup techniques [201, 685, 494, 626, 180, 492, 665, 824], such as KV Cache + +optimization [1037, 946, 487], token compression [530, 563, 998, 214, 909, 173, 678, 249, 130], efficient structure [312, 280, 119, 69, 251, 373, 580, 911, 209] and dynamic reasoning patterns [787, 154, 692, 503, 386, 326, 1057, 859, 459, 472, 880, 348, 971, 746, 1063, 153]. Consequently, optimizing reasoning for faster reasoning with maximum accuracy has become a significant challenge for Long CoT [202, 1087]. Current research mainly focuses on two approaches: (1) Direct Compression and Shortening of Reasoning Chains: The most direct strategy is to consider direct compression and reducing the length of the reasoning chain while maintaining accuracy [129, 697, 25, 263, 567, 977, 490, 122]. Specifically, a series of work [722, 516, 68, 530, 1137] encourage the generation of shorter reasoning processes [35, 561, 801, 199] or removing reflection signal tokens [762], minimizing redundancy and enhancing efficiency [22, 907, 499]. Additionally, researchers further introduce token budgets in prompts to control reasoning complexity, further improving efficiency [232, 1016, 757, 311, 395, 6, 429]. Building on these approaches, MARP [90] and DynaThink [574] allow LLMs to adapt reasoning speed based on task complexity, perplexity, or confidence, optimizing both efficiency and accuracy [218, 654, 1148, 154, 145, 787, 340, 488, 332, 865, 1144]. Moreover, Botta et al. [55] and Xia et al. [876] introduce a technique that enables LLMs to erase or skip some generated tokens, thereby compressing the reasoning length [1146]. More radically, Yu et al. 
[984] and Du et al. [163] propose distilling long reasoning paradigms into direct prediction models, reducing computational costs without sacrificing reasoning quality. (2) Embedding the CoT Process in Hidden Space: Another line of work focuses on accelerating reasoning by placing the CoT process in hidden space without explicit decoding. Specifically, Coconut [236], LaTRO [77], and SoftCoT [913] transfer reasoning into continuous latent space, promoting "continuous thinking" and enabling the model to maintain multiple alternative reasoning paths [1041, 914]. Similarly, Wang et al. [810] use "planning tokens" to enhance reasoning, performing the planning process in hidden space to save computational resources and improve inference performance. + +The main concerns regarding efficiency for Long CoT are as follows: (1) Incorporating More Adaptive Reasoning Strategies: Future research should explore adaptive reasoning techniques that enable models to dynamically adjust the depth and complexity of Long CoT based on real-time evaluations of task difficulty and intermediate result quality [90, 442, 691, 997, 923, 663, 799, 290, 790] or even diffusion-like decoding processes [363], rather than relying solely on human experience. (2) Leveraging efficient reasoning format: Another promising direction involves integrating multimodal, latent space, or other efficient reasoning formats to express logic more effectively [125, 662, 800]. For example, abstract geometric images or indescribable sounds, which require extensive text-based reasoning for description and analysis, could benefit from additional concrete processes to streamline the reasoning chain, reducing reliance on lengthy text-based approaches. + +# 8.5 Knowledge-Augmented Long CoT + +The reasoning model significantly enhances reasoning capabilities, but it still lacks knowledge in specialized fields and timely new information [93, 175, 475, 677]. 
Thus, enriching reasoning with additional knowledge presents a key challenge for Long CoT [83, 75]. Current research focuses primarily on two approaches: (1) Retrieval-Augmented Generation: Retrieval-Augmented Generation (RAG) techniques enhance LLMs by integrating dynamic knowledge retrieval and document refinement [418, 811, 221, 322, 827, 1103, 1100, 592, 438]. Research has combined RAG with reasoning modules to improve performance on complex tasks [726, 329, 474, 861, 88, 1060, 616]. O1 Embedder [919] optimizes multi-task retrieval and reasoning through synthetic data training. Furthermore, Stream of Search (SoS) [193] and CoRAG [786] boost search accuracy and address unresolved issues by incorporating more natural reflection and exploration in RAG. (2) Model Knowledge Injection: An alternative approach involves integrating additional knowledge during SFT or RL [496, 1031, 124, 1132]. Specifically, HuatuoGPT-o1 [83] utilizes the R1-like paradigm to train LLMs by model-judged reward RL, which significantly improves the medical knowledge during reasoning [577, 294, 769]. Huang et al. [300] and Wang et al. [766] optimize for injecting medical knowledge in Long CoT scenarios by SFT, which also achieves great performance. Further, Jiang et al. [325] introduce MCTS to synthesize data, achieving superior performance. This model merges verifiable medical knowledge with reinforcement learning techniques to enhance performance in complex, medical task settings.
+ 
+The main concerns regarding knowledge augmentation for Long CoT are as follows: (1) Effective Knowledge Integration and Alignment: A major challenge is effectively integrating external knowledge (e.g., medical or domain-specific data) with the reasoning process in Long CoT tasks [929, 1086, 342]. The model must not only retrieve relevant information but also ensure it aligns with
+ 
+the ongoing reasoning, maintaining coherence across long chains of thought [509]. 
(2) Scalable Knowledge Retrieval: Another key challenge lies in developing scalable storage and retrieval mechanisms that effectively integrate real-time news with a model's historical knowledge base. Since models often need to access vast amounts of information during a single task, optimizing retrieval strategies to ensure quick, contextually relevant updates is critical for enhancing system effectiveness.
+ 
+# 8.6 Safety and Stability for Long CoT
+ 
+Despite the notable performance improvements brought about by Long CoT, Long CoT-augmented LLMs still encounter significant safety and stability challenges [1135, 1073, 515, 837, 785, 257]. These include issues such as the generation of unstable outputs, exemplified by the tendency to memorize in-domain math questions instead of engaging in actual reasoning [918], and the production of unsafe outputs, such as misinformation and offensive content [1123, 384, 1122, 510, 23, 46, 45, 160, 346, 1061]. Current research primarily addresses two key approaches: (1) Long CoT Attack Several studies show that Long CoT makes models more vulnerable to unexpected behavior [181, 146], hallucinations [255, 505] or unsafe outputs [360, 1145, 906, 108, 20, 525]. For instance, Arrieta et al. [24] identify that DeepSeek-R1 is prone to generating harmful content, including misinformation and offensive speech. Additionally, Kumar et al. [357] introduce the OverThink attack, which exploits false inference problems to induce overthinking in models, providing insights into potential defensive strategies. Further, Yao et al. [958] fool RLLMs with a chain of iterative chaos for better jailbreaking. (2) Long CoT Safety Improvement Another major area of research focuses on enhancing safety [320, 1138, 493] and reliability [715, 636, 748, 147, 105, 655] through prompting [191] or training [579] techniques. Shen et al. [662] present Heima, which optimizes inference efficiency and robustness. 
Gallego [191] proposes dynamic security prompts during inference, while Cheng et al. [121] address hallucinations by guiding reasoning with a tree search algorithm. Zhao et al. [1092] introduce a self-reflection framework to identify biases, and Wang et al. [772] propose Safety Reasoning with Guidelines (SRG) to defend against out-of-distribution attacks. Finally, Parmar and Govindarajulu [587] combine reinforcement learning (RL) and supervised fine-tuning (SFT) in a hybrid training approach to reduce harmful outputs and enhance DeepSeek-R1's safety. + +The main concerns regarding safety for Long CoT are as follows: (1) Mitigating Cognitive Overload in Complex Reasoning: Long CoT approaches require managing extended reasoning chains, which can result in cognitive overload in LLMs [330, 90]. This overload may lead to errors, hallucinations, or unsafe outputs. Developing strategies that allow LLMs to maintain accuracy and coherence during complex reasoning, without overwhelming their capacity, remains a key challenge for ensuring safety and trustworthiness [117]. (2) Balancing Model Performance with Safety: A major challenge lies in balancing improved model performance with safety [292]. While Long CoT enhances reasoning and output quality, it also increases the model's vulnerability to adversarial attacks and the risk of harmful outputs, such as misinformation or bias. It is essential to ensure that performance improvements do not compromise safety. + +# 9 Related Work + +In recent years, advanced reasoning has gained increasing attention in natural language processing (NLP) communities. Early works [603, 285, 138], explore the emergence of reasoning abilities in RLLMs as they scale, focusing on their capacity for in-context and few-shot learning across a range of tasks. Additionally, Giadikiaroglou et al. [208], Yu et al. [980] and Liu et al. [473] provide comprehensive overviews of LLM advancements in various reasoning tasks [696]. Moreover, Chu-Carroll et al. 
[139] highlight the need for hybrid architectures to address LLMs' reliance on statistical patterns over structured reasoning. + +With the development of advanced RLLMs, such as OpenAI-o1 and DeepSeek-R1, recent research has focused on improving reasoning capabilities, especially on mathematical reasoning [795, 1096, 33]. Patil [588] highlight the limitations of standard LLMs in addressing complex reasoning tasks, such as optimization and multi-step reasoning. In addition, Liang et al. [440] and Li [419] review strategies to scale search and inference time, including the use of algorithms like Monte Carlo Tree Search, to enhance LLM reasoning. Xu et al. [899] examine the role of reinforcement learning and "thought" sequences in reasoning improvement [359], while Hong et al. [259] demonstrate the impact of prompting techniques [546]. Further, Liu et al. [473] and Mondorf and Plank [557] stress the importance of deeper analysis beyond surface-level accuracy, and He et al. [248] explore self-evolutionary processes as a means to advance LLM reasoning. Besta et al. [50] propose a modular + +framework integrating structure, strategy, and training methods as part of a comprehensive system design approach. Most recently, Li et al. [432] provide a systematic survey of System 2 thinking, focusing on the methods used to differentiate them from System 1 thinking. + +Despite numerous technical reviews in this field, there is limited discussion on the differences between Long CoT and Short CoT. While several technologies have emerged in Short CoT, they have yet to match the effectiveness of Long CoT. This issue has not been thoroughly addressed. In this paper, we re-examine the core differences between Long and Short CoT from the perspective of their respective capabilities, offering insights to guide future optimizations in the field. 
+ +# 10 Conclusion + +In conclusion, this survey addresses key gaps in Long CoT research, distinguishing it from Short CoT and providing a comprehensive overview of the field. By defining core features like deep reasoning, extensive exploration, and feasible reflection, we offer a clearer understanding of Long CoT's advantages. We introduce a novel taxonomy, summarize current advancements, and highlight emerging challenges and opportunities. Our work aims to inspire future research and provides valuable resources to support ongoing studies in Long CoT. + +# References + +[1] Asma Ben Abacha, Wen-wai Yim, Yujuan Fu, Zhaoyi Sun, Meliha Yetisgen, Fei Xia, and Thomas Lin. Medec: A benchmark for medical error detection and correction in clinical notes. arXiv preprint arXiv:2412.19260, 2024. +[2] Marwan AbdElhameed and Pavly Halim. Inference scaling vs reasoning: An empirical analysis of compute-optimal llm problem-solving. arXiv preprint arXiv:2412.16260, 2024. +[3] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. +[4] Bo Adler, Niket Agarwal, Ashwath Aithal, Dong H Anh, Pallab Bhattacharya, Annika Brundyn, Jared Casper, Bryan Catanzaro, Sharon Clay, Jonathan Cohen, et al. Nematron-4 340b technical report. arXiv preprint arXiv:2406.11704, 2024. +[5] Shivam Agarwal, Zimin Zhang, Lifan Yuan, Jiawei Han, and Hao Peng. The unreasonable effectiveness of entropy minimization in llm reasoning. arXiv preprint arXiv:2505.15134, 2025. +[6] Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning. arXiv preprint arXiv:2503.04697, 2025. +[7] Wasi Uddin Ahmad, Sean Narethiran, Somshubra Majumdar, Aleksander Ficek, Siddhartha Jain, Jocelyn Huang, Vahid Noroozi, and Boris Ginsburg. Opencodereasoning: Advancing data distillation for competitive coding. 
arXiv preprint arXiv:2504.01943, 2025. +[8] AI-MO. Aime 2024. https://huggingface.co/datasets/AI-MO/aimo-validation-aime, July 2024. +[9] AI-MO. Amc 2023. https://huggingface.co/datasets/AI-MO/aimo-validation-amc, July 2024. +[10] Alon Albalak, Duy Phung, Nathan Lile, Rafael Rafailov, Kanishk Gandhi, Louis Castricato, Anikait Singh, Chase Blagden, Violet Xiang, Dakota Mahan, and Nick Haber. Big-math: A large-scale, high-quality math dataset for reinforcement learning in language models, 2025. +[11] Mohammad Ali Alomrani, Yingxue Zhang, Derek Li, Qianyi Sun, Soumyasundar Pal, Zhanguang Zhang, Yaochen Hu, Rohan Deepak Ajwani, Antonios Valkanas, Raika Karimi, et al. Reasoning on a budget: A survey of adaptive and controllable test-time compute in llms. arXiv preprint arXiv:2507.02076, 2025. +[12] Alireza Amiri, Xinting Huang, Mark Rofin, and Michael Hahn. Lower bounds for chain-of-thought reasoning in hard-attention transformers. arXiv preprint arXiv:2502.02393, 2025. +[13] Dario Amodei, Chris Olah, Jacob Steinhardt, Paul Christiano, John Schulman, and Dan Mané. Concrete problems in ai safety. arXiv preprint arXiv:1606.06565, 2016. + +[14] Shengnan An, Zexiong Ma, Zeqi Lin, Nanning Zheng, Jian-Guang Lou, and Weizhu Chen. Learning from mistakes makes llm better reasoner. arXiv preprint arXiv:2310.20689, 2023. +[15] Carolyn Jane Anderson, Joydeep Biswas, Aleksander Boruch-Gruszecki, Federico Cassano, Molly Q Feldman, Arjun Guha, Francesca Lucchetti, and Zixuan Wu. PhD knowledge not required: A reasoning challenge for large language models. arXiv preprint arXiv:2502.01584, 2025. +[16] Rohan Anil, Andrew M Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, et al. Palm 2 technical report. arXiv preprint arXiv:2305.10403, 2023. +[17] Zachary Ankner, Mansheej Paul, Brandon Cui, Jonathan Daniel Chang, and Prithviraj Ammanabrolu. Critique-out-loud reward models. 
In *Pluralistic Alignment Workshop at NeurIPS* 2024, October 2024. URL https://openreview.net/forum?id=CljYUvI1RW. +[18] Thomas Anthony, Zheng Tian, and David Barber. Thinking fast and slow with deep learning and tree search. Advances in neural information processing systems, 30, December 2017. URL https://proceedings.neurips.cc/paper_files/paper/2017/file/d8e1344e27a5b08cdfd5d027d9b8d6de-Paper.pdf. +[19] AI Anthropic. The claude 3 model family: Opus, sonnet, haiku. Claude-3 Model Card, 1:1, 2024. URL https://www-cdn.anthropic.com/de8ba9b01c9ab7cbabf5c33b80b7bbc618857627/Model_Card_Claude_3.pdf. +[20] Roberto Araya. Do chains-of-thoughts of large language models suffer from hallucinations, cognitive biases, or phobias in bayesian reasoning? arXiv preprint arXiv:2503.15268, 2025. +[21] Mikhail L Arbazov, Alexey A Shvets, and Sisong Beir. Beyond exponential decay: Rethinking error accumulation in large language models. arXiv preprint arXiv:2505.24187, 2025. +[22] Daman Arora and Andrea Zanette. Training language models to reason efficiently. arXiv preprint arXiv:2502.04463, 2025. +[23] Aitor Arrieta, Miriam Ugarte, Pablo Valle, José Antonio Parejo, and Sergio Segura. Early external safety testing of openai's o3-mini: Insights from the pre-deployment evaluation. arXiv preprint arXiv:2501.17749, 2025. +[24] Aitor Arrieta, Miriam Ugarte, Pablo Valle, José Antonio Parejo, and Sergio Segura. o3-mini vs deepseek-r1: Which one is safer? arXiv preprint arXiv:2501.18438, 2025. +[25] Dhananjay Ashok and Jonathan May. Language models can predict their own behavior. arXiv preprint arXiv:2502.13329, 2025. +[26] Zhangir Azerbayev, Hailey Schoelkopf, Keiran Paster, Marco Dos Santos, Stephen Marcus McAleer, Albert Q. Jiang, Jia Deng, Stella Biderman, and Sean Welleck. Llemma: An open language model for mathematics. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=4WnqRR915j. 
+[27] Alisson Azzolini, Hannah Brandon, Prithvijit Chattopadhyay, Huayu Chen, Jinju Chu, Yin Cui, Jenna Diamond, Yifan Ding, Francesco Ferroni, Rama Govindaraju, et al. Cosmos-reason1: From physical common sense to embodied reasoning. arXiv preprint arXiv:2503.15558, 2025. +[28] Tanja Baeumel, Josef van Genabith, and Simon Ostermann. The lookahead limitation: Why multi-operand addition is hard for lms. arXiv preprint arXiv:2502.19981, 2025. +[29] Yuntao Bai, Saurav Kadavath, Sandipan Kundu, Amanda Askell, Jackson Kernion, Andy Jones, Anna Chen, Anna Goldie, Azalia Mirhoseini, Cameron McKinnon, et al. Constitutional ai: Harmlessness from ai feedback. arXiv preprint arXiv:2212.08073, 2022. +[30] Bowen Baker, Joost Huizinga, Aleksander Madry, Wojciech Zaremba, Jakub Pachocki, and David Farhi. Monitoring reasoning models for misbehavior and the risks of promoting obfuscation. March 2025. URL https://openai.com/index/chain-of-thought-monitoring/. +[31] Vidhisha Balachandran, Jingya Chen, Lingjiao Chen, Shivam Garg, Neel Joshi, Yash Lara, John Langford, Besmira Nushi, Vibhav Vineet, Yue Wu, et al. Inference-time scaling for complex tasks: Where we stand and what lies ahead. arXiv preprint arXiv:2504.00294, 2025. + +[32] Marthe Ballon, Andres Algaba, and Vincent Ginis. The relationship between reasoning and performance in large language models-o3 (mini) thinks harder, not longer. arXiv preprint arXiv:2502.15631, 2025. +[33] Dibyanayan Bandyopadhyay, Soham Bhattacharjee, and Asif Ekbal. Thinking machines: A survey of llm based reasoning strategies. arXiv preprint arXiv:2503.10814, 2025. +[34] Hritik Bansal, Arian Hosseini, Rishabh Agarwal, Vinh Q. Tran, and Mehran Kazemi. Smaller, weaker, yet better: Training LLM reasoners via compute-optimal sampling. In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, January 2025. URL https://openreview.net/forum?id=HuYSURUxs2. +[35] Hieu Tran Bao, Nguyen Cong Dat, Nguyen Duc Anh, and Hoang Thanh Tung. 
Learning to stop overthinking at test time. arXiv preprint arXiv:2502.10954, 2025. +[36] Keqin Bao, Nuo Chen, Xiaoyuan Li, Binyuan Hui, Bowen Yu, Fuli Feng, Junyang Lin, Xiangnan He, and Dayiheng Liu. Teaching llm to reason: Reinforcement learning from algorithmic problems without code. arXiv preprint arXiv:2507.07498, 2025. +[37] Qiming Bao, Alex Yuxuan Peng, Tim Hartill, Neset Tan, Zhenyun Deng, Michael Witbrock, and Jiamou Liu. Multi-step deductive reasoning over natural language: An empirical study on out-of-distribution generalisation. arXiv preprint arXiv:2207.14000, 2022. +[38] Qiming Bao, Gael Gendron, Alex Yuxuan Peng, Wanjun Zhong, Neset Tan, Yang Chen, Michael Witbrock, and Jiamou Liu. Assessing and enhancing the robustness of large language models with task structure variations for logical reasoning. arXiv preprint arXiv:2310.09430, 2023. +[39] Qiming Bao, Alex Yuxuan Peng, Zhenyun Deng, Wanjun Zhong, Neset Tan, Nathan Young, Yang Chen, Yonghua Zhu, Michael Witbrock, and Jiamou Liu. Contrastive learning with logic-driven data augmentation for logical reasoning over text. arXiv preprint arXiv:2305.12599, 2023. +[40] Qiming Bao, Alex Peng, Zhenyun Deng, Wanjun Zhong, Gael Gendron, Timothy Pistotti, Neset Tan, Nathan Young, Yang Chen, Yonghua Zhu, Paul Denny, Michael Witbrock, and Jiamou Liu. Abstract Meaning Representation-based logic-driven data augmentation for logical reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 5914–5934, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.353. URL https://aclanthology.org/2024-findings-acl.353/. +[41] Qiming Bao, Juho Leinonen, Alex Yuxuan Peng, Wanjun Zhong, Gael Gendron, Timothy Pistotti, Alice Huang, Paul Denny, Michael Witbrock, and Jiamou Liu. 
Exploring iterative enhancement for improving learnersourced multiple-choice question explanations with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 39, pages 28955–28963, Apr 2025. +[42] Brian R Bartoldson, Siddarth Venkatraman, James Diffenderfer, Moksh Jain, Tal Ben-Nun, Seanie Lee, Minsu Kim, Johan Obando-Ceron, Yoshua Bengio, and Bhavya Kailkhura. Trajectory balance with asynchrony: Decoupling exploration and learning for fast, scalable llm post-training. arXiv preprint arXiv:2503.18929, 2025. +[43] Sarmad Bashir, Alessio Ferrari, Abbas Khan, Per Erik Strandberg, Zulqarnain Haider, Mehrdad Saadatmand, and Markus Bohlin. Requirements ambiguity detection and explanation with llms: An industrial study. July 2025. +[44] Ali Behrouz, Peilin Zhong, and Vahab Mirrokni. Titans: Learning to memorize at test time. arXiv preprint arXiv:2501.00663, 2024. +[45] Yoshua Bengio, Michael Cohen, Damiano Fornasiere, Joumana Ghosn, Pietro Greiner, Matt MacDermott, Soren Mindermann, Adam Oberman, Jesse Richardson, Oliver Richardson, et al. Superintelligent agents pose catastrophic risks: Can scientist ai offer a safer path? arXiv preprint arXiv:2502.15657, 2025. +[46] Yoshua Bengio, Soren Mindermann, Daniel Privitera, Tamay Besiroglu, Rishi Bommasani, Stephen Casper, Yejin Choi, Philip Fox, Ben Garfinkel, Danielle Goldfarb, et al. International ai safety report. arXiv preprint arXiv:2501.17805, 2025. + +[47] Leonardo Bertolazzi, Philipp Mondorf, Barbara Plank, and Raffaella Bernardi. The validation gap: A mechanistic analysis of how language models compute arithmetic but fail to validate it. arXiv preprint arXiv:2502.11771, 2025. +[48] Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nczyk, and Torsten Hoefler. Graph of thoughts: Solving elaborate problems with large language models. 
Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17682-17690, Mar. 2024. doi: 10.1609/aaai.v38i16.29720. URL https://ojs.aaai.org/index.php/AAAI/article/view/29720. +[49] Maciej Besta, Florim Memedi, Zhenyu Zhang, Robert Gerstenberger, Guangyuan Piao, Nils Blach, Piotr Nyczyk, Marcin Copik, Grzegorz Kwaśniewski, Jürgen Müller, et al. Demystifying chains, trees, and graphs of thoughts. arXiv preprint arXiv:2401.14295, 2024. +[50] Maciej Besta, Julia Barth, Eric Schreiber, Ales Kubicek, Afonso Catarino, Robert Gerstenberger, Piotr Nyczyk, Patrick Iff, Yueling Li, Sam Houliston, et al. Reasoning language models: A blueprint. arXiv preprint arXiv:2501.11223, 2025. +[51] Jinhe Bi, Danqi Yan, Yifan Wang, Wenke Huang, Haokun Chen, Guancheng Wan, Mang Ye, Xun Xiao, Hinrich Schuetze, Volker Tresp, et al. Cot-kinetics: A theoretical modeling assessing lrm reasoning process. arXiv preprint arXiv:2505.13408, 2025. +[52] Xiao Bi, Deli Chen, Guanting Chen, Shanhuang Chen, Damai Dai, Chengqi Deng, Honghui Ding, Kai Dong, Qiushi Du, Zhe Fu, et al. Deepseek llm: Scaling open-source language models with longtermism. arXiv preprint arXiv:2401.02954, 2024. +[53] Zhen Bi, Ningyu Zhang, Yinuo Jiang, Shumin Deng, Guozhou Zheng, and Huajun Chen. When do program-of-thought works for reasoning? In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pages 17691-17699, 2024. URL https://ojs.aaai.org/index.php/AAAI/article/view/29721/31237. +[54] Zhenni Bi, Kai Han, Chuanjian Liu, Yehui Tang, and Yunhe Wang. Forest-of-thought: Scaling test-time compute for enhancing llm reasoning. arXiv preprint arXiv:2412.09078, 2024. +[55] Edoardo Botta, Yuchen Li, Aashay Mehta, Jordan T Ash, Cyril Zhang, and Andrej Risteski. On the query complexity of verifier-assisted language generation. arXiv preprint arXiv:2502.12123, 2025. 
+[56] David Brandfonbrener, Simon Henniger, Sibi Raja, Tarun Prasad, Chloe Loughridge, Federico Cassano, Sabrina Ruixin Hu, Jianang Yang, William E Byrd, Robert Zinkov, et al. Vermcts: Synthesizing multi-step programs using a verifier, a large language model, and tree search. arXiv preprint arXiv:2402.08147, 2024. +[57] Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv preprint arXiv:2407.21787, 2024. +[58] Dan Busbridge, Amitis Shidani, Floris Weers, Jason Ramapuram, Etai Littwin, and Russ Webb. Distillation scaling laws. arXiv preprint arXiv:2502.08606, 2025. +[59] Ji Young Byun, Young-Jin Park, Navid Azizan, and Rama Chellappa. Test-time-scaling for zero-shot diagnosis with visual-language reasoning. arXiv preprint arXiv:2506.11166, 2025. +[60] Ju-Seung Byun, Jiyun Chun, Jihyung Kil, and Andrew Perrault. ARES: Alternating reinforcement learning and supervised fine-tuning for enhanced multi-modal chain-of-thought reasoning through diverse AI feedback. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 4410-4430, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.252. URL https://aclanthology.org/2024.emnlp-main.252/. +[61] Huanqia Cai, Yijun Yang, and Zhifeng Li. System-2 mathematical reasoning via enriched instruction tuning. arXiv preprint arXiv:2412.16964, 2024. +[62] Zheng Cai, Maosong Cao, Haojiong Chen, Kai Chen, Keyu Chen, Xin Chen, Xun Chen, Zehui Chen, Zhi Chen, Pei Chu, et al. Internlm2 technical report. arXiv preprint arXiv:2403.17297, 2024. + +[63] Erik Cambria, Lorenzo Malandri, Fabio Mercorio, Navid Nobani, and Andrea Seveso. Xai meets llms: A survey of the relation between explainable ai and large language models. 
arXiv preprint arXiv:2407.15248, 2024. +[64] Lang Cao. GraphReason: Enhancing reasoning capabilities of large language models through a graph-based verification approach. In Bhavana Dalvi Mishra, Greg Durrett, Peter Jansen, Ben Lipkin, Danilo Neves Ribeiro, Lionel Wong, Xi Ye, and Wenting Zhao, editors, Proceedings of the 2nd Workshop on Natural Language Reasoning and Structured Explanations (@ACL 2024), pages 1-12, Bangkok, Thailand, August 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.nlrse-1.1/. +[65] Zhepeng Cen, Yihang Yao, William Han, Zuxin Liu, and Ding Zhao. Behavior injection: Preparing language models for reinforcement learning. arXiv preprint arXiv:2505.18917, 2025. +[66] Linzheng Chai, Jian Yang, Tao Sun, Hongcheng Guo, Jiaheng Liu, Bing Wang, Xiannian Liang, Jiaqi Bai, Tongliang Li, Qiyao Peng, et al. xcot: Cross-lingual instruction tuning for cross-lingual chain-of-thought reasoning. arXiv preprint arXiv:2401.07037, 2024. +[67] Jun Shern Chan, Neil Chowdhury, Oliver Jaffe, James Aung, Dane Sherburn, Evan Mays, Giulio Starace, Kevin Liu, Leon Maksin, Tejal Patwardhan, et al. Mle-bench: Evaluating machine learning agents on machine learning engineering. arXiv preprint arXiv:2410.07095, 2024. +[68] Hyeong Soo Chang. On the convergence rate of mcts for the optimal value estimation in markov decision processes. IEEE Transactions on Automatic Control, pages 1-6, February 2025. doi: 10.1109/TAC.2025.3538807. URL https://ieeexplore.ieee.org/document/10870057. +[69] Aili Chen, Aonian Li, Bangwei Gong, Binyang Jiang, Bo Fei, Bo Yang, Boji Shan, Changqing Yu, Chao Wang, Cheng Zhu, et al. Minimax-m1: Scaling test-time compute efficiently with lightning attention. arXiv preprint arXiv:2506.13585, 2025. +[70] Andong Chen, Yuchen Song, Wenxin Zhu, Kehai Chen, Muyun Yang, Tiejun Zhao, et al. Evaluating o1-like llms: Unlocking reasoning for translation through comprehensive analysis. arXiv preprint arXiv:2502.11544, 2025. 
+[71] Beiduo Chen, Yang Janet Liu, Anna Korhonen, and Barbara Plank. Threading the needle: Reweaving chain-of-thought reasoning to explain human label variation. arXiv preprint arXiv:2505.23368, 2025. +[72] Guizhen Chen, Weiwen Xu, Hao Zhang, Hou Pong Chan, Chaoqun Liu, Lidong Bing, Deli Zhao, Anh Tuan Luu, and Yu Rong. Finereason: Evaluating and improving llms' deliberate reasoning through reflective puzzle solving. arXiv preprint arXiv:2502.20238, 2025. +[73] Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Step-level value preference optimization for mathematical reasoning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 7889-7903, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.463. URL https://aclanthology.org/2024_findings-emnlp.463/. +[74] Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Alphamath almost zero: Process supervision without process. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=VaXnxQ3UKo. +[75] Haibin Chen, Kangtao Lv, Chengwei Hu, Yanshi Li, Yujin Yuan, Yancheng He, Xingyao Zhang, Langming Liu, Shilei Liu, Wenbo Su, et al. Chineseecomqa: A scalable e-commerce concept evaluation benchmark for large language models. arXiv preprint arXiv:2502.20196, 2025. +[76] Hanjie Chen, Zhouxiang Fang, Yash Singla, and Mark Dredze. Benchmarking large language models on answering and explaining challenging medical questions. arXiv preprint arXiv:2402.18060, 2024. +[77] Haolin Chen, Yihao Feng, Zuxin Liu, Weiran Yao, Akshara Prabhakar, Shelby Heinecke, Ricky Ho, Phil Mui, Silvio Savarese, Caiming Xiong, et al. Language models are hid + +den reasoners: Unlocking latent reasoning capabilities via self-rewarding. arXiv preprint arXiv:2411.04282, 2024. 
+[78] Hardy Chen, Haoqin Tu, Hui Liu, Xianfeng Tang, Xinya Du, Yuyin Zhou, and Cihang Xie. Vl-thinking: An r1-derived visual instruction tuning dataset for thinkable lvlms. https://github.com/UCSC-VLAA/VL-Thinking, 2025. +[79] Jian Chen, Guohao Tang, Guofu Zhou, and Wu Zhu. Chatgpt and deepseek: Can they predict the stock market and macroeconomy? arXiv preprint arXiv:2502.10008, 2025. +[80] Jianhao Chen, Zishuo Xun, Bocheng Zhou, Han Qi, Qiaosheng Zhang, Yang Chen, Wei Hu, Yuzhong Qu, Wanli Ouyang, and Shuyue Hu. Do we truly need so many samples? multi-llm repeated sampling efficiently scale test-time compute. arXiv preprint arXiv:2504.00762, 2025. +[81] Jiefeng Chen, Jie Ren, Xinyun Chen, Chengrun Yang, Ruoxi Sun, and Sercan Ö Arik. Sets: Leveraging self-verification and self-correction for improved test-time scaling. arXiv preprint arXiv:2501.19306, 2025. +[82] Jierun Chen, Tiezheng Yu, Haoli Bai, Lewei Yao, Jiannan Wu, Kaican Li, Fei Mi, Chaofan Tao, Lei Zhu, Manyi Zhang, et al. The synergy dilemma of long-cot sft and rl: Investigating post-training techniques for reasoning vlms. arXiv preprint arXiv:2507.07562, 2025. +[83] Junying Chen, Zhenyang Cai, Ke Ji, Xidong Wang, Wanlong Liu, Rongsheng Wang, Jianye Hou, and Benyou Wang. Huatuogpt-o1, towards medical complex reasoning with llms. arXiv preprint arXiv:2412.18925, 2024. +[84] Justin Chih-Yao Chen, Archiki Prasad, Swarnadeep Saha, Elias Stengel-Eskin, and Mohit Bansal. Magicore: Multi-agent, iterative, coarse-to-fine refinement for reasoning. arXiv preprint arXiv:2409.12147, 2024. +[85] Kedi Chen, Zhikai Lei, Fan Zhang, Yinqi Zhang, Qin Chen, Jie Zhou, Liang He, Qipeng Guo, Kai Chen, and Wei Zhang. Code-driven inductive synthesis: Enhancing reasoning abilities of large language models with sequences. arXiv preprint arXiv:2503.13109, 2025. +[86] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. 
https://github.com/Deep-Agent/R1-V, 2025. Accessed: 2025-02-02. +[87] Michael K Chen, Xikun Zhang, and Dacheng Tao. Justlogic: A comprehensive benchmark for evaluating deductive reasoning in large language models. arXiv preprint arXiv:2501.14851, 2025. +[88] Mingyang Chen, Tianpeng Li, Haoze Sun, Yijie Zhou, Chenzheng Zhu, Fan Yang, Zenan Zhou, Weipeng Chen, Haofen Wang, Jeff Z Pan, et al. Learning to reason with search for llms via reinforcement learning. arXiv preprint arXiv:2503.19470, 2025. +[89] Nuo Chen, Zhiyuan Hu, Qingyun Zou, Jiaying Wu, Qian Wang, Bryan Hooi, and Bingsheng He. Judgerm: Large reasoning models as a judge. arXiv preprint arXiv:2504.00050, 2025. +[90] Qiguang Chen, Libo Qin, Jiaqi WANG, Jingxuan Zhou, and Wanxiang Che. Unlocking the capabilities of thought: A reasoning boundary framework to quantify and optimize chain-of-thought. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=pC44UMwy2v. +[91] Qiguang Chen, Libo Qin, Jin Zhang, Zhi Chen, Xiao Xu, and Wanxiang Che. $\mathbf{M}^{3}\mathrm{CoT}$ : A novel benchmark for multi-domain multi-step multi-modal chain-of-thought. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 8199–8221, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.446. URL https://aclanthology.org/2024.acl-long.446/. +[92] Qiguang Chen, Libo Qin, Jinhao Liu, Yue Liao, Jiaqi Wang, Jingxuan Zhou, and Wanxiang Che. Rbf++: Quantifying and optimizing reasoning boundaries across measurable and unmeasurable capabilities for chain-of-thought reasoning. arXiv preprint arXiv:2505.13307, 2025. + +[93] Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiaqi Wang, Mengkang Hu, Zhi Chen, Wanxiang Che, and Ting Liu. 
Ecm: A unified electronic circuit model for explaining the emergence of in-context learning and chain-of-thought in large language model. arXiv preprint arXiv:2502.03325, 2025. +[94] Qiguang Chen, Mingda Yang, Libo Qin, Jinhao Liu, Zheng Yan, Jiannan Guan, Dengyun Peng, Yiyan Ji, Hanjing Li, Mengkang Hu, et al. Ai4research: A survey of artificial intelligence for scientific research. arXiv preprint arXiv:2507.01903, 2025. +[95] Qiqi Chen, Xinpeng Wang, Philipp Mondorf, Michael A Hedderich, and Barbara Plank. Understanding when tree of thoughts succeeds: Larger models excel in generation, not discrimination. arXiv preprint arXiv:2410.17820, 2024. +[96] Shiqi Chen, Jinghan Zhang, Tongyao Zhu, Wei Liu, Siyang Gao, Miao Xiong, Manling Li, and Junxian He. Bring reason to vision: Understanding perception and reasoning through model merging. arXiv preprint arXiv:2505.05464, 2025. +[97] Shuang Chen, Yue Guo, Zhaochen Su, Yafu Li, Yulun Wu, Jiacheng Chen, Jiayu Chen, Weijie Wang, Xiaoye Qu, and Yu Cheng. Advancing multimodal reasoning: From optimized cold start to staged reinforcement learning. arXiv preprint arXiv:2506.04207, 2025. +[98] Sijia Chen and Baochun Li. Toward adaptive reasoning in large language models with thought rollback. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 7033-7056. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/chen24y.html. +[99] Weizhe Chen, Sven Koenig, and Bistra Dilkina. Iterative deepening sampling for large language models. arXiv preprint arXiv:2502.05449, 2025. +[100] Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W. Cohen. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. Transactions on Machine Learning Research, November 2023. ISSN 2835-8856. 
URL https://openreview.net/forum?id=YfZ4ZPt8zd. +[101] Wenxiang Chen, Wei He, Zhiheng Xi, Honglin Guo, Boyang Hong, Jiazheng Zhang, Rui Zheng, Nijun Li, Tao Gui, Yun Li, et al. Better process supervision with bi-directional rewarding signals. arXiv preprint arXiv:2503.04618, 2025. +[102] Xinghao Chen, Zhijing Sun, Wenjin Guo, Miaoran Zhang, Yanjun Chen, Yirong Sun, Hui Su, Yijie Pan, Dietrich Klakow, Wenjie Li, et al. Unveiling the key factors for distilling chain-of-thought reasoning. arXiv preprint arXiv:2502.18001, 2025. +[103] Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024. +[104] Xinyun Chen, Maxwell Lin, Nathanael Schärli, and Denny Zhou. Teaching large language models to self-debug. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=KuPixIqPiq. +[105] Yanda Chen, Joe Benton, Ansh Radhakrishnan, Jonathan Uesato, Carson Denison, John Schulman, Arushi Somani, Peter Hase, Misha Wagner, Fabien Roger, Vlad Mikulik, Sam Bowman, Jan Leike, Jared Kaplan, et al. Reasoning models don't always say what they think. April 2025. URL https://www.anthropic.com/research/reasoning-models-dont-say-think. +[106] Yanxi Chen, Xuchen Pan, Yaliang Li, Bolin Ding, and Jingren Zhou. A simple and provable scaling law for the test-time compute of large language models. arXiv preprint arXiv:2411.19477, 2024. +[107] Yezeng Chen, Zui Chen, and Yi Zhou. Brain-inspired two-stage approach: Enhancing mathematical reasoning by imitating human thought processes. arXiv preprint arXiv:2403.00800, 2024. +[108] Yihang Chen, Haikang Deng, Kaiqiao Han, and Qingyue Zhao. Policy frameworks for transparent chain-of-thought reasoning in large language models. arXiv preprint arXiv:2503.14521, 2025. 
+ +[109] Yilong Chen, Junyuan Shang, Zhenyu Zhang, Yanxi Xie, Jiawei Sheng, Tingwen Liu, Shuo-huan Wang, Yu Sun, Hua Wu, and Haifeng Wang. Inner thinking transformer: Leveraging dynamic depth scaling to foster adaptive internal thinking. arXiv preprint arXiv:2502.13842, 2025. +[110] Zhenfang Chen, Delin Chen, Rui Sun, Wenjun Liu, and Chuang Gan. Scaling autonomous agents via automatic reward modeling and planning. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=womU9cEwcO. +[111] Zhi Chen, Qiguang Chen, Libo Qin, Qipeng Guo, Haijun Lv, Yicheng Zou, Wanxiang Che, Hang Yan, Kai Chen, and Dahua Lin. What are the essential factors in crafting effective long context multi-hop instruction datasets? insights and best practices. arXiv preprint arXiv:2409.01893, 2024. +[112] Zihan Chen, Song Wang, Zhen Tan, Xingbo Fu, Zhenyu Lei, Peng Wang, Huan Liu, Cong Shen, and Jundong Li. A survey of scaling in large language model reasoning. arXiv preprint arXiv:2504.02181, 2025. +[113] Ziru Chen, Michael White, Ray Mooney, Ali Payani, Yu Su, and Huan Sun. When is tree search useful for LLM planning? it depends on the discriminator. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 13659–13678, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.738. URL https://aclanthology.org/2024.acl-long.738/. +[114] Zixiang Chen, Yihe Deng, Huizhuo Yuan, Kaixuan Ji, and Quanquan Gu. Self-play fine-tuning converts weak language models to strong language models. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 6621-6642. 
PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/chen24j.html. +[115] Zui Chen, Tianqiao Liu, Mi Tian, Qing Tong, Weiqi Luo, and Zitao Liu. Advancing math reasoning in language models: The impact of problem-solving data, data synthesis methods, and training stages. arXiv preprint arXiv:2501.14002, 2025. +[116] Daixuan Cheng, Shaohan Huang, Xuekai Zhu, Bo Dai, Wayne Xin Zhao, Zhenliang Zhang, and Furu Wei. Reasoning with exploration: An entropy perspective. arXiv preprint arXiv:2506.14758, 2025. +[117] Jiahao Cheng, Tiancheng Su, Jia Yuan, Guoxiu He, Jiawei Liu, Xinqi Tao, Jingwen Xie, and Huaxia Li. Chain-of-thought prompting obscures hallucination cues in large language models: An empirical evaluation. arXiv preprint arXiv:2506.17088, 2025. +[118] Jiale Cheng, Xiao Liu, Cunxiang Wang, Xiaotao Gu, Yida Lu, Dan Zhang, Yuxiao Dong, Jie Tang, Hongning Wang, and Minlie Huang. Spar: Self-play with tree-search refinement to improve instruction-following in large language models. arXiv preprint arXiv:2412.11605, 2024. +[119] Junhang Cheng, Fang Liu, Chengru Wu, and Li Zhang. Adaptivellm: A framework for selecting optimal cost-efficient llm for code-generation based on cot length. arXiv preprint arXiv:2506.10525, 2025. +[120] Kanzhi Cheng, Yantao Li, Fangzhi Xu, Jianbing Zhang, Hao Zhou, and Yang Liu. Vision-language models can self-improve reasoning via reflection. arXiv preprint arXiv:2411.00855, 2024. +[121] Xiaoxue Cheng, Junyi Li, Wayne Xin Zhao, and Ji-Rong Wen. Think more, hallucinate less: Mitigating hallucinations via dual process of fast and slow thinking. arXiv preprint arXiv:2501.01306, 2025. +[122] Zhengxiang Cheng, Dongping Chen, Mingyang Fu, and Tianyi Zhou. Optimizing length compression in large reasoning models. arXiv preprint arXiv:2506.14755, 2025. +[123] Zhoujun Cheng, Haoyu Dong, Zhiruo Wang, Ran Jia, Jiaqi Guo, Yan Gao, Shi Han, JianGuang Lou, and Dongmei Zhang. 
Hitab: A hierarchical table dataset for question answering and natural language generation. arXiv preprint arXiv:2108.06712, 2021. + +[124] Zhoujun Cheng, Shibo Hao, Tianyang Liu, Fan Zhou, Yutao Xie, Feng Yao, Yuexin Bian, Yonghao Zhuang, Nilabjo Dey, Yuheng Zha, et al. Revisiting reinforcement learning for llm reasoning from a cross-domain perspective. arXiv preprint arXiv:2506.14965, 2025. +[125] Zihui Cheng, Qiguang Chen, Jin Zhang, Hao Fei, Xiaocheng Feng, Wanxiang Che, Min Li, and Libo Qin. Comt: A novel benchmark for chain of multi-modal thought on large vision-language models. arXiv preprint arXiv:2412.12932, 2024. +[126] Zihui Cheng, Qiguang Chen, Xiao Xu, Jiaqi Wang, Weiyun Wang, Hao Fei, Yidong Wang, Alex Jinpeng Wang, Zhi Chen, Wanxiang Che, et al. Visual thoughts: A unified perspective of understanding multimodal chain-of-thought. arXiv preprint arXiv:2505.15510, 2025. +[127] Ethan Chern, Zhulin Hu, Steffi Chern, Siqi Kou, Jiadi Su, Yan Ma, Zhijie Deng, and Pengfei Liu. Thinking with generated images. arXiv preprint arXiv:2505.22525, 2025. +[128] Yew Ken Chia, Vernon Toh, Deepanway Ghosal, Lidong Bing, and Soujanya Poria. PuzzleVQA: Diagnosing multimodal reasoning challenges of language models with abstract visual patterns. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 16259–16273, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.962. URL https://aclanthology.org/2024-findings-acl.962/. +[129] Daiki Chijiwa, Taku Hasegawa, Kyosuke Nishida, Kuniko Saito, and Susumu Takeuchi. Portable reward tuning: Towards reusable fine-tuning across different pretrained models. arXiv preprint arXiv:2502.12776, 2025. +[130] Daewon Choi, Jimin Lee, Jihoon Tack, Woomin Song, Saket Dingliwal, Sai Muralidhar Jayanthi, Bhavana Ganesh, Jinwoo Shin, Aram Galstyan, and Sravan Babu Bodapati. 
Think clearly: Improving reasoning via redundant token pruning. arXiv preprint arXiv:2507.08806, 2025. +[131] François Chollet. On the measure of intelligence. arXiv preprint arXiv:1911.01547, 2019. +[132] Francois Chollet, Mike Knoop, Gregory Kamradt, and Bryan Landers. Arc prize 2024: Technical report. arXiv preprint arXiv:2412.04604, 2024. +[133] Francois Chollet, Mike Knoop, Gregory Kamradt, Bryan Landers, and Henry Pinkard. Arcagi-2: A new challenge for frontier ai reasoning systems. arXiv preprint arXiv:2505.11831, 2025. +[134] Sanjiban Choudhury. Process reward models for llm agents: Practical framework and directions. arXiv preprint arXiv:2502.10325, 2025. +[135] Jishnu Ray Chowdhury and Cornelia Caragea. Zero-shot verification-guided chain of thoughts. arXiv preprint arXiv:2501.13122, 2025. +[136] Konstantina Christakopoulou, Shibl Mourad, and Maja Mataric. Agents thinking fast and slow: A talker-reasoner architecture. In NeurIPS 2024 Workshop on Open-World Agents, October 2024. URL https://openreview.net/forum?id=xPhcP6rbI4. +[137] Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training. arXiv preprint arXiv:2501.17161, 2025. +[138] Zheng Chu, Jingchang Chen, Qianglong Chen, Weijiang Yu, Tao He, Haotian Wang, Weihua Peng, Ming Liu, Bing Qin, and Ting Liu. Navigate through enigmatic labyrinth a survey of chain of thought reasoning: Advances, frontiers and future. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1173–1203, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.65. URL https://aclanthology.org/2024.acl-long.65/. 
+[139] Jennifer Chu-Carroll, Andrew Beck, Greg Burnham, David OS Melville, David Nachman, A Erdem Özcan, and David Ferrucci. Beyond llms: Advancing the landscape of complex reasoning. arXiv preprint arXiv:2402.08064, 2024. +[140] Daniel JH Chung, Zhiqi Gao, Yurii Kvasiuk, Tianyi Li, Moritz Munchmeyer, Maja Rudolph, Frederic Sala, and Sai Chaitanya Tadepalli. Theoretical physics benchmark (tpbench)—a dataset and study of ai reasoning capabilities in theoretical physics. arXiv preprint arXiv:2502.15815, 2025. + +[141] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021. +[142] Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, et al. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks. arXiv preprint arXiv:2502.08235, 2025. +[143] Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, et al. Process reinforcement through implicit rewards. arXiv preprint arXiv:2502.01456, 2025. +[144] Ganqu Cui, Yuchen Zhang, Jiacheng Chen, Lifan Yuan, Zhi Wang, Yuxin Zuo, Haozhan Li, Yuchen Fan, Huayu Chen, Weize Chen, et al. The entropy mechanism of reinforcement learning for reasoning language models. arXiv preprint arXiv:2505.22617, 2025. +[145] Yingqian Cui, Pengfei He, Jingying Zeng, Hui Liu, Xianfeng Tang, Zhenwei Dai, Yan Han, Chen Luo, Jing Huang, Zhen Li, et al. Stepwise perplexity-guided refinement for efficient chain-of-thought reasoning in large language models. arXiv preprint arXiv:2502.13260, 2025. +[146] Yu Cui and Cong Zuo. Practical reasoning interruption attacks on reasoning large language models. arXiv preprint arXiv:2505.06643, 2025. +[147] Yu Cui, Bryan Hooi, Yujun Cai, and Yiwei Wang. 
Process or result? manipulated ending tokens can mislead reasoning lms to ignore the correct reasoning steps. arXiv preprint arXiv:2503.19326, 2025. +[148] Jianbo Dai, Jianqiao Lu, Yunlong Feng, Dong Huang, Guangtao Zeng, Rongju Ruan, Ming Cheng, Haochen Tan, and Zhijiang Guo. Mhpp: Exploring the capabilities and limitations of language models beyond basic code generation. arXiv preprint arXiv:2405.11430, 2024. +[149] Jisheng Dang, Jingze Wu, Teng Wang, Xuanhui Lin, Nannan Zhu, Hongbo Chen, Wei-Shi Zheng, Meng Wang, and Tat-Seng Chua. Reinforcing video reasoning with focused thinking. arXiv preprint arXiv:2505.24718, 2025. +[150] Quy-Anh Dang and Chris Ngo. Reinforcement learning for reasoning in small llms: What works and what doesn't. arXiv preprint arXiv:2503.16219, 2025. +[151] Yuntian Deng, Yejin Choi, and Stuart Shieber. From explicit cot to implicit cot: Learning to internalize cot step by step. arXiv preprint arXiv:2405.14838, 2024. +[152] Lauro Langosco Di Langosco, Jack Koch, Lee D Sharkey, Jacob Pfau, and David Krueger. Goal misgeneralization in deep reinforcement learning. In International Conference on Machine Learning, pages 12004-12019. PMLR, October 2022. URL https://proceedings.mlr.press/v162/langosco22a/langosco22a.pdf. +[153] Bowen Ding, Yuhan Chen, Futing Wang, Lingfeng Ming, and Tao Lin. Do thinking tokens help or trap? towards more efficient large reasoning model. arXiv preprint arXiv:2506.23840, 2025. +[154] Yifu Ding, Wentao Jiang, Shunyu Liu, Yongcheng Jing, Jinyang Guo, Yingjie Wang, Jing Zhang, Zengmao Wang, Ziwei Liu, Bo Du, et al. Dynamic parallel tree search for efficient llm reasoning. arXiv preprint arXiv:2502.16235, 2025. +[155] Hanze Dong, Wei Xiong, Deepanshu Goyal, Yihan Zhang, Winnie Chow, Rui Pan, Shizhe Diao, Jipeng Zhang, KaShun SHUM, and Tong Zhang. RAFT: Reward ranked finetuning for generative foundation model alignment. Transactions on Machine Learning Research, November 2023. ISSN 2835-8856. 
URL https://openreview.net/forum?id=m7p507zb1Y. +[156] Hanze Dong, Wei Xiong, Bo Pang, Haoxiang Wang, Han Zhao, Yingbo Zhou, Nan Jiang, Doyen Sahoo, Caiming Xiong, and Tong Zhang. Rlhf workflow: From reward modeling to online rlhf. arXiv preprint arXiv:2405.07863, 2024. +[157] Junnan Dong, Zijin Hong, Yuanchen Bei, Feiran Huang, Xinrun Wang, and Xiao Huang. Clr-bench: Evaluating large language models in college-level reasoning. arXiv preprint arXiv:2410.17558, 2024. + 

+[158] Kefan Dong and Tengyu Ma. Beyond limited data: Self-play llm theorem provers with iterative conjecturing and proving. arXiv preprint arXiv:2502.00212, 2025. +[159] Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. Insight-v: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432, 2024. +[160] Zhichen Dong, Zhanhui Zhou, Zhixuan Liu, Chao Yang, and Chaochao Lu. Emergent response planning in llm. arXiv preprint arXiv:2502.06258, 2025. +[161] Shihan Dou, Yan Liu, Haoxiang Jia, Limao Xiong, Enyu Zhou, Wei Shen, Junjie Shan, Caishuang Huang, Xiao Wang, Xiaoran Fan, et al. Stepcoder: Improve code generation with reinforcement learning from compiler feedback. arXiv preprint arXiv:2402.01391, 2024. +[162] Iddo Drori, Gaston Longhitano, Mao Mao, Seunghwan Hyun, Yuke Zhang, Sungjun Park, Zachary Meeks, Xin-Yu Zhang, Ben Segev, Howard Yong, et al. Diverse inference and verification for advanced reasoning. arXiv preprint arXiv:2502.09955, 2025. +[163] Kounianhua Du, Hanjing Wang, Jianxing Liu, Jizheng Chen, Xinyi Dai, Yasheng Wang, Ruiming Tang, Yong Yu, Jun Wang, and Weinan Zhang. Boost, disentangle, and customize: A robust system2-to-system1 pipeline for code generation. arXiv preprint arXiv:2502.12492, 2025. +[164] Weihua Du, Yiming Yang, and Sean Welleck. Optimizing temperature for language models with multi-sample inference. arXiv preprint arXiv:2502.05234, 2025. 
+[165] Xinrun Du, Yifan Yao, Kaijing Ma, Bingli Wang, Tianyu Zheng, Kang Zhu, Minghao Liu, Yiming Liang, Xiaolong Jin, Zhenlin Wei, et al. Supergpqa: Scaling llm evaluation across 285 graduate disciplines. arXiv preprint arXiv:2502.14739, 2025. +[166] Yifan Du, Zikang Liu, Yifan Li, Wayne Xin Zhao, Yuqi Huo, Bingning Wang, Weipeng Chen, Zheng Liu, Zhongyuan Wang, and Ji-Rong Wen. Virgo: A preliminary exploration on reproducing o1-like mllm. arXiv preprint arXiv:2501.01904, 2025. +[167] Keyu Duan, Zichen Liu, Xin Mao, Tianyu Pang, Changyu Chen, Qiguang Chen, Michael Qizhe Shieh, and Longxu Dou. Efficient process reward model training via active learning. arXiv preprint arXiv:2504.10559, 2025. +[168] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. +[169] Subhabrata Dutta, Joykirat Singh, Soumen Chakrabarti, and Tanmoy Chakraborty. How to think step-by-step: A mechanistic understanding of chain-of-thought reasoning. Transactions on Machine Learning Research, July 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=uHLDkQVtyC. +[170] Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, et al. Competitive programming with large reasoning models. arXiv preprint arXiv:2502.06807, 2025. +[171] Kawin Ethayarajh, Winnie Xu, Niklas Muennighoff, Dan Jurafsky, and Douwe Kiela. Kto: Model alignment as prospect theoretic optimization. arXiv preprint arXiv:2402.01306, 2024. +[172] Chongyu Fan, Yihua Zhang, Jinghan Jia, Alfred Hero, and Sijia Liu. Cyclicreflex: Improving large reasoning models via cyclical reflection token scheduling. arXiv preprint arXiv:2506.11077, 2025. +[173] Siqi Fan, Peng Han, Shuo Shang, Yequan Wang, and Aixin Sun. 
Cothink: Token-efficient reasoning via instruct models guiding reasoning models. arXiv preprint arXiv:2505.22017, 2025. +[174] Tiantian Fan, Lingjun Liu, Yu Yue, Jiaze Chen, Chengyi Wang, Qiying Yu, Chi Zhang, Zhiqi Lin, Ruofei Zhu, Yufeng Yuan, et al. Truncated proximal policy optimization. arXiv preprint arXiv:2506.15050, 2025. +[175] Yi Fang, Wenjie Wang, Yang Zhang, Fengbin Zhu, Qifan Wang, Fuli Feng, and Xiangnan He. Large language models for recommendation with deliberative user preference alignment. arXiv preprint arXiv:2502.02061, 2025. + +[176] Wu Fei, Hao Kong, Shuxian Liang, Yang Lin, Yibo Yang, Jing Tang, Lei Chen, and Xiansheng Hua. Self-guided process reward optimization with masked step advantage for process reinforcement learning. arXiv preprint arXiv:2507.01551, 2025. +[177] Guhao Feng, Bohang Zhang, Yuntian Gu, Haotian Ye, Di He, and Liwei Wang. Towards revealing the mystery behind chain of thought: A theoretical perspective. In Thirty-seventh Conference on Neural Information Processing Systems, September 2023. URL https://openreview.net/forum?id=qHrADgAdYu. +[178] Jiazhan Feng, Shijue Huang, Xingwei Qu, Ge Zhang, Yujia Qin, Baoquan Zhong, Chengquan Jiang, Jinxin Chi, and Wanjun Zhong. Retool: Reinforcement learning for strategic tool use in llms. arXiv preprint arXiv:2504.11536, 2025. +[179] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Junfei Wu, Xiaoying Zhang, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025. +[180] Sicheng Feng, Gongfan Fang, Xinyin Ma, and Xinchao Wang. Efficient reasoning models: A survey. arXiv preprint arXiv:2504.10903, 2025. +[181] Xiachong Feng, Longxu Dou, and Lingpeng Kong. Reasoning does not necessarily improve role-playing ability. arXiv preprint arXiv:2502.16940, 2025. +[182] Xueyang Feng, Bo Lan, Quanyu Dai, Lei Wang, Jiakai Tang, Xu Chen, Zhenhua Dong, and Ji-Rong Wen. 
Improving retrospective language agents via joint policy gradient optimization. arXiv preprint arXiv:2503.01490, 2025. +[183] Yichen Feng, Zhangchen Xu, Fengqing Jiang, Yuetai Li, Bhaskar Ramasubramanian, Luyao Niu, Bill Yuchen Lin, and Radha Poovendran. Visualsphinx: Large-scale synthetic vision logic puzzles for rl. arXiv preprint arXiv:2505.23977, 2025. +[184] Chrisantha Fernando, Dylan Sunil Banarse, Henryk Michalewski, Simon Osindero, and Tim Rocktäschel. Promptbreeder: Self-referential self-improvement via prompt evolution. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 13481-13544. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/fernando24a.html. +[185] Mohamed Amine Ferrag, Norbert Tihanyi, and Merouane Debbah. Reasoning beyond limits: Advances and open problems for llms. arXiv preprint arXiv:2503.22732, 2025. +[186] Thomas Palmeira Ferraz, Kartik Mehta, Yu-Hsiang Lin, Haw-Shiuan Chang, Shereen Oraby, Sijia Liu, Vivek Subramanian, Tagyoung Chung, Mohit Bansal, and Nanyun Peng. LLM self-correction with deCRIM: Decompose, critique, and refine for enhanced following of instructions with multiple constraints. In The First Workshop on System-2 Reasoning at Scale, NeurIPS'24, October 2024. URL https://openreview.net/forum?id=RQ6Ff81so0. +[187] Jiarun Fu, Lizhong Ding, Hao Li, Pengqi Li, Qiuning Wei, and Xu Chen. Unveiling and causalizing cot: A causal perspective. arXiv preprint arXiv:2502.18239, 2025. +[188] Wei Fu, Jiaxuan Gao, Xujie Shen, Chen Zhu, Zhiyu Mei, Chuyi He, Shusheng Xu, Guo Wei, Jun Mei, Jiashu Wang, Tongkai Yang, Binhang Yuan, and Yi Wu. Areal: A large-scale asynchronous reinforcement learning system for language reasoning, 2025. URL https://arxiv.org/abs/2505.24298. 
+[189] Yao Fu, Hao Peng, Ashish Sabharwal, Peter Clark, and Tushar Khot. Complexity-based prompting for multi-step reasoning. In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=yf1icZHC-19. +[190] Yuqian Fu, Tinghong Chen, Jiajun Chai, Xihuai Wang, Songjun Tu, Guojun Yin, Wei Lin, Qichao Zhang, Yuanheng Zhu, and Dongbin Zhao. Srft: A single-stage method with supervised and reinforcement fine-tuning for reasoning. arXiv preprint arXiv:2506.19767, 2025. +[191] Víctor Gallego. Metasc: Test-time safety specification optimization for language models. arXiv preprint arXiv:2502.07985, 2025. + +[192] Zeyu Gan, Yun Liao, and Yong Liu. Rethinking external slow-thinking: From snowball errors to probability of correct reasoning. arXiv preprint arXiv:2501.15602, 2025. +[193] Kanishk Gandhi, Denise HJ Lee, Gabriel Grand, Muxin Liu, Winson Cheng, Archit Sharma, and Noah Goodman. Stream of search (sos): Learning to search in language. In First Conference on Language Modeling, July 2024. URL https://openreview.net/pdf?id=2cop2jmQVL. +[194] Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars. arXiv preprint arXiv:2503.01307, 2025. +[195] Bofei Gao, Zefan Cai, Runxin Xu, Peiyi Wang, Ce Zheng, Runji Lin, Keming Lu, Junyang Lin, Chang Zhou, Tianyu Liu, and Baobao Chang. The reason behind good or bad: Towards a better mathematical verifier with natural language feedback, 2024. +[196] Bofei Gao, Zefan Cai, Runxin Xu, Peiyi Wang, Ce Zheng, Runji Lin, Keming Lu, Dayiheng Liu, Chang Zhou, Wen Xiao, et al. Llm critics help catch bugs in mathematics: Towards a better mathematical verifier with natural language feedback. arXiv preprint arXiv:2406.14024, 2024. +[197] Jiaxuan Gao, Shusheng Xu, Wenjie Ye, Weilin Liu, Chuyi He, Wei Fu, Zhiyu Mei, Guangju Wang, and Yi Wu. 
On designing effective rl reward at training time for llm reasoning. arXiv preprint arXiv:2410.15115, 2024. +[198] Luyu Gao, Aman Madaan, Shuyan Zhou, Uri Alon, Pengfei Liu, Yiming Yang, Jamie Callan, and Graham Neubig. PAL: Program-aided language models. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett, editors, Proceedings of the 40th International Conference on Machine Learning, volume 202 of Proceedings of Machine Learning Research, pages 10764–10799. PMLR, 23–29 Jul 2023. URL https://proceedings.mlr.press/v202/gao23f.html. +[199] Silin Gao, Antoine Bosselut, Samy Bengio, and Emmanuel Abbe. Augmenting llms' reasoning by reinforcing abstract thinking. arXiv preprint arXiv:2506.07751, 2025. +[200] Tianchen Gao, Jiashun Jin, Zheng Tracy Ke, and Gabriel Moryoussef. A comparison of deepseek and other llms. arXiv preprint arXiv:2502.03688, 2025. +[201] Zitian Gao, Boye Niu, Xuzheng He, Haotian Xu, Hongzhang Liu, Aiwei Liu, Xuming Hu, and Lijie Wen. Interpretable contrastive monte carlo tree search reasoning. arXiv preprint arXiv:2410.01707, 2024. +[202] Yuyao Ge, Shenghua Liu, Yiwei Wang, Lingrui Mei, Lizhe Chen, Baolong Bi, and Xueqi Cheng. Innate reasoning is not enough: In-context learning enhances reasoning large language models with less overthinking. arXiv preprint arXiv:2503.19602, 2025. +[203] Jonas Gehring, Kunhao Zheng, Jade Copet, Vegard Mella, Taco Cohen, and Gabriel Synnaeve. Rlef: Grounding code llms in execution feedback with reinforcement learning. arXiv preprint arXiv:2410.02089, 2024. +[204] Jonas Geiping, Sean McLeish, Neel Jain, John Kirchenbauer, Siddharth Singh, Brian R Bartoldson, Bhavya Kailkhura, Abhinav Bhatele, and Tom Goldstein. Scaling up test-time compute with latent reasoning: A recurrent depth approach. arXiv preprint arXiv:2502.05171, 2025. +[205] Gael Gendron, Qiming Bao, Michael Witbrock, and Gillian Dobbie. Large language models are not strong abstract reasoners. 
In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, IJCAI '24, August 2024. ISBN 978-1-956792-04-1. doi: 10.24963/ijcai.2024/693. URL https://doi.org/10.24963/ijcai.2024/693. +[206] Zelalem Gero, Chandan Singh, Hao Cheng, Tristan Naumann, Michel Galley, Jianfeng Gao, and Hoifung Poon. Self-verification improves few-shot clinical information extraction. In ICML 3rd Workshop on Interpretable Machine Learning in Healthcare (IMLH), June 2023. URL https://openreview.net/forum?id=SBbJICrg1S. +[207] Akash Ghosh, Debayan Datta, Sriparna Saha, and Chirag Agarwal. The multilingual mind: A survey of multilingual reasoning in language models. arXiv preprint arXiv:2502.09457, 2025. + +[208] Panagiotis Giadikiaroglou, Maria Lymperaiou, Giorgos Filandrianos, and Giorgos Stamou. Puzzle solving using reasoning of large language models: A survey. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 11574–11591, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.646. URL https://aclanthology.org/2024.emnlp-main.646/. +[209] Alexi Gladstone, Ganesh Nanduru, Md Mofijul Islam, Peixuan Han, Hyeonjeong Ha, Aman Chadha, Yilun Du, Heng Ji, Jundong Li, and Tariq Iqbal. Energy-based transformers are scalable learners and thinkers. arXiv preprint arXiv:2507.02092, 2025. +[210] Elliot Glazer, Ege Erdil, Tamay Besiroglu, Diego Chicharro, Evan Chen, Alex Gunning, Caroline Falkman Olsson, Jean-Stanislas Denain, Anson Ho, Emily de Oliveira Santos, et al. Frontiermath: A benchmark for evaluating advanced mathematical reasoning in ai. arXiv preprint arXiv:2411.04872, 2024. +[211] Team GLM, Aohan Zeng, Bin Xu, Bowen Wang, Chenhui Zhang, Da Yin, Dan Zhang, Diego Rojas, Guanyu Feng, Hanlin Zhao, et al. Chatglm: A family of large language models from glm-130b to glm-4 all tools. 
arXiv preprint arXiv:2406.12793, 2024. +[212] Olga Golovneva, Moya Peng Chen, Spencer Poff, Martin Corredor, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. ROSCOE: A suite of metrics for scoring step-by-step reasoning. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=xYlJRpzZtsY. +[213] Olga Golovneva, Sean O'Brien, Ramakanth Pasunuru, Tianlu Wang, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. PATHFINDER: Guided search over multi-step reasoning paths. In R0-FoMo: Robustness of Few-shot and Zero-shot Learning in Large Foundation Models, December 2023. URL https://openreview.net/forum?id=5TsfEEwRsu. +[214] Ruihan Gong, Yue Liu, Wenjie Qu, Mingzhe Du, Yufei He, Yingwei Ma, Yulin Chen, Xiang Liu, Yi Wen, Xinfeng Li, et al. Efficient reasoning via chain of unconscious thought. arXiv preprint arXiv:2505.19756, 2025. +[215] Juraj Gottweis, Wei-Hung Weng, Alexander Daryin, Tao Tu, Anil Palepu, Petar Sirkovic, Artiom Myaskovsky, Felix Weissenberger, Keran Rong, Ryutaro Tanno, et al. Towards an ai co-scientist. arXiv preprint arXiv:2502.18864, 2025. +[216] Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Nan Duan, and Weizhu Chen. Critic: Large language models can self-correct with tool-interactive critiquing. arXiv preprint arXiv:2305.11738, 2023. +[217] Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Minlie Huang, Nan Duan, and Weizhu Chen. Tora: A tool-integrated reasoning agent for mathematical problem solving. arXiv preprint arXiv:2309.17452, 2023. +[218] Julia Grosse, Ruotian Wu, Ahmad Rashid, Philipp Hennig, Pascal Poupart, and Agustinus Kristiadi. Uncertainty-guided optimization on large language model search trees. arXiv preprint arXiv:2407.03951, 2024. +[219] Yanggan Gu, Junzhuo Li, Sirui Huang, Xin Zou, Zhenghua Li, and Xuming Hu. Capturing nuanced preferences: Preference-aligned distillation for small language models. 
arXiv preprint arXiv:2502.14272, 2025. +[220] Xinyan Guan, Yanjiang Liu, Xinyu Lu, Boxi Cao, Ben He, Xianpei Han, Le Sun, Jie Lou, Bowen Yu, Yaojie Lu, et al. Search, verify and feedback: Towards next generation post-training paradigm of foundation models via verifier engineering. arXiv preprint arXiv:2411.11504, 2024. +[221] Xinyan Guan, Jiali Zeng, Fandong Meng, Chunlei Xin, Yaojie Lu, Hongyu Lin, Xianpei Han, Le Sun, and Jie Zhou. Deep Learning: Thinking to retrieve step by step for large language models. arXiv preprint arXiv:2502.01142, 2025. +[222] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025. + +[223] Etash Guha, Ryan Marten, Sedrick Keh, Negin Raoof, Georgios Smyrnis, Hritik Bansal, Marianna Nezhurina, Jean Mercat, Trung Vu, Zayne Sprague, et al. Openthoughts: Data recipes for reasoning models. arXiv preprint arXiv:2506.04178, 2025. +[224] Aryan Gulati, Brando Miranda, Eric Chen, Emily Xia, Kai Fronsdal, Bruno de Moraes Dumont, and Sanmi Koyejo. Putnam-AXIOM: A functional and static benchmark for measuring higher level mathematical reasoning. In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, 2024. URL https://openreview.net/forum?id=YXnwlZe0yf. +[225] Caglar Gulcehre, Tom Le Paine, Srivatsan Srinivasan, Ksenia Konyushkova, Lotte Weerts, Abhishek Sharma, Aditya Siddhant, Alex Ahern, Miaosen Wang, Chenjie Gu, et al. Reinforced self-training (rest) for language modeling. arXiv preprint arXiv:2308.08998, 2023. +[226] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Yu Wu, YK Li, et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. arXiv preprint arXiv:2401.14196, 2024. 
+[227] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. +[228] Honglin Guo, Kai Lv, Qipeng Guo, Tianyi Liang, Zhiheng Xi, Demin Song, Qiuyinzhe Zhang, Yu Sun, Kai Chen, Xipeng Qiu, et al. Critiq: Mining data quality criteria from human preferences. arXiv preprint arXiv:2502.19279, 2025. +[229] Kehan Guo, Bozhao Nan, Yujun Zhou, Taicheng Guo, Zhichun Guo, Mihir Surve, Zhenwen Liang, Nitesh V Chawla, Olaf Wiest, and Xiangliang Zhang. Can LLMs solve molecule puzzles? a multimodal benchmark for molecular structure elucidation. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=t1mAxb4Cop. +[230] Ziyu Guo, Renrui Zhang, Chengzhuo Tong, Zhizheng Zhao, Peng Gao, Hongsheng Li, and Pheng-Ann Heng. Can we generate images with cot? let's verify and reinforce image generation step by step. arXiv preprint arXiv:2501.13926, 2025. +[231] Dongge Han, Menglin Xia, Daniel Madrigal Diaz, Samuel Kessler, Ankur Mallick, Xuchao Zhang, Mirian Del Carmen Hipolito Garcia, Jin Xu, Victor Ruhle, and Saravan Rajmohan. Enhancing reasoning capabilities of small language models with blueprints and prompt template search. arXiv preprint arXiv:2506.08669, 2025. +[232] Tingxu Han, Chunrong Fang, Shiyu Zhao, Shiqing Ma, Zhenyu Chen, and Zhenting Wang. Token-budget-aware llm reasoning. arXiv preprint arXiv:2412.18547, 2024. +[233] Michael Hanna, Ollie Liu, and Alexandre Variengien. How does GPT-2 compute greater-than?: Interpreting mathematical abilities in a pre-trained language model. September 2023. URL https://openreview.net/forum?id=p4PckNQR8k. +[234] Shibo Hao, Yi Gu, Haodi Ma, Joshua Hong, Zhen Wang, Daisy Wang, and Zhiting Hu. Reasoning with language model is planning with world model. 
In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 8154-8173, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.507. URL https://aclanthology.org/2023.emnlp-main.507/. +[235] Shibo Hao, Yi Gu, Haotian Luo, Tianyang Liu, Xiyan Shao, Xinyuan Wang, Shuhua Xie, Haodi Ma, Adithya Samavedhi, Qiyue Gao, Zhen Wang, and Zhiting Hu. LLM reasoners: New evaluation, library, and analysis of step-by-step reasoning with large language models. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=b0y6fbSUG0. +[236] Shibo Hao, Sainbayar Sukhbaatar, DiJia Su, Xian Li, Zhiting Hu, Jason Weston, and Yuandong Tian. Training large language models to reason in a continuous latent space. arXiv preprint arXiv:2412.06769, 2024. +[237] Yunzhuo Hao, Jiawei Gu, Huichen Will Wang, Linjie Li, Zhengyuan Yang, Lijuan Wang, and Yu Cheng. Can mllms reason in multimodality? emma: An enhanced multimodal reasoning benchmark. arXiv preprint arXiv:2501.05444, 2025. + +[238] Alexander Havrilla, Sharath Chandra Raparthy, Christoforos Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, and Roberta Raileanu. GLOre: When, where, and how to improve LLM reasoning via global and local refinements. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=LH6R06NxdB. +[239] Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, Jie Liu, Lei Qi, Zhiyuan Liu, and Maosong Sun. OlympiadBench: A challenging benchmark for promoting AGI with olympiad-level bilingual multimodal scientific problems. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 3828–3850, Bangkok, Thailand, August 2024. 
Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.211. URL https://aclanthology.org/2024.acl-long.211/. +[240] Chengbo He, Bochao Zou, Xin Li, Jiansheng Chen, Junliang Xing, and Huimin Ma. Enhancing llm reasoning with multi-path collaborative reactive and reflection agents. arXiv preprint arXiv:2501.00430, 2024. +[241] Feng He, Zijun Chen, Xinnian Liang, Tingting Ma, Yunqi Qiu, Shuangzhi Wu, and Junchi Yan. Protoreasoning: Prototypes as the foundation for generalizable reasoning in llms. arXiv preprint arXiv:2506.15211, 2025. +[242] Jujie He, Jiacai Liu, Chris Yuhao Liu, Rui Yan, Chaojie Wang, Peng Cheng, Xiaoyu Zhang, Fuxiang Zhang, Jiacheng Xu, Wei Shen, Siyuan Li, Liang Zeng, Tianwen Wei, Cheng Cheng, Bo An, Yang Liu, and Yahui Zhou. Skywork open reasoner series. https://capricious-hydrogen-41c.notion.site/Skywork-Open-Reaonser-Series-1d0bc9ae823a80459b46c149e4f51680, 2025. Note Blog. +[243] Junda He, Jieke Shi, Terry Yue Zhuo, Christoph Treude, Jiamou Sun, Zhenchang Xing, Xiaoning Du, and David Lo. From code to courtroom: Llms as the new software judges. arXiv preprint arXiv:2503.02246, 2025. +[244] Kang He and Kaushik Roy. Logictree: Structured proof exploration for coherent and rigorous logical reasoning with large language models. arXiv preprint arXiv:2504.14089, 2025. +[245] Mingqian He, Yongliang Shen, Wenqi Zhang, Zeqi Tan, and Weiming Lu. Advancing process verification for large language models via tree-based preference learning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 2086-2099, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.125. URL https://aclanthology.org/2024.emnlp-main.125/. +[246] Qiangqiang He, Shuwei Qian, Jie Zhang, and Chongjun Wang. Inference retrieval-augmented multi-modal chain-of-thoughts reasoning for language models. 
In ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5, 2025. doi: 10.1109/ICASSP49660.2025.10888701. URL https://openreview.net/pdf/9a7e7a9787d14ac8302215f8e4ef959606b78a94.pdf. +[247] Shenghua He, Tian Xia, Xuan Zhou, and Hui Wei. Response-level rewards are all you need for online reinforcement learning in llms: A mathematical perspective. arXiv preprint arXiv:2506.02553, 2025. +[248] Tao He, Hao Li, Jingchang Chen, Runxuan Liu, Yixin Cao, Lizi Liao, Zihao Zheng, Zheng Chu, Jiafeng Liang, Ming Liu, et al. A survey on complex reasoning of large language models through the lens of self-evolution. February 2025. +[249] Xingyang He, Xiao Ling, and Jie Liu. Smartthinker: Learning to compress and preserve reasoning by step-level length control. arXiv preprint arXiv:2507.04348, 2025. +[250] Yancheng He, Shilong Li, Jiaheng Liu, Weixun Wang, Xingyuan Bu, Ge Zhang, Zhongyuan Peng, Zhaoxiang Zhang, Wenbo Su, and Bo Zheng. Can large language models detect errors in long chain-of-thought reasoning? arXiv preprint arXiv:2502.19361, 2025. +[251] Yang He, Xiao Ding, Bibo Cai, Yufei Zhang, Kai Xiong, Zhouhao Sun, Bing Qin, and Ting Liu. Self-route: Automatic mode switching via capability estimation for efficient reasoning. arXiv preprint arXiv:2505.20664, 2025. + +[252] Zhitao He, Sandeep Polisetty, Zhiyuan Fan, Yuchen Huang, Shujin Wu, et al. Mmboundary: Advancing mllm knowledge boundary awareness through reasoning step confidence calibration. arXiv preprint arXiv:2505.23224, 2025. +[253] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2), October 2021. URL https://openreview.net/forum?id=7Bywt2mQsCe. +[254] Alex Heyman and Joel Zylberberg. 
Evaluating the systematic reasoning abilities of large language models through graph coloring. arXiv preprint arXiv:2502.07087, 2025. +[255] Alex Heyman and Joel Zylberberg. Reasoning large language model errors arise from hallucinating critical problem features. arXiv preprint arXiv:2505.12151, 2025. +[256] Namgyu Ho, Laura Schmid, and Se-Young Yun. Large language models are reasoning teachers. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 14852–14882, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.830. URL https://aclanthology.org/2023.acl-long.830/. +[257] Andreas Hochlehnert, Hardik Bhatnagar, Vishaal Udandarao, Samuel Albanie, Ameya Prabhu, and Matthias Bethge. A sober look at progress in language model reasoning: Pitfalls and paths to reproducibility. arXiv preprint arXiv:2504.07086, 2025. +[258] Matthew Douglas Hoffman, Du Phan, david dohan, Sholto Douglas, Tuan Anh Le, Aaron T Parisi, Pavel Sountsov, Charles Sutton, Sharad Vikram, and Rif A. Saurous. Training chain-of-thought via latent-variable inference. In Thirty-seventh Conference on Neural Information Processing Systems, September 2023. URL https://openreview.net/forum?id=a147pIS2Co. +[259] Ruixin Hong, Xinyu Pang, and Changshui Zhang. Advances in reasoning by prompting large language models: A survey. Cybernetics and Intelligence, pages 1-15, 2024. doi: 10.26599/CAI.2024.9390004. +[260] Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, and Jie Tang. Cogagent: A visual language model for gui agents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14281-14290, June 2024. 
URL https://openaccess.thecvf.com/content/CVPR2024/papers/Hong_CogAgent_A_Visual_Language_Model_for_GUI_Agents_CVPR_2024_paper.pdf. +[261] Arian Hosseini, Alessandro Sordoni, Daniel Kenji Toyama, Aaron Courville, and Rishabh Agarwal. Not all LLM reasoners are created equal. In The First Workshop on System-2 Reasoning at Scale, NeurIPS'24, October 2024. URL https://openreview.net/forum?id=aPAWbip1xV. +[262] Arian Hosseini, Xingdi Yuan, Nikolay Malkin, Aaron Courville, Alessandro Sordoni, and Rishabh Agarwal. V-STar: Training verifiers for self-taught reasoners. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=stmqBSW2dV. +[263] Bairu Hou, Yang Zhang, Jiabao Ji, Yujuan Liu, Kaizhi Qian, Jacob Andreas, and Shiyu Chang. Thinkprune: Pruning long chain-of-thought of llms via reinforcement learning. arXiv preprint arXiv:2504.01296, 2025. +[264] Zhenyu Hou, Xin Lv, Rui Lu, Jiajie Zhang, Yujiang Li, Zijun Yao, Juanzi Li, Jie Tang, and Yuxiao Dong. Advancing language model reasoning through reinforcement learning and inference scaling. arXiv preprint arXiv:2501.11651, 2025. +[265] Jian Hu. Reinforce++: A simple and efficient approach for aligning large language models. arXiv preprint arXiv:2501.03262, 2025. +[266] Jian Hu, Xibin Wu, Zilin Zhu, Xianyu, Weixun Wang, Dehao Zhang, and Yu Cao. Openrlhf: An easy-to-use, scalable and high-performance rlhf framework. arXiv preprint arXiv:2405.11143, 2024. + 

+[267] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, Xiangyu Zhang, and Heung-Yeung Shum. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero, February 2025. +[268] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, Xiangyu Zhang, and Heung-Yeung Shum. Open-reasoner-zero: An open source approach to scaling up reinforcement learning on the base model. arXiv preprint arXiv:2503.24290, 2025. 
+[269] Mengkang Hu, Tianxing Chen, Qiguang Chen, Yao Mu, Wenqi Shao, and Ping Luo. Hiagent: Hierarchical working memory management for solving long-horizon agent tasks with large language model. arXiv preprint arXiv:2408.09559, 2024. +[270] Mengkang Hu, Yao Mu, Xinmiao Chelsey Yu, Mingyu Ding, Shiguang Wu, Wenqi Shao, Qiguang Chen, Bin Wang, Yu Qiao, and Ping Luo. Tree-planner: Efficient close-loop task planning with large language models. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=Glcsg6zOe. +[271] Mengkang Hu, Pu Zhao, Can Xu, Qingfeng Sun, Jianguang Lou, Qingwei Lin, Ping Luo, and Saravan Rajmohan. Agentgen: Enhancing planning abilities for large language model based agent via environment and task generation. arXiv preprint arXiv:2408.00764, 2024. +[272] Mengkang Hu, Tianxing Chen, Yude Zou, Yuheng Lei, Qiguang Chen, Ming Li, Hongyuan Zhang, Wenqi Shao, and Ping Luo. Text2world: Benchmarking large language models for symbolic world model generation. arXiv preprint arXiv:2502.13092, 2025. +[273] Mengkang Hu, Yuhang Zhou, Wendong Fan, Yuzhou Nie, Bowei Xia, Tao Sun, Ziyu Ye, Zhaoxuan Jin, Yingru Li, Qiguang Chen, et al. Owl: Optimized workforce learning for general multi-agent assistance in real-world task automation. arXiv preprint arXiv:2505.23885, 2025. +[274] Renjun Hu, Yi Cheng, Libin Meng, Jiaxin Xia, Yi Zong, Xing Shi, and Wei Lin. Training an llm-as-a-judge model: Pipeline, insights, and practical lessons. arXiv preprint arXiv:2502.02988, 2025. +[275] Zhiyuan Hu, Chumin Liu, Xidong Feng, Yilun Zhao, See-Kiong Ng, Anh Tuan Luu, Junxian He, Pang Wei Koh, and Bryan Hooi. Uncertainty of thoughts: Uncertainty-aware planning enhances information seeking in large language models. In ICLR 2024 Workshop on Large Language Model (LLM) Agents, March 2024. URL https://openreview.net/forum?id=ZWyLjimciT. 
+[276] Maggie Huan, Yuetai Li, Tuney Zheng, Xiaoyu Xu, Seungone Kim, Minxin Du, Radha Poovendran, Graham Neubig, and Xiang Yue. Does math reasoning improve general llm capabilities? understanding transferability of llm reasoning. arXiv preprint arXiv:2507.00432, 2025. +[277] Chenghua Huang, Lu Wang, Fangkai Yang, Pu Zhao, Zhixu Li, Qingwei Lin, Dongmei Zhang, Saravan Rajmohan, and Qi Zhang. Lean and mean: Decoupled value policy optimization with global value guidance. arXiv preprint arXiv:2502.16944, 2025. +[278] Chengsong Huang, Langlin Huang, Jixuan Leng, Jiacheng Liu, and Jiaxin Huang. Efficient test-time scaling via self-calibration. arXiv preprint arXiv:2503.00031, 2025. +[279] Chengyu Huang, Zhengxin Zhang, and Claire Cardie. Hapo: Training language models to reason concisely via history-aware policy optimization. arXiv preprint arXiv:2505.11225, 2025. +[280] Haiduo Huang, Fuwei Yang, Zhenhua Liu, Yixing Xu, Jinze Li, Yang Liu, Xuanwu Yin, Dong Li, Pengju Ren, and Emad Barsoum. Jakiro: Boosting speculative decoding with decoupled multi-head via moe. arXiv preprint arXiv:2502.06282, 2025. +[281] Haoyang Huang, Tianyi Tang, Dongdong Zhang, Xin Zhao, Ting Song, Yan Xia, and Furu Wei. Not all languages are created equal in LLMs: Improving multilingual capability by cross-lingual-thought prompting. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 12365–12394, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.826. URL https://aclanthology.org/2023.findings-emnlp.826/. + 

+[282] Hui Huang, Yancheng He, Hongli Zhou, Rui Zhang, Wei Liu, Weixun Wang, Wenbo Su, Bo Zheng, and Jiaheng Liu. Think-j: Learning to think for generative llm-as-a-judge. arXiv preprint arXiv:2505.14268, 2025. +[283] Jen-tse Huang, Eric John Li, Man Ho Lam, Tian Liang, Wenxuan Wang, Youliang Yuan, Wenxiang Jiao, Xing Wang, Zhaopeng Tu, and Michael R Lyu. 
How far are we on the decision-making of llms? evaluating llms' gaming ability in multi-agent environments. arXiv preprint arXiv:2403.11807, 2024. +[284] Jiaxing Huang and Jingyi Zhang. A survey on evaluation of multimodal large language models. arXiv preprint arXiv:2408.15769, 2024. +[285] Jie Huang and Kevin Chen-Chuan Chang. Towards reasoning in large language models: A survey. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Findings of the Association for Computational Linguistics: ACL 2023, pages 1049–1065, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.67. URL https://aclanthology.org/2023-findings-acl.67/. +[286] Jie Huang, Xinyun Chen, Swaroop Mishra, Huaixiu Steven Zheng, Adams Wei Yu, Xinying Song, and Denny Zhou. Large language models cannot self-correct reasoning yet. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=IkmD3fKBPQ. +[287] Jinyang Huang, Xiachong Feng, Qiguang Chen, Hanjie Zhao, Zihui Cheng, Jiesong Bai, Jingxuan Zhou, Min Li, and Libo Qin. Mldebugging: Towards benchmarking code debugging across multi-library scenarios. arXiv preprint arXiv:2506.13824, 2025. +[288] Kaixuan Huang, Jiacheng Guo, Zihao Li, Xiang Ji, Jiawei Ge, Wenzhe Li, Yingqing Guo, Tianle Cai, Hui Yuan, Runzhe Wang, et al. Math-perturb: Benchmarking llms' math reasoning abilities against hard perturbations. arXiv preprint arXiv:2502.06453, 2025. +[289] Lei Huang, Xiaocheng Feng, Weitao Ma, Liang Zhao, Yuchun Fan, Weihong Zhong, Dongliang Xu, Qing Yang, Hongtao Liu, and Bing Qin. Advancing large language model attribution through self-improving. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 3822-3836, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.223. 
URL https://aclanthology.org/2024.emnlp-main.223/. +[290] Shijue Huang, Hongru Wang, Wanjun Zhong, Zhaochen Su, Jiazhan Feng, Bowen Cao, and Yi R Fung. Adactrl: Towards adaptive and controllable reasoning via difficulty-aware budgeting. arXiv preprint arXiv:2505.18822, 2025. +[291] Shulin Huang, Linyi Yang, Yan Song, Shuang Chen, Leyang Cui, Ziyu Wan, Qingcheng Zeng, Ying Wen, Kun Shao, Weinan Zhang, et al. Thinkbench: Dynamic out-of-distribution evaluation for robust llm reasoning. arXiv preprint arXiv:2502.16268, 2025. +[292] Tiansheng Huang, Sihao Hu, Fatih Ilhan, Selim Furkan Tekin, Zachary Yahn, Yichang Xu, and Ling Liu. Safety tax: Safety alignment makes your large reasoning models less reasonable. arXiv preprint arXiv:2503.00555, 2025. +[293] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025. +[294] Xiaoke Huang, Juncheng Wu, Hui Liu, Xianfeng Tang, and Yuyin Zhou. m1: Unleash the potential of test-time scaling for medical reasoning with large language models. arXiv preprint arXiv:2504.00869, 2025. +[295] Yiming Huang, Xiao Liu, Yeyun Gong, Zhibin Gou, Yelong Shen, Nan Duan, and Weizhu Chen. Key-point-driven data synthesis with its enhancement on mathematical reasoning. arXiv preprint arXiv:2403.02333, 2024. +[296] Yuzhen Huang, Weihao Zeng, Xingshan Zeng, Qi Zhu, and Junxian He. Pitfalls of rule-and model-based verifiers-a case study on mathematical reasoning. arXiv preprint arXiv:2505.22203, 2025. +[297] Zeyu Huang, Tianhao Cheng, Zihan Qiu, Zili Wang, Yinghui Xu, Edoardo M Ponti, and Ivan Titov. Blending supervised and reinforcement fine-tuning with prefix sampling. arXiv preprint arXiv:2507.01679, 2025. 
+ +[298] Zhen Huang, Zengzhi Wang, Shijie Xia, Xuefeng Li, Haoyang Zou, Ruijie Xu, Run-Ze Fan, Lyumanshan Ye, Ethan Chern, Yixin Ye, Yikai Zhang, Yuqing Yang, Ting Wu, Binjie Wang, Shichao Sun, Yang Xiao, Yiyuan Li, Fan Zhou, Steffi Chern, Yiwei Qin, Yan Ma, Jiadi Su, Yixiu Liu, Yuxiang Zheng, Shaoting Zhang, Dahua Lin, Yu Qiao, and Pengfei Liu. Olympic: Benchmarking multi-discipline cognitive reasoning for superintelligent AI. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024. URL https://openreview.net/forum?id=ayF8bEKYQy. +[299] Zhen Huang, Haoyang Zou, Xuefeng Li, Yixiu Liu, Yuxiang Zheng, Ethan Chern, Shijie Xia, Yiwei Qin, Weizhe Yuan, and Pengfei Liu. O1 replication journey–part 2: Surpassing o1-preview through simple distillation, big progress or bitter lesson? arXiv preprint arXiv:2411.16489, 2024. +[300] Zhongzhen Huang, Gui Geng, Shengyi Hua, Zhen Huang, Haoyang Zou, Shaoting Zhang, Pengfei Liu, and Xiaofan Zhang. O1 replication journey–part 3: Inference-time scaling for medical reasoning. arXiv preprint arXiv:2501.06458, 2025. +[301] Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2.5-coder technical report. arXiv preprint arXiv:2409.12186, 2024. +[302] Hyeonbin Hwang, Doyoung Kim, Seungone Kim, Seonghyeon Ye, and Minjoon Seo. Self-exlore: Enhancing mathematical reasoning in language models with fine-grained rewards. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 1444-1466, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.78. URL https://aclanthology.org/2024 findings-emnlp.78/. +[303] Shima Imani, Liang Du, and Harsh Shrivastava. Mathprompter: Mathematical reasoning using large language models. 2023. 
+[304] Md Ashraful Islam, Mohammed Eunus Ali, and Md Rizwan Parvez. Mapcoder: Multi-agent code generation for competitive problem solving. arXiv preprint arXiv:2405.11403, 2024. +[305] Hamish Ivison, Yizhong Wang, Valentina Pyatkin, Nathan Lambert, Matthew Peters, Pradeep Dasigi, Joel Jang, David Wadden, Noah A Smith, Iz Beltagy, et al. Camels in a changing climate: Enhancing lm adaptation with tulu 2, 2023. +[306] Hamish Ivison, Yizhong Wang, Jiacheng Liu, Zeqiu Wu, Valentina Pyatkin, Nathan Lambert, Noah A. Smith, Yejin Choi, and Hannaneh Hajishirzi. Unpacking DPO and PPO: Disentangling best practices for learning from preference feedback. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=JMBWTlazjW. +[307] Aaron Jaech, Adam Kalai, Adam Lerner, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024. +[308] Eeshaan Jain, Johann Wenckstern, Benedikt von Querfurth, and Charlotte Bunne. Test-time view selection for multi-modal decision making. In ICLR 2025 Workshop on Machine Learning for Genomics Explorations, March 2025. URL https://openreview.net/forum?id=aNmZ9s6BZV. +[309] Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. Livecodebench: Holistic and contamination free evaluation of large language models for code. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=chfJJYC3iL. +[310] Sooyoung Jang and Hyung-II Kim. Entropy-aware model initialization for effective exploration in deep reinforcement learning. Sensors, 22(15):5845, 2022. +[311] Ke Ji, Jiahao Xu, Tian Liang, Qiuzhi Liu, Zhiwei He, Xingyu Chen, Xiaoyuan Liu, Zhijie Wang, Junying Chen, Benyou Wang, et al. 
The first few tokens are all you need: An efficient and effective unsupervised prefix fine-tuning method for reasoning models. arXiv preprint arXiv:2503.02875, 2025. + +[312] Tao Ji, Bin Guo, Yuanbin Wu, Qipeng Guo, Lixing Shen, Zhan Chen, Xipeng Qiu, Qi Zhang, and Tao Gui. Towards economical inference: Enabling deepseek's multi-head latent attention in any transformer-based llms. arXiv preprint arXiv:2502.14837, 2025. +[313] Yichao Ji. A small step towards reproducing openai o1: Progress report on the steiner open source models, October 2024. URL https://medium.com/@peakji/b9a756a00855. +[314] Yixin Ji, Juntao Li, Hai Ye, Kaixin Wu, Jia Xu, Linjian Mo, and Min Zhang. Test-time computing: from system-1 thinking to system-2 thinking. arXiv preprint arXiv:2501.02497, 2025. +[315] Ziwei Ji, Tiezheng Yu, Yan Xu, Nayeon Lee, Etsuko Ishii, and Pascale Fung. Towards mitigating LLM hallucination via self reflection. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 1827-1843, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.123. URL https://aclanthology.org/2023.findings-emnlp.123/. +[316] Boyu Jia, Junzhe Zhang, Huixuan Zhang, and Xiaojun Wan. Exploring and evaluating multimodal knowledge reasoning consistency of multimodal large language models. arXiv preprint arXiv:2503.04801, 2025. +[317] Zeyu Jia, Alexander Rakhlin, and Tengyang Xie. Do we need to verify step by step? rethinking process supervision from a theoretical perspective. arXiv preprint arXiv:2502.10581, 2025. +[318] Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh, Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. Mistral 7b, October 2023. 
+[319] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024. +[320] Fengqing Jiang, Zhangchen Xu, Yuetai Li, Luyao Niu, Zhen Xiang, Bo Li, Bill Yuchen Lin, and Radha Poovendran. Safechain: Safety of language models with long chain-of-thought reasoning capabilities. arXiv preprint arXiv:2502.12025, 2025. +[321] Huchen Jiang, Yangyang Ma, Chaofan Ding, Kexin Luan, and Xinhan Di. Towards intrinsic self-correction enhancement in monte carlo tree search boosted reasoning via iterative preference learning. arXiv preprint arXiv:2412.17397, 2024. +[322] Jinhao Jiang, Jiayi Chen, Junyi Li, Ruiyang Ren, Shijie Wang, Wayne Xin Zhao, Yang Song, and Tao Zhang. Rag-star: Enhancing deliberative reasoning with retrieval augmented verification and refinement. arXiv preprint arXiv:2412.12881, 2024. +[323] Jinhao Jiang, Zhipeng Chen, Yingqian Min, Jie Chen, Xiaoxue Cheng, Jiapeng Wang, Yiru Tang, Haoxiang Sun, Jia Deng, Wayne Xin Zhao, et al. Technical report: Enhancing llm reasoning with reward-guided tree search. arXiv preprint arXiv:2411.11694, 2024. +[324] Nan Jiang, Ziming Wu, De-Chuan Zhan, Fuming Lai, and Shaobing Lian. Dart: Distilling autoregressive reasoning to silent thought. arXiv preprint arXiv:2506.11752, 2025. +[325] Shuyang Jiang, Yusheng Liao, Zhe Chen, Ya Zhang, Yanfeng Wang, and Yu Wang. Meds 3: Towards medical small language models with self-evolved slow thinking. arXiv preprint arXiv:2501.12051, 2025. +[326] Yuxuan Jiang, Dawei Li, and Frank Ferraro. Drp: Distilled reasoning pruning with skill-aware step decomposition for efficient large reasoning models. arXiv preprint arXiv:2505.13975, 2025. +[327] Carlos E Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik R Narasimhan. SWE-bench: Can language models resolve real-world github issues? 
In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=VTF8yNQM66. +[328] Di Jin, Eileen Pan, Nassim Oufattole, Wei-Hung Weng, Hanyi Fang, and Peter Szolovits. What disease does this patient have? a large-scale open domain question answering dataset + +from medical exams. Applied Sciences, 11(14), July 2021. ISSN 2076-3417. doi: 10.3390/app11146421. URL https://www.mdpi.com/2076-3417/11/14/6421. +[329] Mingyu Jin, Weidi Luo, Sitao Cheng, Xinyi Wang, Wenyue Hua, Ruixiang Tang, William Yang Wang, and Yongfeng Zhang. Disentangling memory and reasoning ability in large language models. arXiv preprint arXiv:2411.13504, 2024. +[330] Mingyu Jin, Qinkai Yu, Dong Shu, Haiyan Zhao, Wenyue Hua, Yanda Meng, Yongfeng Zhang, and Mengnan Du. The impact of reasoning step length on large language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 1830–1842, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.108. URL https://aclanthology.org/2024-findings-acl.108/. +[331] Mingyu Jin, Qinkai Yu, Jingyuan Huang, Qingcheng Zeng, Zhenting Wang, Wenyue Hua, Haiyan Zhao, Kai Mei, Yanda Meng, Kaize Ding, Fan Yang, Mengnan Du, and Yongfeng Zhang. Exploring concept depth: How large language models acquire knowledge and concept at different layers? In Owen Rambow, Leo Wanner, Marianna Apidianaki, Hend Al-Khalifa, Barbara Di Eugenio, and Steven Schockaert, editors, Proceedings of the 31st International Conference on Computational Linguistics, pages 558-573, Abu Dhabi, UAE, January 2025. Association for Computational Linguistics. URL https://aclanthology.org/2025.coling-main.37/. +[332] Zhensheng Jin, Xinze Li, Yifan Ji, Chunyi Peng, Zhenghao Liu, Qi Shi, Yukun Yan, Shuo Wang, Furong Peng, and Ge Yu. 
Recut: Balancing reasoning length and accuracy in llms via stepwise trails and preference optimization. arXiv preprint arXiv:2506.10822, 2025. +[333] Andy L Jones. Scaling scaling laws with board games. arXiv preprint arXiv:2104.03113, 2021. +[334] Cameron R Jones and Benjamin K Bergen. Large language models pass the Turing test. arXiv preprint arXiv:2503.23674, 2025. +[335] Prashank Kadam. Gpt-guided monte carlo tree search for symbolic regression in financial fraud detection. arXiv preprint arXiv:2411.04459, 2024. +[336] Saurav Kadavath, Tom Conerly, Amanda Askell, Tom Henighan, Dawn Drain, Ethan Perez, Nicholas Schiefer, Zac Hatfield-Dodds, Nova DasSarma, Eli Tran-Johnson, et al. Language models (mostly) know what they know. arXiv preprint arXiv:2207.05221, 2022. +[337] Ryo Kamoi, Sarkar Snigdha Sarathi Das, Renze Lou, Jihyun Janice Ahn, Yilun Zhao, Xiaoxin Lu, Nan Zhang, Yusen Zhang, Haoran Ranran Zhang, Sujeeth Reddy Vummanthala, Salika Dave, Shaobo Qin, Arman Cohan, Wenpeng Yin, and Rui Zhang. Evaluating LLMs at detecting errors in LLM responses. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=dnwRScljXr. +[338] Jikun Kang, Xin Zhe Li, Xi Chen, Amirreza Kazemi, Qianyi Sun, Boxing Chen, Dong Li, Xu He, Quan He, Feng Wen, et al. Mindstar: Enhancing math reasoning in pre-trained llms at inference time. arXiv preprint arXiv:2405.16265, 2024. +[339] Liwei Kang, Zirui Zhao, David Hsu, and Wee Sun Lee. On the empirical complexity of reasoning and planning in LLMs. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 2897-2936, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.164. URL https://aclanthology.org/2024-findings-emnlp.164/. +[340] Yu Kang, Xianghui Sun, Liangyu Chen, and Wei Zou. C3ot: Generating shorter chain-of-thought without compromising effectiveness. 
39(23):24312-24320, Apr 2025. +[341] Zhewei Kang, Xuandong Zhao, and Dawn Song. Scalable best-of-n selection for large language models via self-certainty. arXiv preprint arXiv:2502.18581, 2025. +[342] Manuj Kant, Sareh Nabi, Manav Kant, Roland Scharrer, Megan Ma, and Marzieh Nabi. Towards robust legal reasoning: Harnessing logical llms in law. arXiv preprint arXiv:2502.17638, 2025. +[343] Mehran Kazemi, Najoung Kim, Deepti Bhatia, Xin Xu, and Deepak Ramachandran. LAM-BADA: Backward chaining for automated reasoning in natural language. In Anna Rogers, + +Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6547-6568, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.361. URL https://aclanthology.org/2023.acl-long.361/. +[344] Amirhossein Kazemnejad, Milad Aghajohari, Eva Portelance, Alessandro Sordoni, Siva Reddy, Aaron Courville, and Nicolas Le Roux. Vineppo: Unlocking rl potential for llm reasoning through refined credit assignment. arXiv preprint arXiv:2410.01679, 2024. +[345] Muhammad Khalifa, Lajanugen Logeswaran, Moontae Lee, Honglak Lee, and Lu Wang. Grace: Discriminator-guided chain-of-thought reasoning. arXiv preprint arXiv:2305.14934, 2023. +[346] Artyom Kharinaev, Viktor Moskvoretskii, Egor Shvetsov, Kseniia Studenikina, Bykov Mikhail, and Evgeny Burnaev. Investigating the impact of quantization methods on the safety and reliability of large language models. arXiv preprint arXiv:2502.15799, 2025. +[347] Hyunwoo Kim, Melanie Sclar, Tan Zhi-Xuan, Lance Ying, Sydney Levine, Yang Liu, Joshua B Tenenbaum, and Yejin Choi. Hypothesis-driven theory-of-mind reasoning for large language models. arXiv preprint arXiv:2502.11881, 2025. +[348] Jiin Kim, Byeongjun Shin, Jinha Chung, and Minsoo Rhu. 
The cost of dynamic reasoning: Demystifying ai agents and test-time scaling from an ai infrastructure perspective. arXiv preprint arXiv:2506.04301, 2025. +[349] Juno Kim, Denny Wu, Jason Lee, and Taiji Suzuki. Metastable dynamics of chain-of-thought reasoning: Provable benefits of search, rl and distillation. arXiv preprint arXiv:2502.01694, 2025. +[350] Moo Jin Kim, Chelsea Finn, and Percy Liang. Fine-tuning vision-language-action models: Optimizing speed and success. arXiv preprint arXiv:2502.19645, 2025. +[351] Naryeong Kim, Sungmin Kang, Gabin An, and Shin Yoo. Lachesis: Predicting llm inference accuracy using structural properties of reasoning paths. arXiv preprint arXiv:2412.08281, 2024. +[352] Seungone Kim, Se Joo, Doyoung Kim, Joel Jang, Seonghyeon Ye, Jamin Shin, and Minjoon Seo. The CoT collection: Improving zero-shot and few-shot learning of language models via chain-of-thought fine-tuning. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12685-12708, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.782. URL https://aclanthology.org/2023.emnlp-main.782/. +[353] Seungone Kim, Juyoung Suk, Shayne Longpre, Bill Yuchen Lin, Jamin Shin, Sean Welleck, Graham Neubig, Moontae Lee, Kyungjae Lee, and Minjoon Seo. Prometheus 2: An open source language model specialized in evaluating other language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 4334-4353, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.248. URL https://aclanthology.org/2024.emnlp-main.248/. +[354] Sunnie SY Kim, Jennifer Wortman Vaughan, Q Vera Liao, Tania Lombrozo, and Olga Russakovsky. 
Fostering appropriate reliance on large language models: The role of explanations, sources, and inconsistencies. arXiv preprint arXiv:2502.08554, 2025. +[355] Jing Yu Koh, Stephen McAleer, Daniel Fried, and Ruslan Salakhutdinov. Tree search for language model agents. arXiv preprint arXiv:2407.01476, 2024. +[356] Deqian Kong, Minglu Zhao, Dehong Xu, Bo Pang, Shu Wang, Edouardo Honig, Zhangzhang Si, Chuan Li, Jianwen Xie, Sirui Xie, et al. Scalable language models with posterior inference of latent thought vectors. arXiv preprint arXiv:2502.01567, 2025. +[357] Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. Overthinking: Slowdown attacks on reasoning llms. arXiv preprint arXiv:2502.02542, 2025. + +[358] Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, et al. Training language models to self-correct via reinforcement learning. arXiv preprint arXiv:2409.12917, 2024. +[359] Komal Kumar, Tajamul Ashraf, Omkar Thawakar, Rao Muhammad Anwer, Hisham Cholakkal, Mubarak Shah, Ming-Hsuan Yang, Phillip H. S. Torr, Salman Khan, and Fahad Shahbaz Khan. Llm post-training: A deep dive into reasoning large language models, 2025. +[360] Martin Kuo, Jianyi Zhang, Aolin Ding, Qinsi Wang, Louis DiValentin, Yujia Bao, Wei Wei, Da-Cheng Juan, Hai Li, and Yiran Chen. H-cot: Hijacking the chain-of-thought safety reasoning mechanism to jailbreak large reasoning models, including openai o1/o3, deepseek-r1, and gemini 2.0 flash thinking. arXiv preprint arXiv:2502.12893, 2025. +[361] EvolvingLMMs Lab. Open-r1-multimodal. https://github.com/EvolvingLMMs-Lab/open-r1-multimodal, February 2025. +[362] Bespoke Labs. Bespoke-stratos: The unreasonable effectiveness of reasoning distillation. https://www.bespokelabs.ai/blog/bespoke-stratos-the-unreasonable-effectiveness-of-reasoning-distillation, January 2025. Accessed: 2025-01-22. 
+[363] Inception Labs, Samar Khanna, Siddhant Kharbanda, Shufan Li, Harshit Varma, Eric Wang, Sawyer Birnbaum, Ziyang Luo, Yanis Miraoui, Akash Palrecha, et al. Mercury: Ultra-fast language models based on diffusion. arXiv preprint arXiv:2506.17298, 2025. +[364] Huiyuan Lai, Xiao Zhang, and Malvina Nissim. Multidimensional consistency improves reasoning in language models. arXiv preprint arXiv:2503.02670, 2025. +[365] Xin Lai, Zhuotao Tian, Yukang Chen, Senqiao Yang, Xiangru Peng, and Jiaya Jia. Step-dpo: Step-wise preference optimization for long-chain reasoning of llms. arXiv preprint arXiv:2406.18629, 2024. +[366] Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V. Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, Yuling Gu, Saumya Malik, Victoria Graf, Jena D. Hwang, Jiangjiang Yang, Ronan Le Bras, Oyvind Tafjord, Chris Wilhelm, Luca Soldaini, Noah A. Smith, Yizhong Wang, Pradeep Dasigi, and Hannaneh Hajishirzi. Tulu 3: Pushing frontiers in open language model post-training, 2024. +[367] Nathan Lambert, Valentina Pyatkin, Jacob Morrison, LJ Miranda, Bill Yuchen Lin, Khyathi Chandu, Nouha Dziri, Sachin Kumar, Tom Zick, Yejin Choi, et al. Rewardbench: Evaluating reward models for language modeling. arXiv preprint arXiv:2403.13787, 2024. +[368] Andrew Lampinen, Ishita Dasgupta, Stephanie Chan, Kory Mathewson, Mh Tessler, Antonia Creswell, James McClelland, Jane Wang, and Felix Hill. Can language models learn from explanations in context? In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang, editors, Findings of the Association for Computational Linguistics: EMNLP 2022, pages 537-563, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022-findings-emnlp.38. URL https://aclanthology.org/2022-findings-emnlp.38. +[369] Jack Lanchantin, Angelica Chen, Shehzaad Dhuliawala, Ping Yu, Jason Weston, Sainbayar Sukhbaatar, and Ilia Kulikov. 
Diverse preference optimization. arXiv preprint arXiv:2501.18101, 2025. +[370] Anh Duc Le, Tu Vu, Nam Le Hai, Nguyen Thi Ngoc Diep, Linh Ngo Van, Trung Le, and Thien Huu Nguyen. Cot2align: Cross-chain of thought distillation via optimal transport alignment for language models with different tokenizers. arXiv preprint arXiv:2502.16806, 2025. +[371] Joshua Ong Jun Leang, Aryo Pradipta Gema, and Shay B Cohen. Comat: Chain of mathematically annotated thought improves mathematical reasoning. arXiv preprint arXiv:2410.10336, 2024. +[372] Joshua Ong Jun Leang, Giwon Hong, Wenda Li, and Shay B Cohen. Theorem prover as a judge for synthetic data generation. arXiv preprint arXiv:2502.13137, 2025. +[373] Byeongchan Lee, Jonghoon Lee, Dongyoung Kim, Jaehyung Kim, and Jinwoo Shin. Collaborative llm inference via planning for efficient reasoning. arXiv preprint arXiv:2506.11578, 2025. + +[374] Hyunseok Lee, Seunghyuk Oh, Jaehyung Kim, Jinwoo Shin, and Jihoon Tack. Revise: Learning to refine at test-time via intrinsic self-verification. arXiv preprint arXiv:2502.14565, 2025. +[375] Jinu Lee and Julia Hockenmaier. Evaluating step-by-step reasoning traces: A survey. arXiv preprint arXiv:2502.12289, 2025. +[376] Jung Hyun Lee, June Yong Yang, Byeongho Heo, Dongyoon Han, and Kang Min Yoo. Token-supervised value models for enhancing mathematical reasoning capabilities of large language models. arXiv preprint arXiv:2407.12863, 2024. +[377] Kuang-Huei Lee, Ian Fischer, Yueh-Hua Wu, Dave Marwood, Shumeet Baluja, Dale Schuurmans, and Xinyun Chen. Evolving deeper llm thinking. arXiv preprint arXiv:2501.09891, 2025. +[378] Lucas Lehnert, Sainbayar Sukhbaatar, DiJia Su, Qinqing Zheng, Paul McVay, Michael Rabbat, and Yuandong Tian. Beyond a*: Better planning with transformers via search dynamics bootstrapping. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=SGoVIC0u0f. +[379] Bin Lei, Yi Zhang, Shan Zuo, Ali Payani, and Caiwen Ding. 
MACM: Utilizing a multi-agent system for condition mining in solving complex mathematical problems. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=VR2RdSxtzs. +[380] Jixuan Leng, Cassandra A Cohen, Zhixian Zhang, Chenyan Xiong, and William W Cohen. Semi-structured llm reasoners can be rigorously audited. arXiv preprint arXiv:2505.24217, 2025. +[381] Adam Lerer, Hengyuan Hu, Jakob Foerster, and Noam Brown. Improving policies via search in cooperative partially observable games. Proceedings of the AAAI Conference on Artificial Intelligence, 34(05):7187-7194, Apr. 2020. doi: 10.1609/aaai.v34i05.6208. URL https://ojs.aaai.org/index.php/AAAI/article/view/6208. +[382] Belinda Z Li, Been Kim, and Zi Wang. Questbench: Can llms ask the right question to acquire information in reasoning tasks? arXiv preprint arXiv:2503.22674, 2025. +[383] Bingxuan Li, Yiwei Wang, Jiuming Gu, Kai-Wei Chang, and Nanyun Peng. Metal: A multiagent framework for chart generation with test-time scaling. arXiv preprint arXiv:2502.17651, 2025. +[384] Bohan Li, Jiannan Guan, Longxu Dou, Yunlong Feng, Dingzirui Wang, Yang Xu, Enbo Wang, Qiguang Chen, Bichen Wang, Xiao Xu, et al. Can large language models understand you better? an mbti personality detection dataset aligned with population traits. arXiv preprint arXiv:2412.12510, 2024. +[385] Chen Li, Weiqi Wang, Jingcheng Hu, Yixuan Wei, Nanning Zheng, Han Hu, Zheng Zhang, and Houwen Peng. Common 7b language models already possess strong math capabilities. arXiv preprint arXiv:2403.04706, 2024. +[386] Chen Li, Nazhou Liu, and Kai Yang. Adaptive group policy optimization: Towards stable training and token-efficient reasoning. arXiv preprint arXiv:2503.15952, 2025. +[387] Chengpeng Li, Zhengyang Tang, Ziniu Li, Mingfeng Xue, Keqin Bao, Tian Ding, Ruoyu Sun, Benyou Wang, Xiang Wang, Junyang Lin, et al. Cort: Code-integrated reasoning within thinking. 
arXiv preprint arXiv:2506.09820, 2025. +[388] Chengpeng Li, Mingfeng Xue, Zhenru Zhang, Jiaxi Yang, Beichen Zhang, Xiang Wang, Bowen Yu, Binyuan Hui, Junyang Lin, and Dayiheng Liu. Start: Self-taught reasoner with tools. arXiv preprint arXiv:2503.04625, 2025. +[389] Chengshu Li, Jacky Liang, Andy Zeng, Xinyun Chen, Karol Hausman, Dorsa Sadigh, Sergey Levine, Li Fei-Fei, Fei Xia, and Brian Ichter. Chain of code: Reasoning with a language model-augmented code emulator. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 28259-28277. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/1i24ar.html. + +[390] Chengzhu Li, Wenshan Wu, Huanyu Zhang, Yan Xia, Shaoguang Mao, Li Dong, Ivan Vulic, and Furu Wei. Imagine while reasoning in space: Multimodal visualization-of-thought. arXiv preprint arXiv:2501.07542, 2025. +[391] Cheryl Li, Tianyuan Xu, and Yiwen Guo. Reasoning-as-logic-units: Scaling test-time reasoning in large language models through logic unit alignment. arXiv preprint arXiv:2502.07803, 2025. +[392] Dacheng Li, Shiyi Cao, Chengkun Cao, Xiuyu Li, Shangyin Tan, Kurt Keutzer, Jiarong Xing, Joseph E Gonzalez, and Ion Stoica. S*: Test time scaling for code generation. arXiv preprint arXiv:2502.14382, 2025. +[393] Dacheng Li, Shiyi Cao, Tyler Griggs, Shu Liu, Xiangxi Mo, Shishir G Patil, Matei Zaharia, Joseph E Gonzalez, and Ion Stoica. Llms can easily learn to reason from demonstrations structure, not content, is what matters! arXiv preprint arXiv:2502.07374, 2025. +[394] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, et al. From generation to judgment: Opportunities and challenges of llm-as-a-judge. arXiv preprint arXiv:2411.16594, 2024. 
+[395] Gengxu Li, Tingyu Xia, Yi Chang, and Yuan Wu. Length-controlled margin-based preference optimization without reference model. arXiv preprint arXiv:2502.14643, 2025. +[396] Haitao Li, Qian Dong, Junjie Chen, Huixue Su, Yujia Zhou, Qingyao Ai, Ziyi Ye, and Yiqun Liu. Llms-as-judges: a comprehensive survey on llm-based evaluation methods. arXiv preprint arXiv:2412.05579, 2024. +[397] Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. https://huggingface.co/AI-MO/NuminaMath-CoT, 2024. +[398] Jia-Nan Li, Jian Guan, Wei Wu, and Rui Yan. Extended inductive reasoning for personalized preference inference from behavioral signals. arXiv preprint arXiv:2505.18071, 2025. +[399] Jiachun Li, Pengfei Cao, Yubo Chen, Jiexin Xu, Huajun Li, Xiaojian Jiang, Kang Liu, and Jun Zhao. Rewarding curse: Analyze and mitigate reward modeling issues for llm reasoning. arXiv preprint arXiv:2503.05188, 2025. +[400] Jierui Li, Hung Le, Yinbo Zhou, Caiming Xiong, Silvio Savarese, and Doyen Sahoo. Codetree: Agent-guided tree search for code generation with large language models. arXiv preprint arXiv:2411.04329, 2024. +[401] Junlong Li, Daya Guo, Dejian Yang, Runxin Xu, Yu Wu, and Junxian He. Codei/o: Condensing reasoning patterns via code input-output prediction. arXiv preprint arXiv:2502.07316, 2025. +[402] Kaixin Li. Verified taco problems. https://huggingface.co/datasets/likaixin/TACO-verified, 2024. URL https://huggingface.co/datasets/likaixin/TACO-verified. +[403] Kechen Li, Wenqi Zhu, Coralia Cartis, Tianbo Ji, and Shiwei Liu. Sos1: O1 and r1-like reasoning llms are sum-of-square solvers. arXiv preprint arXiv:2502.20545, 2025. +[404] Long Li, Weiwen Xu, Jiayan Guo, Ruochen Zhao, Xingxuan Li, Yuqian Yuan, Boqiang Zhang, Yuming Jiang, Yifei Xin, Ronghao Dang, et al. 
Chain of ideas: Revolutionizing research via novel idea development with llm agents. arXiv preprint arXiv:2410.13185, 2024. +[405] Margaret Li, Sneha Kudugunta, and Luke Zettlemoyer. (mis) fitting: A survey of scaling laws. arXiv preprint arXiv:2502.18969, 2025. +[406] Ming Li, Lichang Chen, Jiuhai Chen, Shwai He, Heng Huang, Jiuming Gu, and Tianyi Zhou. Reflection-tuning: Data recycling improves llm instruction-tuning. arXiv preprint arXiv:2310.11716, 2023. +[407] Ming Li, Yanhong Li, and Tianyi Zhou. What happened in llms layers when trained for fast vs. slow thinking: A gradient perspective. arXiv preprint arXiv:2410.23743, 2024. + +[408] Minzhi Li, Zhengyuan Liu, Shumin Deng, Shafiq Joty, Nancy Chen, and Min-Yen Kan. Dna-eval: Enhancing large language model evaluation through decomposition and aggregation. In Proceedings of the 31st International Conference on Computational Linguistics, pages 2277-2290, January 2025. +[409] Moxin Li, Yuantao Zhang, Wenjie Wang, Wentao Shi, Zhuo Liu, Fuli Feng, and Tat-Seng Chua. Self-improvement towards pareto optimality: Mitigating preference conflicts in multi-objective alignment. arXiv preprint arXiv:2502.14354, 2025. +[410] Peiji Li, Kai Lv, Yunfan Shao, Yichuan Ma, Linyang Li, Xiaqing Zheng, Xipeng Qiu, and Qipeng Guo. Fastmcts: A simple sampling strategy for data synthesis. arXiv preprint arXiv:2502.11476, 2025. +[411] Qingyao Li, Wei Xia, Kounianhua Du, Xinyi Dai, Ruiming Tang, Yasheng Wang, Yong Yu, and Weinan Zhang. Rethinkmcts: Refining erroneous thoughts in monte carlo tree search for code generation. arXiv preprint arXiv:2409.09584, 2024. +[412] Shuangtao Li, Shuaihao Dong, Kexin Luan, Xinhan Di, and Chaofan Ding. Enhancing reasoning through process supervision with monte carlo tree search. In The First Workshop on Neural Reasoning and Mathematical Discovery at AAAI'2025, January 2025. URL https://openreview.net/forum?id=OupEEi1341. +[413] Siheng Li, Zhanhui Zhou, Wai Lam, Chao Yang, and Chaochao Lu. 
Repo: Replay-enhanced policy optimization. arXiv preprint arXiv:2506.09340, 2025. +[414] Wen-Ding Li, Keya Hu, Carter Larsen, Yuqing Wu, Simon Alford, Caleb Woo, Spencer M Dunn, Hao Tang, Michelangelo Naim, Dat Nguyen, et al. Combining induction and transduction for abstract reasoning. arXiv preprint arXiv:2411.02272, 2024. +[415] Wendi Li and Yixuan Li. Process reward model with q-value rankings. arXiv preprint arXiv:2410.11287, 2024. +[416] Wenjun Li, Changyu Chen, and Pradeep Varakantham. Unlocking large language model's planning capabilities with maximum diversity fine-tuning. arXiv preprint arXiv:2406.10479, 2024. +[417] Xiaonan Li and Xipeng Qiu. MoT: Memory-of-thought enables ChatGPT to self-improve. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 6354-6374, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.392. URL https://aclanthology.org/2023.emnlp-main.392/. +[418] Xiaoxi Li, Guanting Dong, Jiajie Jin, Yuyao Zhang, Yujia Zhou, Yutao Zhu, Peitian Zhang, and Zhicheng Dou. Search-o1: Agentic search-enhanced large reasoning models. arXiv preprint arXiv:2501.05366, 2025. +[419] Xinzhe Li. A survey on llm test-time compute via search: Tasks, llm profiling, search algorithms, and relevant frameworks. arXiv preprint arXiv:2501.10069, 2025. +[420] Xuefeng Li, Haoyang Zou, and Pengfei Liu. Limr: Less is more for rl scaling. arXiv preprint arXiv:2502.11886, 2025. +[421] Yafu Li, Zhilin Wang, Tingchen Fu, Ganqu Cui, Sen Yang, and Yu Cheng. From drafts to answers: Unlocking llm potential via aggregation fine-tuning. arXiv preprint arXiv:2501.11877, 2025. +[422] Yang Li. Policy guided tree search for enhanced llm reasoning. arXiv preprint arXiv:2502.06813, 2025. +[423] Yang Li, Dong Du, Linfeng Song, Chen Li, Weikang Wang, Tao Yang, and Haitao Mi. 
Hunyuanprover: A scalable data synthesis framework and guided tree search for automated theorem proving. arXiv preprint arXiv:2412.20735, 2024. +[424] Yang Li, Youssef Emad, Karthik Padthe, Jack Lanchantin, Weizhe Yuan, Thao Nguyen, Jason Weston, Shang-Wen Li, Dong Wang, Ilia Kulikov, et al. Naturalthoughts: Selecting and distilling reasoning traces for general reasoning tasks. arXiv preprint arXiv:2507.01921, 2025. +[425] Yifei Li, Zeqi Lin, Shizhuo Zhang, Qiang Fu, Bei Chen, Jian-Guang Lou, and Weizhu Chen. Making language models better reasoners with step-aware verifier. In Anna Rogers, Jordan + +Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5315-5333, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.291. URL https://aclanthology.org/2023.acl-long.291/. +[426] Yiwei Li, Ji Zhang, Shaoxiong Feng, Peiwen Yuan, Xinglin Wang, Jiayi Shi, Yueqi Zhang, Chuyi Tan, Boyuan Pan, Yao Hu, et al. Revisiting self-consistency from dynamic distributional alignment perspective on answer aggregation. arXiv preprint arXiv:2502.19830, 2025. +[427] Yujia Li, David Choi, Junyoung Chung, Nate Kushman, Julian Schrittwieser, Rémi Leblond, Tom Eccles, James Keeling, Felix Gimeno, Agustin Dal Lago, Thomas Hubert, Peter Choy, Cyprien de Masson d'Autume, Igor Babuschkin, Xinyun Chen, Po-Sen Huang, Johannes Welbl, Sven Gowal, Alexey Cherepanov, James Molloy, Daniel Mankowitz, Esme Sutherland Robson, Pushmeet Kohli, Nando de Freitas, Koray Kavukcuoglu, and Oriol Vinyals. Competition-level code generation with alphacode. arXiv preprint arXiv:2203.07814, 2022. +[428] Yunxin Li, Zhenyu Liu, Zitao Li, Xuanyu Zhang, Zhenran Xu, Xinyu Chen, Haoyuan Shi, Shenyuan Jiang, Xintong Wang, Jifang Wang, et al. Perception, reason, think, and plan: A survey on large multimodal reasoning models. arXiv preprint arXiv:2505.04921, 2025. 
+[429] Zheng Li, Qingxiu Dong, Jingyuan Ma, Di Zhang, and Zhifang Sui. Selfbudgeter: Adaptive token allocation for efficient llm reasoning. arXiv preprint arXiv:2505.11274, 2025. +[430] Zhiyuan Li, Hong Liu, Denny Zhou, and Tengyu Ma. Chain of thought empowers transformers to solve inherently serial problems. In The Twelfth International Conference on Learning Representations, January 2023. URL https://openreview.net/pdf?id=3EWTEy9MTM. +[431] Zhiyuan Li, Dongnan Liu, Chaoyi Zhang, Heng Wang, Tengfei Xue, and Weidong Cai. Enhancing advanced visual reasoning ability of large language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 1915-1929, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.114. URL https://aclanthology.org/2024.emnlp-main.114/. +[432] Zhong-Zhi Li, Duzhen Zhang, Ming-Liang Zhang, Jiaxin Zhang, Zengyan Liu, Yuxuan Yao, Haotian Xu, Junhao Zheng, Pei-Jie Wang, Xiuyi Chen, et al. From system 1 to system 2: A survey of reasoning large language models. arXiv preprint arXiv:2502.17419, 2025. +[433] Zhongzhi Li, Ming-Liang Zhang, Pei-Jie Wang, Jian Xu, Rui-Song Zhang, Yin Fei, Zhi-Long Ji, Jin-Feng Bai, Zhen-Ru Pan, Jiaxin Zhang, and Cheng-Lin Liu. CMMaTH: A Chinese multi-modal math skill evaluation benchmark for foundation models. In Owen Rambow, Leo Wanner, Marianna Apidianaki, Hend Al-Khalifa, Barbara Di Eugenio, and Steven Schockaert, editors, Proceedings of the 31st International Conference on Computational Linguistics, pages 2690–2726, Abu Dhabi, UAE, January 2025. Association for Computational Linguistics. URL https://aclanthology.org/2025.coling-main.184/. +[434] Zhuoqun Li, Haiyang Yu, Xuanang Chen, Hongyu Lin, Yaojie Lu, Fei Huang, Xianpei Han, Yongbin Li, and Le Sun. 
Deepsolution: Boosting complex engineering solution design via tree-based exploration and bi-point thinking. arXiv preprint arXiv:2502.20730, 2025. +[435] Zichao Li, Xueru Wen, Jie Lou, Yuqiu Ji, Yaojie Lu, Xianpei Han, Debing Zhang, and Le Sun. The devil is in the details: Tackling unimodal spurious correlations for generalizable multimodal reward models. In *Forty-second International Conference on Machine Learning*, 2025. URL https://openreview.net/forum?id=b0qRSUcQP7. +[436] Ziniu Li, Tian Xu, Yushun Zhang, Zhihang Lin, Yang Yu, Ruoyu Sun, and Zhi-Quan Luo. Remax: A simple, effective, and efficient reinforcement learning method for aligning large language models. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=Stn8hXkpe6. +[437] Jing Liang, Hongyao Tang, Yi Ma, Jinyi Liu, Yan Zheng, Shuyue Hu, Lei Bai, and Jianye Hao. Squeeze the soaked sponge: Efficient off-policy reinforcement finetuning for large language model. arXiv preprint arXiv:2507.06892, 2025. + +[438] Jintao Liang, Gang Su, Huifeng Lin, You Wu, Rui Zhao, and Ziyue Li. Reasoning rag via system 1 or system 2: A survey on reasoning agentic retrieval-augmented generation for industry challenges. arXiv preprint arXiv:2506.10408, 2025. +[439] Xiao Liang, Zhong-Zhi Li, Yeyun Gong, Yang Wang, Hengyuan Zhang, Yelong Shen, Ying Nian Wu, and Weizhu Chen. Sws: Self-aware weakness-driven problem synthesis in reinforcement learning for llm reasoning. arXiv preprint arXiv:2506.08989, 2025. +[440] Xun Liang, Shichao Song, Zifan Zheng, Hanyu Wang, Qingchen Yu, Xunkai Li, Rong-Hua Li, Yi Wang, Zhonghao Wang, Feiyu Xiong, et al. Internal consistency and self-feedback in large language models: A survey. arXiv preprint arXiv:2407.14507, 2024. +[441] Baohao Liao, Xinyi Chen, Sara Rajaee, Yuhui Xu, Christian Herold, Anders Søgaard, Maarten de Rijke, and Christof Monz. Lost at the beginning of reasoning. arXiv preprint arXiv:2506.22058, 2025. 
+[442] Baohao Liao, Yuhui Xu, Hanze Dong, Junnan Li, Christof Monz, Silvio Savarese, Doyen Sahoo, and Caiming Xiong. Reward-guided speculative decoding for efficient llm reasoning. arXiv preprint arXiv:2501.19324, 2025. +[443] Huanxuan Liao, Shizhu He, Yupu Hao, Xiang Li, Yanzhe Zhang, Jun Zhao, and Kang Liu. Skintern: Internalizing symbolic knowledge for distilling better cot capabilities into small language models. In Proceedings of the 31st International Conference on Computational Linguistics, pages 3203-3221, January 2025. URL https://aclanthology.org/2025.coling-main.215.pdf. +[444] Mengqi Liao, Xiangyu Xi, Ruinian Chen, Jia Leng, Yangen Hu, Ke Zeng, Shuai Liu, and Huaiyu Wan. Enhancing efficiency and exploration in reinforcement learning for llms. arXiv preprint arXiv:2505.18573, 2025. +[445] Minpeng Liao, Wei Luo, Chengxi Li, Jing Wu, and Kai Fan. Mario: Math reasoning with code interpreter output-a reproducible pipeline. arXiv preprint arXiv:2401.08190, 2024. +[446] Weibin Liao, Xu Chu, and Yasha Wang. Tpo: Aligning large language models with multi-branch & multi-step preference trees. arXiv preprint arXiv:2410.12854, 2024. +[447] Jonathan Light, Min Cai, Weiqin Chen, Guanzhi Wang, Xiusi Chen, Wei Cheng, Yisong Yue, and Ziniu Hu. Strategist: Learning strategic skills by LLMs via bi-level tree search. In Automated Reinforcement Learning: Exploring Meta-Learning, AutoML, and LLMs, June 2024. URL https://openreview.net/forum?id=UHWbmZuJPF. +[448] Jonathan Light, Yue Wu, Yiyou Sun, Wenchao Yu, Xujiang Zhao, Ziniu Hu, Haifeng Chen, Wei Cheng, et al. Scattered forest search: Smarter code space exploration with llms. arXiv preprint arXiv:2411.05010, 2024. +[449] Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=v8L0pN6EOi. 
+[450] Bill Yuchen Lin, Ronan Le Bras, Kyle Richardson, Ashish Sabharwal, Radha Poovendran, Peter Clark, and Yejin Choi. Zebralogic: On the scaling limits of lms for logical reasoning. arXiv preprint arXiv:2502.01100, 2025. +[451] Haohan Lin, Zhiqing Sun, Yiming Yang, and Sean Welleck. Lean-star: Learning to interleave thinking and proving. arXiv preprint arXiv:2407.10040, 2024. +[452] Qingwen Lin, Boyan Xu, Guimin Hu, Zijian Li, Zhifeng Hao, Keli Zhang, and Ruichu Cai. Cmcts: A constrained monte carlo tree search framework for mathematical reasoning in large language model. arXiv preprint arXiv:2502.11169, 2025. +[453] Qingwen Lin, Boyan Xu, Zijian Li, Zhifeng Hao, Keli Zhang, and Ruichu Cai. Leveraging constrained monte carlo tree search to generate reliable long chain-of-thought for mathematical reasoning. arXiv preprint arXiv:2502.11169, 2025. +[454] Yen-Ting Lin, Di Jin, Tengyu Xu, Tianhao Wu, Sainbayar Sukhbaatar, Chen Zhu, Yun He, Yun-Nung Chen, Jason Weston, Yuandong Tian, et al. Step-kto: Optimizing mathematical reasoning through stepwise binary feedback. arXiv preprint arXiv:2501.10799, 2025. + +[455] Yujie Lin, Ante Wang, Moye Chen, Jingyao Liu, Hao Liu, Jinsong Su, and Xinyan Xiao. Investigating inference-time scaling for chain of multi-modal thought: A preliminary study. arXiv preprint arXiv:2502.11514, 2025. +[456] Zicheng Lin, Zhibin Gou, Tian Liang, Ruilin Luo, Haowei Liu, and Yujiu Yang. CriticBench: Benchmarking LLMs for critique-correct reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 1552–1587, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.91. URL https://aclanthology.org/2024.findings-acl.91/. +[457] Zicheng Lin, Tian Liang, Jiahao Xu, Xing Wang, Ruilin Luo, Chufan Shi, Siheng Li, Yujiu Yang, and Zhaopeng Tu. 
Critical tokens matter: Token-level contrastive estimation enhance llm's reasoning capability. arXiv preprint arXiv:2411.19943, 2024. +[458] Zongyu Lin, Yao Tang, Xingcheng Yao, Da Yin, Ziniu Hu, Yizhou Sun, and Kai-Wei Chang. Qlass: Boosting language agent inference via q-guided stepwise search. arXiv preprint arXiv:2502.02584, 2025. +[459] Zehui Ling, Deshu Chen, Hongwei Zhang, Yifeng Jiao, Xin Guo, and Yuan Cheng. Fast on the easy, deep on the hard: Efficient reasoning via powered length penalty. arXiv preprint arXiv:2506.10446, 2025. +[460] Zhan Ling, Yunhao Fang, Xuanlin Li, Zhiao Huang, Mingu Lee, Roland Memisevic, and Hao Su. Deductive verification of chain-of-thought reasoning. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 36407-36433. Curran Associates, Inc., September 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/72393bd47a35f5b3bee4c609e7bba733-Paper-Conference.pdf. +[461] Philip Lippmann and Jie Yang. Style over substance: Distilled language models reason via stylistic replication. arXiv preprint arXiv:2504.01738, 2025. +[462] Aiwei Liu, Haoping Bai, Zhiyun Lu, Xiang Kong, Xiaoming Wang, Jiulong Shan, Meng Cao, and Lijie Wen. Direct large language model alignment through self-rewarding contrastive prompt distillation. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9688–9712, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.523. URL https://aclanthology.org/2024.acl-long.523/. +[463] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024. 
+[464] Bingbin Liu, Sebastien Bubeck, Ronen Eldan, Janardhan Kulkarni, Yuanzhi Li, Anh Nguyen, Rachel Ward, and Yi Zhang. Tinygsm: achieving $>80\%$ on gsm8k with small language models. arXiv preprint arXiv:2312.09241, 2023. +[465] Bo Liu, Leon Guertler, Simon Yu, Zichen Liu, Penghui Qi, Daniel Balcells, Mickel Liu, Cheston Tan, Weiyan Shi, Min Lin, et al. Spiral: Self-play on zero-sum games incentivizes reasoning via multi-agent multi-turn reinforcement learning. arXiv preprint arXiv:2506.24119, 2025. +[466] Chris Yuhao Liu, Liang Zeng, Jiacai Liu, Rui Yan, Jujie He, Chaojie Wang, Shuicheng Yan, Yang Liu, and Yahui Zhou. Skywork-reward: Bag of tricks for reward modeling in llms. arXiv preprint arXiv:2410.18451, 2024. +[467] Chris Yuhao Liu, Liang Zeng, Yuzhen Xiao, Jujie He, Jiacai Liu, Chaojie Wang, Rui Yan, Wei Shen, Fuxiang Zhang, Jiacheng Xu, et al. Skywork-reward-v2: Scaling preference data curation via human-ai synergy. arXiv preprint arXiv:2507.01352, 2025. +[468] Cong Liu, Zhong Wang, ShengYu Shen, Jialiang Peng, Xiaoli Zhang, Zhen-Dong Du, and YaFang Wang. The chinese dataset distilled from deepseek-r1-671b. https://huggingface.co/datasets/Congliu/Chinese-DeepSeek-R1-Distill-data-110k, 2025. + +[469] Dancheng Liu, Amir Nassereldine, Ziming Yang, Chenhui Xu, Yuting Hu, Jiajie Li, Utkarsh Kumar, Changjae Lee, Ruiyang Qin, Yiyu Shi, et al. Large language models have intrinsic self-correction ability. arXiv preprint arXiv:2406.15673, 2024. +[470] Fan Liu, Wenshuo Chao, Naiqiang Tan, and Hao Liu. Bag of tricks for inference-time computation of llm reasoning. arXiv preprint arXiv:2502.07191, 2025. +[471] Guanlin Liu, Kaixuan Ji, Renjie Zheng, Zheng Wu, Chen Dun, Quanquan Gu, and Lin Yan. Enhancing multi-step reasoning abilities of language models through direct q-function optimization. arXiv preprint arXiv:2410.09302, 2024. +[472] Hanbing Liu, Lang Cao, Yuanyi Ren, Mengyu Zhou, Haoyu Dong, Xiaojun Ma, Shi Han, and Dongmei Zhang. 
Bingo: Boosting efficient reasoning of llms via dynamic and significance-based reinforcement learning. arXiv preprint arXiv:2506.08125, 2025. +[473] Hanmeng Liu, Zhizhang Fu, Mengru Ding, Ruoxi Ning, Chaoli Zhang, Xiaozhang Liu, and Yue Zhang. Logical reasoning in large language models: A survey. arXiv preprint arXiv:2502.09100, 2025. +[474] Hao Liu, Zhengren Wang, Xi Chen, Zhiyu Li, Feiyu Xiong, Qinhan Yu, and Wentao Zhang. Hoprag: Multi-hop reasoning for logic-aware retrieval-augmented generation. arXiv preprint arXiv:2502.12442, 2025. +[475] Hongxuan Liu, Zhiyao Luo, and Tingting Zhu. Best of both worlds: Harmonizing LLM capabilities in decision-making and question-answering for treatment regimes. In Advances In Medical Foundation Models: Explainability, Robustness, Security, and Beyond, 2024. URL https://openreview.net/forum?id=afu9qhp7md. +[476] Jiacai Liu, Chaojie Wang, Chris Yuhao Liu, Liang Zeng, Rui Yan, Yiwen Sun, Yang Liu, and Yahui Zhou. Improving multi-step reasoning abilities of large language models with direct advantage policy optimization. arXiv preprint arXiv:2412.18279, 2024. +[477] Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. Don't throw away your value model! generating more preferable text with value-guided monte-carlo tree search decoding. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=kh9Zt2Ldmn. +[478] Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. Making PPO even better: Value-guided monte-carlo tree search decoding, September 2024. URL https://openreview.net/forum?id=QaODpeRaOK. +[479] Junnan Liu, Hongwei Liu, Linchen Xiao, Shudong Liu, Taolin Zhang, Zihan Ma, Songyang Zhang, and Kai Chen. Deciphering trajectory-aided llm reasoning: An optimization perspective. arXiv preprint arXiv:2505.19815, 2025. +[480] Junnan Liu, Linhao Luo, Thuy-Trang Vu, and Gholamreza Haffari. 
Situatedthinker: Grounding llm reasoning with real-world through situated thinking. arXiv preprint arXiv:2505.19300, 2025. +[481] Junteng Liu, Yuanxiang Fan, Zhuo Jiang, Han Ding, Yongyi Hu, Chi Zhang, Yiqi Shi, Shitong Weng, Aili Chen, Shiqi Chen, et al. Synlogic: Synthesizing verifiable reasoning data at scale for learning logical reasoning and beyond. arXiv preprint arXiv:2505.19641, 2025. +[482] Liping Liu, Chunhong Zhang, Likang Wu, Chuang Zhao, Zheng Hu, Ming He, and Jianping Fan. Instruct-of-reflection: Enhancing large language models iterative reflection capabilities via dynamic-meta instruction. arXiv preprint arXiv:2503.00902, 2025. +[483] Mingjie Liu, Shizhe Diao, Ximing Lu, Jian Hu, Xin Dong, Yejin Choi, Jan Kautz, and Yi Dong. Prorl: Prolonged reinforcement learning expands reasoning boundaries in large language models. arXiv preprint arXiv:2505.24864, 2025. +[484] Qiang Liu, Xinlong Chen, Yue Ding, Shizhen Xu, Shu Wu, and Liang Wang. Attention-guided self-reflection for zero-shot hallucination detection in large language models. arXiv preprint arXiv:2501.09997, 2025. +[485] Qin Liu, Wenxuan Zhou, Nan Xu, James Y Huang, Fei Wang, Sheng Zhang, Hoifung Poon, and Muhao Chen. Metascale: Test-time scaling with evolving meta-thoughts. arXiv preprint arXiv:2503.13447, 2025. + +[486] Runze Liu, Junqi Gao, Jian Zhao, Kaiyan Zhang, Xiu Li, Biqing Qi, Wanli Ouyang, and Bowen Zhou. Can 1b llm surpass 405b llm? rethinking compute-optimal test-time scaling. arXiv preprint arXiv:2502.06703, 2025. +[487] Tengxuan Liu, Shiyao Li, Jiayi Yang, Tianchen Zhao, Feng Zhou, Xiaohui Song, Guohao Dai, Shengen Yan, Huazhong Yang, and Yu Wang. Pm-kvq: Progressive mixed-precision kv cache quantization for long-cot llms. arXiv preprint arXiv:2505.18610, 2025. +[488] Wanlong Liu, Junxiao Xu, Fei Yu, Yukang Lin, Ke Ji, Wenyu Chen, Yan Xu, Yasheng Wang, Lifeng Shang, and Benyou Wang. Qfft, question-free fine-tuning for adaptive reasoning. arXiv preprint arXiv:2506.12860, 2025. 
+[489] Wei Liu, Junlong Li, Xiwen Zhang, Fan Zhou, Yu Cheng, and Junxian He. Diving into self-evolving training for multimodal reasoning. arXiv preprint arXiv:2412.17451, 2024. +[490] Wei Liu, Ruochen Zhou, Yiyun Deng, Yuzhen Huang, Junteng Liu, Yuntian Deng, Yizhe Zhang, and Junxian He. Learn to reason efficiently with adaptive length-based reward shaping. arXiv preprint arXiv:2505.15612, 2025. +[491] Ye Liu, Kevin Qinghong Lin, Chang Wen Chen, and Mike Zheng Shou. Videomind: A chain-of-lora agent for long video reasoning. arXiv preprint arXiv:2503.13444, 2025. +[492] Yongjiang Liu, Haoxi Li, Xiaosong Ma, Jie Zhang, and Song Guo. Think how to think: Mitigating overthinking with autonomous difficulty cognition in large reasoning models. arXiv preprint arXiv:2507.02663, 2025. +[493] Yue Liu, Hongcheng Gao, Shengfang Zhai, Jun Xia, Tianyi Wu, Zhiwei Xue, Yulin Chen, Kenji Kawaguchi, Jiaheng Zhang, and Bryan Hooi. Guardreasoner: Towards reasoning-based llm safeguards. arXiv preprint arXiv:2501.18492, 2025. +[494] Yue Liu, Jiaying Wu, Yufei He, Hongcheng Gao, Hongyu Chen, Baolong Bi, Ruihan Gong, Jiaheng Zhang, Zhiqi Huang, and Bryan Hooi. Efficient inference for large reasoning models: A survey. arXiv preprint arXiv:2503.23077, 2025. +[495] Yuliang Liu, Junjie Lu, Zhaoling Chen, Chaofeng Qu, Jason Klein Liu, Chonghan Liu, Zefan Cai, Yunhui Xia, Li Zhao, Jiang Bian, et al. Adaptivestep: Automatically dividing reasoning step through model confidence. arXiv preprint arXiv:2502.13943, 2025. +[496] Zhaowei Liu, Xin Guo, Fangqi Lou, Lingfeng Zeng, Jinyi Niu, Zixuan Wang, Jiajie Xu, Weige Cai, Ziwei Yang, Xueqian Zhao, et al. Fin-r1: A large language model for financial reasoning through reinforcement learning. arXiv preprint arXiv:2503.16252, 2025. +[497] Zhiyuan Liu, Yuting Zhang, Feng Liu, Changwang Zhang, Ying Sun, and Jun Wang. Othinkmr1: Stimulating multimodal generalized reasoning capabilities through dynamic reinforcement learning. 
arXiv preprint arXiv:2503.16081, 2025. +[498] Zichen Liu, Changyu Chen, Wenjun Li, Tianyu Pang, Chao Du, and Min Lin. There may not be aha moment in r1-zero-like training — a pilot study. https://oatllm.notion.site/oat-zero, 2025. Notion Blog. +[499] Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783, 2025. +[500] Zihan Liu, Yang Chen, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. Acemath: Advancing frontier math reasoning with post-training and reward modeling. arXiv preprint arXiv:2412.15084, 2024. +[501] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025. +[502] Elita Lobo, Chirag Agarwal, and Himabindu Lakkaraju. On the impact of fine-tuning on chain-of-thought reasoning. arXiv preprint arXiv:2411.15382, 2024. +[503] Chenwei Lou, Zewei Sun, Xinnian Liang, Meng Qu, Wei Shen, Wenqi Wang, Yuntao Li, Qingping Yang, and Shuangzhi Wu. Adacot: Pareto-optimal adaptive chain-of-thought triggering via reinforcement learning. arXiv preprint arXiv:2505.11896, 2025. +[504] Dakuan Lu, Xiaoyu Tan, Rui Xu, Tianchu Yao, Chao Qu, Wei Chu, Yinghui Xu, and Yuan Qi. Scp-116k: A high-quality problem-solution dataset and a generalized pipeline for automated extraction in the higher education science domain, 2025. + +[505] Haolang Lu, Yilian Liu, Jingxin Xu, Guoshun Nan, Yuanlong Yu, Zhican Chen, and Kun Wang. Auditing meta-cognitive hallucinations in reasoning large language models. arXiv preprint arXiv:2505.13143, 2025. +[506] Jianqiao Lu, Zhiyang Dou, Hongru WANG, Zeyu Cao, Jianbo Dai, Yunlong Feng, and Zhijiang Guo. Autopsv: Automated process-supervised verifier. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. 
Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 79935-79962. Curran Associates, Inc., December 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/9246aa822579d9b29a140ecdac36ad60-Paper-Conference.pdf. +[507] Pan Lu, Swaroop Mishra, Tony Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho, editors, Advances in Neural Information Processing Systems, November 2022. URL https://openreview.net/forum?id=HjwK-Tc_Bc. +[508] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=KUNzEQMWU7. +[509] Pan Lu, Bowen Chen, Sheng Liu, Rahul Thapa, Joseph Boen, and James Zou. Octo tools: An agentic framework with extensible tools for complex reasoning. arXiv preprint arXiv:2502.11271, 2025. +[510] Rubing Lu, João Sedoc, and Arun Sundararajan. Reasoning and the trusting behavior of deepseek and gpt: An experiment revealing hidden fault lines in large language models. arXiv preprint arXiv:2502.12825, 2025. +[511] Wenquan Lu, Yuechuan Yang, Kyle Lee, Yanshu Li, and Enqi Liu. Latent chain-of-thought? decoding the depth-recurrent transformer. arXiv preprint arXiv:2507.02199, 2025. +[512] Zhengxi Lu, Yuxiang Chai, Yaxuan Guo, Xi Yin, Liang Liu, Hao Wang, Guanjing Xiong, and Hongsheng Li. Ui-r1: Enhancing action prediction of gui agents by reinforcement learning. arXiv preprint arXiv:2503.21620, 2025. +[513] Zimu Lu, Aojun Zhou, Houxing Ren, Ke Wang, Weikang Shi, Junting Pan, Mingjie Zhan, and Hongsheng Li. 
Mathgenie: Generating synthetic data with question back-translation for enhancing mathematical reasoning of llms. arXiv preprint arXiv:2402.16352, 2024. +[514] Haipeng Luo, Qingfeng Sun, Can Xu, Pu Zhao, Jianguang Lou, Chongyang Tao, Xiubo Geng, Qingwei Lin, Shifeng Chen, and Dongmei Zhang. Wizardmath: Empowering mathematical reasoning for large language models via reinforced evol-instruct. arXiv preprint arXiv:2308.09583, 2023. +[515] Hanjun Luo, Shenyu Dai, Chiming Ni, Xinfeng Li, Guibin Zhang, Kun Wang, Tongliang Liu, and Hanan Salam. Agent auditor: Human-level safety and security evaluation for llm agents. arXiv preprint arXiv:2506.00641, 2025. +[516] Haotian Luo, Li Shen, Haiying He, Yibo Wang, Shiwei Liu, Wei Li, Naiqiang Tan, Xiaochun Cao, and Dacheng Tao. O1-pruner: Length-harmonizing fine-tuning for o1-like reasoning pruning. arXiv preprint arXiv:2501.12570, 2025. +[517] Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592, 2024. +[518] Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. Deepscaler: Surpassing o1-preview with a 1.5b model by scaling rl, February 2025. URL https://github.com/agentica-project/rllm. Notion Blog. +[519] Ruilin Luo, Zhuofan Zheng, Yifan Wang, Yiyao Yu, Xinzhe Ni, Zicheng Lin, Jin Zeng, and Yujiu Yang. Ursa: Understanding and verifying chain-of-thought reasoning in multimodal mathematics. arXiv preprint arXiv:2501.04686, 2025. + +[520] Xianzhen Luo, Qingfu Zhu, Zhiming Zhang, Libo Qin, Xuanyu Zhang, Qing Yang, Dongliang Xu, and Wanxiang Che. Python is not always the best choice: Embracing multilingual program of thoughts. 
In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 7185-7212, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.408. URL https://aclanthology.org/2024.emnlp-main.408/. +[521] Yijia Luo, Yulin Song, Xingyao Zhang, Jiaheng Liu, Weixun Wang, GengRu Chen, Wenbo Su, and Bo Zheng. Deconstructing long chain-of-thought: A structured reasoning optimization framework for long cot distillation. arXiv preprint arXiv:2503.16385, 2025. +[522] Chengqi Lyu, Songyang Gao, Yuzhe Gu, Wenwei Zhang, Jianfei Gao, Kuikun Liu, Ziyi Wang, Shuaibin Li, Qian Zhao, Haian Huang, et al. Exploring the limit of outcome reward for learning mathematical reasoning. arXiv preprint arXiv:2502.06781, 2025. +[523] Qing Lyu, Shreya Havaldar, Adam Stein, Li Zhang, Delip Rao, Eric Wong, Marianna Apidianaki, and Chris Callison-Burch. Faithful chain-of-thought reasoning. In Jong C. Park, Yuki Arase, Baotian Hu, Wei Lu, Derry Wijaya, Ayu Purwarianti, and Adila Alfa Krisnadhi, editors, Proceedings of the 13th International Joint Conference on Natural Language Processing and the 3rd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pages 305-329, Nusa Dua, Bali, November 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.ijcnlp-main.20. URL https://aclanthology.org/2023.ijcnlp-main.20/. +[524] Alexander Lyzhov, Yuliya Molchanova, Armenii Ashukha, Dmitry Molchanov, and Dmitry Vetrov. Greedy policy search: A simple baseline for learnable test-time augmentation. In Jonas Peters and David Sontag, editors, Proceedings of the 36th Conference on Uncertainty in Artificial Intelligence (UAI), volume 124 of Proceedings of Machine Learning Research, pages 1308-1317. PMLR, 03-06 Aug 2020. URL https://proceedings.mlr.press/v124/lyzhov20a.html. 
+[525] Jingyuan Ma, Rui Li, Zheng Li, Junfeng Liu, Lei Sha, and Zhifang Sui. Hauntattack: When attack follows reasoning as a shadow. arXiv preprint arXiv:2506.07031, 2025. +[526] Lu Ma, Hao Liang, Meiyi Qiang, Lexiang Tang, Xiaochen Ma, Zhen Hao Wong, Junbo Niu, Chengyu Shen, Running He, Bin Cui, et al. Learning what reinforcement learning can't: Interleaved online fine-tuning for hardest questions. arXiv preprint arXiv:2506.07527, 2025. +[527] Nanye Ma, Shangyuan Tong, Haolin Jia, Hexiang Hu, Yu-Chuan Su, Mingda Zhang, Xuan Yang, Yandong Li, Tommi Jaakkola, Xuhui Jia, et al. Inference-time scaling for diffusion models beyond scaling denoising steps. arXiv preprint arXiv:2501.09732, 2025. +[528] Qianli Ma, Haotian Zhou, Tingkai Liu, Jianbo Yuan, Pengfei Liu, Yang You, and Hongxia Yang. Let's reward step by step: Step-level reward model as the navigators for reasoning. arXiv preprint arXiv:2310.10080, 2023. +[529] Ruotian Ma, Peisong Wang, Cheng Liu, Xingyan Liu, Jiaqi Chen, Bang Zhang, Xin Zhou, Nan Du, and Jia Li. $S^2 r$ : Teaching llms to self-verify and self-correct via reinforcement learning. arXiv preprint arXiv:2502.12853, 2025. +[530] Xinyin Ma, Guangnian Wan, Runpeng Yu, Gongfan Fang, and Xinchao Wang. Cot-valve: Length-compressible chain-of-thought tuning. arXiv preprint arXiv:2502.09601, 2025. +[531] Xueguang Ma, Qian Liu, Dongfu Jiang, Ge Zhang, Zejun Ma, and Wenhu Chen. Generalreasoner: Advancing llm reasoning across all domains. arXiv preprint arXiv:2505.14652, 2025. +[532] Xuetao Ma, Wenbin Jiang, and Hua Huang. Problem-solving logic guided curriculum in-context learning for llms complex reasoning. arXiv preprint arXiv:2502.15401, 2025. +[533] Yan Ma, Steffi Chern, Xuyang Shen, Yiran Zhong, and Pengfei Liu. Rethinking rl scaling for vision language models: A transparent, from-scratch framework and comprehensive evaluation scheme. arXiv preprint arXiv:2504.02587, 2025. 
+[534] Yiran Ma, Zui Chen, Tianqiao Liu, Mi Tian, Zhuo Liu, Zitao Liu, and Weiqi Luo. What are step-level reward models rewarding? counterintuitive findings from mcts-boosted mathematical reasoning. arXiv preprint arXiv:2412.15904, 2024. + +[535] Zexiong Ma, Chao Peng, Pengfei Gao, Xiangxin Meng, Yanzhen Zou, and Bing Xie. Sortf: Issue resolving with subtask-oriented reinforced fine-tuning. arXiv preprint arXiv:2502.20127, 2025. +[536] Zeyao Ma, Xiaokang Zhang, Jing Zhang, Jifan Yu, Sijia Luo, and Jie Tang. Dynamic scaling of unit tests for code reward modeling. arXiv preprint arXiv:2501.01054, 2025. +[537] Ziyang Ma, Zhuo Chen, Yuping Wang, Eng Siong Chng, and Xie Chen. Audio-cot: Exploring chain-of-thought reasoning in large audio language model. arXiv preprint arXiv:2501.07246, 2025. +[538] Aman Madaan, Katherine Hermann, and Amir Yazdanbakhsh. What makes chain-of-thought prompting effective? a counterfactual study. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 1448-1535, Singapore, December 2023. URL https://aclanthology.org/2023.findings-emnlp.101.pdf. +[539] Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, Shashank Gupta, Bodhisattwa Prasad Majumder, Katherine Hermann, Sean Welleck, Amir Yazdanbakhsh, and Peter Clark. Self-refine: Iterative refinement with self-feedback. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 46534-46594. Curran Associates, Inc., March 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/91edff07232fb1b55a505a9e9f6c0ff3-Paper-Conference.pdf. +[540] Sathwik Tejaswi Madhusudhan, Shruthan Radhakrishna, Jash Mehta, and Toby Liang. Millions scale dataset distilled from r1-32b. 
https://huggingface.co/datasets/ServiceNow-AI/R1-Distill-SFT, February 2025. +[541] Sadegh Mahdavi, Muchen Li, Kaiwen Liu, Christos Thrampoulidis, Leonid Sigal, and Renjie Liao. Leveraging online olympiad-level math problems for llms training and contamination-resistant evaluation. arXiv preprint arXiv:2501.14275, 2025. +[542] Tobias Materzok. Cos (m+ o) s: Curiosity and rl-enhanced mcts for exploring story space via language models. arXiv preprint arXiv:2501.17104, 2025. +[543] Justus Mattern, Sami Jaghourar, Manveer Basra, Jannik Straube, Matthew Di Ferrante, Felix Gabriel, Jack Min Ong, Vincent Weisser, and Johannes Hagemann. Synthetic-1: Two million collaboratively generated reasoning traces from deepseek-r1, 2025. URL https://www.primeintellect.ai/blog/synthetic-1-release. +[544] Nat McAleese, Rai Michael Pokorny, Juan Felipe Ceron Uribe, Evgenia Nitishinskaya, Maja Trebacz, and Jan Leike. Llm critics help catch llm bugs. arXiv preprint arXiv:2407.00215, 2024. +[545] R Thomas McCoy, Shunyu Yao, Dan Friedman, Mathew D Hardy, and Thomas L Griffiths. When a language model is optimized for reasoning, does it still show embers of autoregression? an analysis of openai o1. arXiv preprint arXiv:2410.01792, 2024. +[546] Lingrui Mei, Jiayu Yao, Yuyao Ge, Yiwei Wang, Baolong Bi, Yujun Cai, Jiazhi Liu, Mingyu Li, Zhong-Zhi Li, Duzhen Zhang, Chenlin Zhou, Jiayi Mao, Tianze Xia, Jiafeng Guo, and Shenghua Liu. A survey of context engineering for large language models. arXiv preprint arXiv:2507.13334, 2025. +[547] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, Ping Luo, Yu Qiao, Qiaosheng Zhang, and Wenqi Shao. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025. +[548] William Merrill and Ashish Sabharwal. The expressive power of transformers with chain of thought. 
In *The Twelfth International Conference on Learning Representations*, January 2024. URL https://openreview.net/pdf?id=CDmerQ37Zs. +[549] Ning Miao, Yee Whye Teh, and Tom Rainforth. Selfcheck: Using LLMs to zero-shot check their own step-by-step reasoning. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=pTHfApDakA. + +[550] Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, et al. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413, 2024. +[551] Seyed Iman Mirzadeh, Keivan Alizadeh, Hooman Shahrokhi, Oncel Tuzel, Samy Bengio, and Mehrdad Farajtabar. GSM-symbolic: Understanding the limitations of mathematical reasoning in large language models. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=AjXkRZIvjb. +[552] Prakamya Mishra, Jiang Liu, Jialian Wu, Xiaodong Yu, Zicheng Liu, and Emad Barsoum. Tttbench: A benchmark for evaluating reasoning ability with simple and novel tic-tac-toe-style games. arXiv preprint arXiv:2506.10209, 2025. +[553] Arindam Mitra, Hamed Khanpour, Corby Rosset, and Ahmed Awadallah. Orca-math: Unlocking the potential of slms in grade school math. arXiv preprint arXiv:2402.14830, 2024. +[554] Chancharik Mitra, Brandon Huang, Trevor Darrell, and Roei Herzig. Compositional chain-of-thought prompting for large multimodal models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14420-14431, 2024. +[555] Purbesh Mitra and Sennur Ulukus. Motif: Modular thinking via reinforcement fine-tuning in llms. arXiv preprint arXiv:2507.02851, 2025. +[556] Shentong Mo and Miao Xin. Tree of uncertain thoughts reasoning for large language models. 
In ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 12742-12746, April 2024. doi: 10.1109/ICASSP48485.2024.10448355. URL https://ieeexplore.ieee.org/document/10448355. +[557] Philipp Mondorf and Barbara Plank. Beyond accuracy: Evaluating the reasoning behavior of large language models—a survey. arXiv preprint arXiv:2404.01869, 2024. +[558] Terufumi Morishita, Gaku Morio, Atsuki Yamaguchi, and Yasuhiro Sogawa. Enhancing reasoning capabilities of llms via principled synthetic logic corpus. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 73572-73604. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/8678da90126aa58326b2fc0254b33a8c-Paper-Conference.pdf. +[559] Yongyu Mu, Jiali Zeng, Bei Li, Xinyan Guan, Fandong Meng, Jie Zhou, Tong Xiao, and Jingbo Zhu. Dissecting long reasoning models: An empirical study. arXiv preprint arXiv:2506.04913, 2025. +[560] Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025. +[561] Tergel Munkhbat, Namgyu Ho, Seohyun Kim, Yongjin Yang, Yujin Kim, and Se-Young Yun. Self-training elicits concise reasoning in large language models. arXiv preprint arXiv:2502.20122, 2025. +[562] Vaskar Nath, Pranav Raja, Claire Yoon, and Sean Hendryx. Toolcomp: A multi-tool reasoning & process supervision benchmark. arXiv preprint arXiv:2501.01290, 2025. +[563] Sania Nayab, Giulio Rossolini, Marco Simoni, Andrea Saracino, Giorgio Buttazzo, Nicola Maria Manes, and Fabrizio Giacomelli. Concise thoughts: Impact of output length on llm reasoning and cost. arXiv preprint arXiv:2407.19825, 2024. 
+[564] Ansong Ni, Srini Iyer, Dragomir Radev, Veselin Stoyanov, Wen-Tau Yih, Sida Wang, and Xi Victoria Lin. LEVER: Learning to verify language-to-code generation with execution. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett, editors, Proceedings of the 40th International Conference on Machine Learning, volume 202 of Proceedings of Machine Learning Research, pages 26106-26128. PMLR, 23-29 Jul 2023. URL https://proceedings.mlr.press/v202/ni23b.html. + +[565] Ziyi Ni, Yifan Li, Ning Yang, Dou Shen, Pin Lv, and Daxiang Dong. Tree-of-code: A tree-structured exploring framework for end-to-end code generation and execution in complex task handling. arXiv preprint arXiv:2412.15305, 2024. +[566] Allen Nie, Yi Su, Bo Chang, Jonathan N Lee, Ed H Chi, Quoc V Le, and Minmin Chen. Evolve: Evaluating and optimizing llms for exploration. arXiv preprint arXiv:2410.06238, 2024. +[567] Yansong Ning, Wei Li, Jun Fang, Naiqiang Tan, and Hao Liu. Not all thoughts are generated equal: Efficient llm reasoning via multi-turn reinforcement learning. arXiv preprint arXiv:2505.11827, 2025. +[568] Harsha Nori, Naoto Usuyama, Nicholas King, Scott Mayer McKinney, Xavier Fernandes, Sheng Zhang, and Eric Horvitz. From medprompt to o1: Exploration of run-time strategies for medical challenge problems and beyond. arXiv preprint arXiv:2411.03590, 2024. +[569] Maxwell Nye, Anders Johan Andreassen, Guy Gur-Ari, Henryk Michalewski, Jacob Austin, David Bieber, David Dohan, Aitor Lewkowycz, Maarten Bosma, David Luan, Charles Sutton, and Augustus Odena. Show your work: Scratchpads for intermediate computation with language models. In Deep Learning for Code Workshop, March 2022. URL https://openreview.net/forum?id=HB1x2idbkbq. +[570] Skywork o1 Team. Skywork-o1 open series. https://huggingface.co/Skywork, November 2024. +[571] OpenCompass. Aime 2025. https://huggingface.co/datasets/opencompass/AIME2025, February 2025. 
+[572] Yixin Ou, Yunzhi Yao, Ningyu Zhang, Hui Jin, Jiacheng Sun, Shumin Deng, Zhenguo Li, and Huajun Chen. How do llms acquire new knowledge? a knowledge circuits perspective on continual pre-training. arXiv preprint arXiv:2502.11196, 2025. +[573] Alexander Pan, Kush Bhatia, and Jacob Steinhardt. The effects of reward misspecification: Mapping and mitigating misaligned models. arXiv preprint arXiv:2201.03544, 2022. +[574] Jiabao Pan, Yan Zhang, Chen Zhang, Zuozhu Liu, Hongwei Wang, and Haizhou Li. DynaThink: Fast or slow? a dynamic decision-making framework for large language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 14686-14695, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.814. URL https://aclanthology.org/2024.emnlp-main.814/. +[575] Jianfeng Pan, Senyou Deng, and Shaomang Huang. Coat: Chain-of-associated-thoughts framework for enhancing large language models reasoning. arXiv preprint arXiv:2502.02390, 2025. +[576] Jiayi Pan, Junjie Zhang, Xingyao Wang, Lifan Yuan, Hao Peng, and Alane Suhr. Tinyzero. https://github.com/Jiayi-Pan/TinyZero, 2025. Accessed: 2025-01-24. +[577] Jiazhen Pan, Che Liu, Junde Wu, Fenglin Liu, Jiayuan Zhu, Hongwei Bran Li, Chen Chen, Cheng Ouyang, and Daniel Rueckert. Medvlm-r1: Incentivizing medical reasoning capability of vision-language models (vlms) via reinforcement learning. arXiv preprint arXiv:2502.19634, 2025. +[578] Liangming Pan, Michael Saxon, Wenda Xu, Deepak Nathani, Xinyi Wang, and William Yang Wang. Automatically correcting large language models: Surveying the landscape of diverse self-correction strategies. arXiv preprint arXiv:2308.03188, 2023. +[579] Wenbo Pan, Zhichao Liu, Qiguang Chen, Xiangyang Zhou, Haining Yu, and Xiaohua Jia. The hidden dimensions of llm alignment: A multi-dimensional safety analysis. 
arXiv preprint arXiv:2502.09674, 2025. +[580] Zhihong Pan, Kai Zhang, Yuze Zhao, and Yupeng Han. Route to reason: Adaptive routing for llm and reasoning strategy selection. arXiv preprint arXiv:2505.19435, 2025. +[581] Bo Pang, Hanze Dong, Jiacheng Xu, Silvio Savarese, Yingbo Zhou, and Caiming Xiong. Bolt: Bootstrap long chain-of-thought in language models without distillation. arXiv preprint arXiv:2502.03860, 2025. + +[582] Richard Yuanzhe Pang, Weizhe Yuan, He He, Kyunghyun Cho, Sainbayar Sukhbaatar, and Jason Weston. Iterative reasoning preference optimization. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 116617-116637. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/d37c9ad425fe5b65304d500c6edcba00-Paper-Conference.pdf. +[583] Shubham Parashar, Blake Olson, Sambhav Khurana, Eric Li, Hongyi Ling, James Caverlee, and Shuiwang Ji. Inference-time computations for llm reasoning and planning: A benchmark and insights. arXiv preprint arXiv:2502.12521, 2025. +[584] Chanwoo Park, Seungju Han, Xingzhi Guo, Asuman Ozdaglar, Kaiqing Zhang, and Joo-Kyung Kim. Maporl: Multi-agent post-co-training for collaborative large language models with reinforcement learning. arXiv preprint arXiv:2502.18439, 2025. +[585] Junsoo Park, Seungyeon Jwa, Meiying Ren, Daeyoung Kim, and Sanghyuk Choi. Offsetbias: Leveraging debiased data for tuning evaluators, 2024. +[586] Sungjin Park, Xiao Liu, Yeyun Gong, and Edward Choi. Ensembling large language models with process reward-guided tree search for better complex reasoning. arXiv preprint arXiv:2412.15797, 2024. +[587] Manojkumar Parmar and Yuvaraj Govindarajulu. Challenges in ensuring ai safety in deepseek-r1 models: The shortcomings of reinforcement learning strategies. arXiv preprint arXiv:2501.17030, 2025. +[588] Avinash Patil. 
Advancing reasoning in large language models: Promising methods and approaches. arXiv preprint arXiv:2502.03671, 2025. +[589] Avinash Patil and Amardeep Kour Gedhu. Cognitive-mental-llm: Leveraging reasoning in large language models for mental health prediction via online text. arXiv preprint arXiv:2503.10095, 2025. +[590] Debjit Paul, Mete Ismayilzada, Maxime Peyrard, Beatrix Borges, Antoine Bosselut, Robert West, and Boi Faltings. REFINER: Reasoning feedback on intermediate representations. In Yvette Graham and Matthew Purver, editors, Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1100–1126, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.67/. +[591] Patomporn Payoungkhamdee, Pume Tuchinda, Jinheon Baek, Samuel Cahyawijaya, Can Udomcharoenchaikit, Potsawee Manakul, Peerat Limkonchotiwat, Ekapol Chuangsuwanich, and Sarana Nutanong. Towards better understanding of program-of-thought reasoning in cross-lingual and multilingual environments. arXiv preprint arXiv:2502.17956, 2025. +[592] Chunyi Peng, Zhipeng Xu, Zhenghao Liu, Yishan Li, Yukun Yan, Shuo Wang, Zhiyuan Liu, Yu Gu, Minghe Yu, Ge Yu, et al. Learning to route queries across knowledge bases for step-wise retrieval-augmented reasoning. arXiv preprint arXiv:2505.22095, 2025. +[593] Dengyun Peng, Yuhang Zhou, Qiguang Chen, Jinhao Liu, Jingjing Chen, and Libo Qin. Dlpo: Towards a robust, efficient, and generalizable prompt optimization framework from a deep-learning perspective. arXiv preprint arXiv:2503.13413, 2025. +[594] Hao Peng, Yunjia Qi, Xiaozhi Wang, Zijun Yao, Bin Xu, Lei Hou, and Juanzi Li. Agentic reward modeling: Integrating human preferences with verifiable correctness signals for reliable reward systems. arXiv preprint arXiv:2502.19328, 2025. +[595] Keqin Peng, Liang Ding, Yuanxin Ouyang, Meng Fang, and Dacheng Tao. 
Revisiting overthinking in long chain-of-thought from the perspective of self-doubt. arXiv preprint arXiv:2505.23480, 2025. +[596] Miao Peng, Nuo Chen, Zongrui Suo, and Jia Li. Rewarding graph reasoning process makes llms more generalized reasoners. arXiv preprint arXiv:2503.00845, 2025. +[597] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025. + +[598] Ivo Petrov, Jasper Dekoninck, Lyuben Baltadzhiev, Maria Drencheva, Kristian Minchev, Mislav Balunovic, Nikola Jovanovic, and Martin Vechev. Proof or bluff? evaluating llms on 2025 usa math olympiad. arXiv preprint arXiv:2503.21934, 2025. +[599] Rolf Pfister and Hansueli Jud. Understanding and benchmarking artificial intelligence: Openai's o3 is not agi. arXiv preprint arXiv:2501.07458, 2025. +[600] Quang Hieu Pham, Thuy Duong Nguyen, Tung Pham, Anh Tuan Luu, and Dat Quoc Nguyen. Clozemath: Improving mathematical reasoning in language models by learning to fill equations. arXiv preprint arXiv:2506.03763, 2025. +[601] Thinh Pham, Nguyen Nguyen, Pratibha Zunjare, Weiyuan Chen, Yu-Min Tseng, and Tu Vu. Sealqa: Raising the bar for reasoning in search-augmented language models. arXiv preprint arXiv:2506.01062, 2025. +[602] Long Phan, Alice Gatti, Ziwen Han, Nathaniel Li, Josephina Hu, Hugh Zhang, Sean Shi, Michael Choi, Anish Agrawal, Arnav Chopra, et al. Humanity's last exam. arXiv preprint arXiv:2501.14249, 2025. +[603] Aske Plaat, Annie Wong, Suzan Verberne, Joost Broekens, Niki van Stein, and Thomas Back. Reasoning with large language models, a survey. arXiv preprint arXiv:2407.11511, 2024. +[604] Gabriel Poesia, Kanishk Gandhi, Eric Zelikman, and Noah Goodman. Certified deductive reasoning with language models. Transactions on Machine Learning Research, May 2024. ISSN 2835-8856. 
URL https://openreview.net/forum?id=yXnwrS2T16. +[605] Stanislas Polu and Ilya Sutskever. Generative language modeling for automated theorem proving. arXiv preprint arXiv:2009.03393, 2020. +[606] Archiki Prasad, Swarnadeep Saha, Xiang Zhou, and Mohit Bansal. ReCEval: Evaluating reasoning chains via correctness and informativeness. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 10066-10086, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.622. URL https://aclanthology.org/2023.emnlp-main.622/. +[607] Archiki Prasad, Alexander Koller, Mareike Hartmann, Peter Clark, Ashish Sabharwal, Mohit Bansal, and Tushar Khot. ADaPT: As-needed decomposition and planning with language models. In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Findings of the Association for Computational Linguistics: NAACL 2024, pages 4226-4252, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-naacl.264. URL https://aclanthology.org/2024.findings-naacl.264/. +[608] Tidor-Vlad Pricope. Hardml: A benchmark for evaluating data science and machine learning knowledge and reasoning in ai. arXiv preprint arXiv:2501.15627, 2025. +[609] Ben Prystawski, Michael Li, and Noah Goodman. Why think step by step? reasoning emerges from the locality of experience. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 70926-70947. Curran Associates, Inc., September 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/e0af79ad53a336b4c4b4f7e2a68eb609-Paper-Conference.pdf. +[610] Israel Puerta-Merino, Carlos Núñez-Molina, Pablo Mesejo, and Juan Fernández-Olivares. A roadmap to guide the integration of llms in hierarchical planning. arXiv preprint arXiv:2501.08068, 2025. 
+[611] Haritz Puerto, Tilek Chubakov, Xiaodan Zhu, Harish Tayyar Madabushi, and Iryna Gurevych. Fine-tuning with divergent chains of thought boosts reasoning through self-correction in language models. arXiv preprint arXiv:2407.03181, 2024. +[612] Isha Puri, Shivchander Sudalairaj, Guangxuan Xu, Kai Xu, and Akash Srivastava. A probabilistic inference approach to inference-time scaling of llms using particle-based monte carlo methods. arXiv preprint arXiv:2502.01618, 2025. +[613] Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199, 2024. + +[614] Penghui Qi, Zichen Liu, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Optimizing anytime reasoning via budget relative policy optimization. arXiv preprint arXiv:2505.13438, 2025. +[615] Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual reasoning makes smaller llms stronger problem-solvers. arXiv preprint arXiv:2408.06195, 2024. +[616] Hongjin Qian and Zheng Liu. Scent of knowledge: Optimizing search-enhanced reasoning with information foraging. arXiv preprint arXiv:2505.09316, 2025. +[617] Libo Qin, Qiguang Chen, Fuxuan Wei, Shijue Huang, and Wanxiang Che. Cross-lingual prompting: Improving zero-shot chain-of-thought reasoning across languages. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 2695–2709, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.163. URL https://aclanthology.org/2023.emnlp-main.163/. +[618] Libo Qin, Qiguang Chen, Hao Fei, Zhi Chen, Min Li, and Wanxiang Che. What factors affect multi-modal in-context learning? an in-depth exploration. arXiv preprint arXiv:2410.20482, 2024. 
+[619] Libo Qin, Qiguang Chen, Xiachong Feng, Yang Wu, Yongheng Zhang, Yinghui Li, Min Li, Wanxiang Che, and Philip S Yu. Large language models meet nlp: A survey. arXiv preprint arXiv:2405.12819, 2024. +[620] Libo Qin, Qiguang Chen, Yuhang Zhou, Zhi Chen, Yinghui Li, Lizi Liao, Min Li, Wanxiang Che, and Philip S Yu. Multilingual large language model: A survey of resources, taxonomy and frontiers. arXiv preprint arXiv:2404.04925, 2024. +[621] Libo Qin, Qiguang Chen, Jingxuan Zhou, Jin Wang, Hao Fei, Wanxiang Che, and Min Li. Divide-solve-combine: An interpretable and accurate prompting framework for zero-shot multi-intent detection. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 39, pages 25038-25046, 2025. +[622] Libo Qin, Qiguang Chen, Yuhang Zhou, Zhi Chen, Yinghui Li, Lizi Liao, Min Li, Wanxiang Che, and S Yu Philip. A survey of multilingual large language models. Patterns, 6(1), January 2025. URL https://www.cell.com/patterns/fulltext/S2666-3899(24)00290-3. +[623] Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, et al. O1 replication journey: A strategic progress report-part 1. arXiv preprint arXiv:2410.18982, 2024. +[624] Yulei Qin, Gang Li, Zongyi Li, Zihan Xu, Yuchen Shi, Zhekai Lin, Xiao Cui, Ke Li, and Xing Sun. Incentivizing reasoning for advanced instruction-following of large language models. arXiv preprint arXiv:2506.01413, 2025. +[625] Jiahao Qiu, Yifu Lu, Yifan Zeng, Jiacheng Guo, Jiayi Geng, Huazheng Wang, Kaixuan Huang, Yue Wu, and Mengdi Wang. Treebon: Enhancing inference-time alignment with speculative tree-search and best-of-n sampling. arXiv preprint arXiv:2410.16033, 2024. +[626] Xiaoye Qu, Yafu Li, Zhaochen Su, Weigao Sun, Jianhao Yan, Dongrui Liu, Ganqu Cui, Daizong Liu, Shuxian Liang, Junxian He, et al. A survey of efficient reasoning for large reasoning models: Language, multimodality, and beyond. arXiv preprint arXiv:2503.21614, 2025. 
+[627] Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=DRC9pZwBwR. +[628] Yuxiao Qu, Matthew Y. R. Yang, Amrith Setlur, Lewis Tunstall, Edward Emanuel Beeching, Ruslan Salakhutdinov, and Aviral Kumar. Optimizing test-time compute via meta reinforcement finetuning. In Workshop on Reasoning and Planning for Large Language Models, March 2025. URL https://openreview.net/forum?id=WGz4ytjolh. +[629] Gollam Rabby, Farhana Keya, Parvez Zamil, and Soren Auer. Mc-nest-enhancing mathematical reasoning in large language models with a monte carlo nash equilibrium self-refine tree. arXiv preprint arXiv:2411.15645, 2024. + +[630] Santosh Kumar Radha and Oktay Goktas. On the reasoning capacity of ai models and how to quantify it. arXiv preprint arXiv:2501.13833, 2025. +[631] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. URL https://openreview.net/pdf?id=HPuSIXJaa9. +[632] Daking Rai and Ziyu Yao. An investigation of neuron activation as a unified lens to explain chain-of-thought eliciting arithmetic reasoning of LLMs. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7174–7193, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.387. URL https://aclanthology.org/2024.acl-long.387/. +[633] Leonardo Ranaldi, Giulia Pucci, Federico Ranaldi, Elena Sofia Ruzzetti, and Fabio Massimo Zanzotto. A tree-of-thoughts to broaden multi-step reasoning across languages. 
In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Findings of the Association for Computational Linguistics: NAACL 2024, pages 1229-1241, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-naacl.78. URL https://aclanthology.org/2024.findings-naacl.78/. +[634] Leonardo Ranaldi, Marco Valentino, Alexander Polonsky, and André Freitas. Improving chain-of-thought reasoning via quasi-symbolic abstractions. arXiv preprint arXiv:2502.12616, 2025. +[635] Mohammad Raza and Natasha Milic-Frayling. Instantiation-based formalization of logical reasoning tasks using language models and logical solvers. arXiv preprint arXiv:2501.16961, 2025. +[636] Ali Razghandi, Seyed Mohammad Hadi Hosseini, and Mahdieh Soleymani Baghshah. Cer: Confidence enhanced reasoning in llms. arXiv preprint arXiv:2502.14634, 2025. +[637] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=Ti67584b98. +[638] Matthew Renze and Erhan Guven. Self-reflection in llm agents: Effects on problem-solving performance. arXiv preprint arXiv:2405.06682, 2024. +[639] Baptiste Roziere, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaqing Ellen Tan, Yossi Adi, Jingyu Liu, Romain Sauvestre, Tal Remez, et al. Code llama: Open foundation models for code. arXiv preprint arXiv:2308.12950, 2023. +[640] Yangjun Ruan, Neil Band, Chris J Maddison, and Tatsunori Hashimoto. Reasoning to learn from latent thoughts. arXiv preprint arXiv:2503.18866, 2025. +[641] Jon Saad-Falcon, Rajan Vivek, William Berrios, Nandita Shankar Naik, Matija Franklin, Bertie Vidgen, Amanpreet Singh, Douwe Kiela, and Shikib Mehri. Lmunit: Fine-grained evaluation with natural language unit tests. arXiv preprint arXiv:2412.13091, 2024. 
+[642] Nikta Gohari Sadr, Sangmitra Madhusudan, and Ali Emami. Think or step-by-step? unzipping the black box in zero-shot prompts. arXiv preprint arXiv:2502.03418, 2025. +[643] Swarnadeep Saha, Xian Li, Marjan Ghazvininejad, Jason Weston, and Tianlu Wang. Learning to plan & reason for evaluation with thinking-llm-as-a-judge. arXiv preprint arXiv:2501.18099, 2025. +[644] S Sauhandikaa, R Bhagavath Narethranath, and R Sathya Bama Krishna. Explainable ai in large language models: A review. In 2024 International Conference on Emerging Research in Computational Science (ICERCS), pages 1-6. IEEE, 2024. URL http://ieeexplore.ieee.org/abstract/document/10895578. +[645] William Saunders, Catherine Yeh, Jeff Wu, Steven Bills, Long Ouyang, Jonathan Ward, and Jan Leike. Self-critiquing models for assisting human evaluators. arXiv preprint arXiv:2206.05802, 2022. + +[646] Nikunj Saunshi, Nishanth Dikkala, Zhiyuan Li, Sanjiv Kumar, and Sashank J Reddi. Reasoning with latent thoughts: On the power of looped transformers. arXiv preprint arXiv:2502.17416, 2025. +[647] Mark Schöne, Babak Rahmani, Heiner Kremer, Fabian Falck, Hitesh Ballani, and Jannes Gladrow. Implicit language models are RNNs: Balancing parallelization and expressivity. In *Forty-second International Conference on Machine Learning*, May 2025. URL https://openreview.net/forum?id=5EbiopWH6e. +[648] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017. +[649] ByteDance Seed, Jiaze Chen, Tiantian Fan, Xin Liu, Lingjun Liu, Zhiqi Lin, Mingxuan Wang, Chengyi Wang, Xiangpeng Wei, Wenyuan Xu, et al. Seed1.5-thinking: Advancing superb reasoning models with reinforcement learning. arXiv preprint arXiv:2504.13914, 2025. +[650] Amrith Setlur, Saurabh Garg, Xinyang Geng, Naman Garg, Virginia Smith, and Aviral Kumar. Rl on incorrect synthetic data scales the efficiency of llm math reasoning by eight-fold. In A. 
Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 43000-43031. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/4b77d5b896c321a29277524a98a50215-Paper-Conference.pdf. +[651] Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for LLM reasoning. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=A6Y7Aq1zLW. +[652] Amrith Setlur, Nived Rajaraman, Sergey Levine, and Aviral Kumar. Scaling test-time compute without verification or r1 is suboptimal. arXiv preprint arXiv:2502.12118, 2025. +[653] Amrith Setlur, Matthew YR Yang, Charlie Snell, Jeremy Greer, Ian Wu, Virginia Smith, Max Simchowitz, and Aviral Kumar. e3: Learning to explore enables extrapolation of test-time compute for llms. arXiv preprint arXiv:2506.09026, 2025. +[654] Yu Shang, Yu Li, Fengli Xu, and Yong Li. Synergy-of-thoughts: Eliciting efficient reasoning in hybrid language models. arXiv preprint arXiv:2402.02563, 2024. +[655] Rulin Shao, Shuyue Stella Li, Rui Xin, Scott Geng, Yiping Wang, Sewoong Oh, Simon Shaolei Du, Nathan Lambert, Sewon Min, Ranjay Krishna, et al. Spurious rewards: Rethinking training signals in rlvr. arXiv preprint arXiv:2506.10947, 2025. +[656] Wenqi Shao, Qiaosheng Zhang, Lingxiao Du, Xiangyan Liu, and Fanqing Meng. R1-multimodal-journey. https://github.com/FanqingM/R1-Multimodal-Journey, February 2025. +[657] Zhihong Shao, Yeyun Gong, Yelong Shen, Minlie Huang, Nan Duan, and Weizhu Chen. Synthetic prompting: Generating chain-of-thought demonstrations for large language models. 
In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett, editors, Proceedings of the 40th International Conference on Machine Learning, volume 202 of Proceedings of Machine Learning Research, pages 30706-30775. PMLR, 23-29 Jul 2023. URL https://proceedings.mlr.press/v202/shao23a.html. +[658] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. +[659] Shuaijie She, Junxiao Liu, Yifeng Liu, Jiajun Chen, Xin Huang, and Shujian Huang. R-prm: Reasoning-driven process reward modeling. arXiv preprint arXiv:2503.21295, 2025. +[660] Haozhan Shen, Zilun Zhang, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. https://github.com/om-ai-lab/VLM-R1, February 2025. +[661] Maohao Shen, Guangtao Zeng, Zhenting Qi, Zhang-Wei Hong, Zhenfang Chen, Wei Lu, Gregory Wornell, Subhro Das, David Cox, and Chuang Gan. Satori: Reinforcement learning + +with chain-of-action-thought enhances llm reasoning via autoregressive search. arXiv preprint arXiv:2502.02508, 2025. +[662] Xuan Shen, Yizhou Wang, Xiangxi Shi, Yanzhi Wang, Pu Zhao, and Jiuming Gu. Efficient reasoning with hidden thinking. arXiv preprint arXiv:2501.19201, 2025. +[663] Yi Shen, Jian Zhang, Jieyun Huang, Shuming Shi, Wenjing Zhang, Jiangze Yan, Ning Wang, Kai Wang, and Shiguo Lian. Dast: Difficulty-adaptive slow-thinking for large reasoning models. arXiv preprint arXiv:2503.04472, 2025. +[664] Yifan Shen, Yuanzhe Liu, Jingyuan Zhu, Xu Cao, Xiaofeng Zhang, Yixiao He, Wenming Ye, James Matthew Rehg, and Ismini Lourentzou. Fine-grained preference optimization improves spatial reasoning in vlms. arXiv preprint arXiv:2506.21656, 2025. 
+[665] Leheng Sheng, An Zhang, Zijian Wu, Weixiang Zhao, Changshuo Shen, Yi Zhang, Xiang Wang, and Tat-Seng Chua. On reasoning strength planning in large reasoning models. arXiv preprint arXiv:2506.08390, 2025. +[666] Hengyu Shi, Junhao Su, Huansheng Ning, Xiaoming Wei, and Jialin Gao. Layoutcot: Unleashing the deep reasoning potential of large language models for layout generation. arXiv preprint arXiv:2504.10829, 2025. +[667] Junhao Shi, Zhaoye Fei, Siyin Wang, Qipeng Guo, Jingjing Gong, and Xipeng Qiu. World-aware planning narratives enhance large vision-language model planner. arXiv preprint arXiv:2506.21230, 2025. +[668] Wenhao Shi, Zhiqiang Hu, Yi Bin, Yang Yang, See-Kiong Ng, and Heng Tao Shen. Multimodal mathematical reasoning with diverse solving perspective. arXiv preprint arXiv:2507.02804, 2025. +[669] Noah Shinn, Federico Cassano, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. Reflexion: language agents with verbal reinforcement learning. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 8634-8652. Curran Associates, Inc., December 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/1b44b878bb782e6954cd888628510e90-Paper-Conference.pdf. +[670] Safal Shrestha, Minwu Kim, and Keith Ross. Mathematical reasoning in large language models: Assessing logical and arithmetic errors across wide numerical ranges. arXiv preprint arXiv:2502.08680, 2025. +[671] Kashun Shum, Shizhe Diao, and Tong Zhang. Automatic prompt augmentation and selection with chain-of-thought from labeled data. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 12113-12139, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.811. URL https://aclanthology.org/2023.findings-emnlp.811/. +[672] Chenglei Si, Diyi Yang, and Tatsunori Hashimoto. 
Can llms generate novel research ideas? a large-scale human study with $100+$ nlp researchers. arXiv preprint arXiv:2409.04109, 2024. +[673] Sam Silver, Jimin Sun, Ivan Zhang, Sara Hooker, and Eddie Kim. Language models can perform single-utterance self-correction of perturbed reasoning. arXiv preprint arXiv:2506.15894, 2025. +[674] Avi Singh, John D Co-Reyes, Rishabh Agarwal, Ankesh Anand, Piyush Patil, Xavier Garcia, Peter J Liu, James Harrison, Jaehoon Lee, Kelvin Xu, et al. Beyond human data: Scaling self-training for problem-solving with language models. Transactions on Machine Learning Research, April 2024. URL https://openreview.net/pdf?id=lnAyUngGFK. +[675] Oscar Skean, Md Rifat Arefin, Dan Zhao, Niket Patel, Jalal Naghiyev, Yann LeCun, and Ravid Shwartz-Ziv. Layer by layer: Uncovering hidden representations in language models. arXiv preprint arXiv:2502.02013, 2025. +[676] Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024. + +[677] Huatong Song, Jinhao Jiang, Yingqian Min, Jie Chen, Zhipeng Chen, Wayne Xin Zhao, Lei Fang, and Ji-Rong Wen. R1-searcher: Incentivizing the search capability in llms via reinforcement learning. arXiv preprint arXiv:2503.05592, 2025. +[678] Jiwon Song, Dongwon Jo, Yulhwa Kim, and Jae-Joon Kim. Reasoning path compression: Compressing generation trajectories for efficient ltm reasoning. arXiv preprint arXiv:2505.13866, 2025. +[679] Mingyang Song, Zhaochen Su, Xiaoye Qu, Jiawei Zhou, and Yu Cheng. Prmbench: A fine-grained and challenging benchmark for process-level reward models. arXiv preprint arXiv:2501.03124, 2025. +[680] Mingyang Song, Mao Zheng, Zheng Li, Wenjie Yang, Xuan Luo, Yue Pan, and Feng Zhang. Fastcurl: Curriculum reinforcement learning with stage-wise context scaling for efficient training r1-like reasoning models. arXiv preprint arXiv:2503.17287, 2025. 
+[681] Woomin Song, Saket Dingliwal, Sai Muralidhar Jayanthi, Bhavana Ganesh, Jinwoo Shin, Aram Galstyan, and Sravan Babu Bodapati. Accelerated test-time scaling with model-free speculative sampling. arXiv preprint arXiv:2506.04708, 2025. +[682] Xiaoshuai Song, Yanan Wu, Weixun Wang, Jiaheng Liu, Wenbo Su, and Bo Zheng. Progco: Program helps self-correction of large language models. arXiv preprint arXiv:2501.01264, 2025. +[683] Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. arXiv preprint arXiv:2409.12183, 2024. +[684] Zayne Rea Sprague, Xi Ye, Kaj Bostrom, Swarat Chaudhuri, and Greg Durrett. MuSR: Testing the limits of chain-of-thought with multistep soft reasoning. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=jenyYQzuel. +[685] Gaurav Srivastava, Shuxiang Cao, and Xuan Wang. Towards reasoning ability of small language models. arXiv preprint arXiv:2502.11569, 2025. +[686] Saksham Sahai Srivastava and Vaneet Aggarwal. A technical survey of reinforcement learning techniques for large language models. arXiv preprint arXiv:2507.04136, 2025. +[687] Saksham Sahai Srivastava and Ashutosh Gandhi. Mathdivide: Improved mathematical reasoning by large language models. arXiv preprint arXiv:2405.13004, 2024. +[688] Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. Chain of thoughtlessness? an analysis of cot in planning. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id= kPBEAZU5Nm. +[689] Nisan Stiennon, Long Ouyang, Jeffrey Wu, Daniel Ziegler, Ryan Lowe, Chelsea Voss, Alec Radford, Dario Amodei, and Paul F Christiano. Learning to summarize with human feedback. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. 
Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 3008-3021. Curran Associates, Inc., December 2020. URL https://proceedings.neurips.cc/paper_files/paper/2020/file/1f89885d556929e98d3ef9b86448f951-Paper.pdf. +[690] Josefa Lia Stoisser, Marc Boubnovski Martell, and Julien Fauqueur. Sparks of tabular reasoning via text2sql reinforcement learning. arXiv preprint arXiv:2505.00016, 2025. +[691] DiJia Su, Sainbayar Sukhbaatar, Michael Rabbat, Yuandong Tian, and Qinqing Zheng. Dualformer: Controllable fast and slow thinking by learning with randomized reasoning traces. arXiv preprint arXiv:2410.09918, 2024. +[692] Jinyan Su and Claire Cardie. Thinking fast and right: Balancing accuracy and reasoning length with adaptive rewards. arXiv preprint arXiv:2505.18298, 2025. +[693] Yi Su, Dian Yu, Linfeng Song, Juntao Li, Haitao Mi, Zhaopeng Tu, Min Zhang, and Dong Yu. Expanding rl with verifiable rewards across diverse domains. arXiv preprint arXiv:2503.23829, 2025. + +[694] Zhaochen Su, Peng Xia, Hangyu Guo, Zhenhua Liu, Yan Ma, Xiaoye Qu, Jiaqi Liu, Yanshu Li, Kaide Zeng, Zhengyuan Yang, et al. Thinking with images for multimodal reasoning: Foundations, methods, and future frontiers. arXiv preprint arXiv:2506.23918, 2025. +[695] Guangyan Sun, Mingyu Jin, Zhenting Wang, Cheng-Long Wang, Siqi Ma, Qifan Wang, Tong Geng, Ying Nian Wu, Yongfeng Zhang, and Dongfang Liu. Visual agents as fast and slow thinkers. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=ncCuiD3KJQ. +[696] Jiankai Sun, Chuanyang Zheng, Enze Xie, Zhengying Liu, Ruihang Chu, Jianing Qiu, Jiaqi Xu, Mingyu Ding, Hongyang Li, Mengzhe Geng, et al. A survey of reasoning with foundation models. arXiv preprint arXiv:2312.11562, 2023. +[697] Linzhuang Sun, Hao Liang, Jingxuan Wei, Bihui Yu, Tianpeng Li, Fan Yang, Zenan Zhou, and Wentao Zhang. Mm-verify: Enhancing multimodal reasoning with chain-of-thought verification. 
arXiv preprint arXiv:2502.13383, 2025. +[698] Qiushi Sun, Zhoumianze Liu, Chang Ma, Zichen Ding, Fangzhi Xu, Zhangyue Yin, Haiteng Zhao, Zhenyu Wu, Kanzhi Cheng, Zhaoyang Liu, Jianing Wang, Qintong Li, Robert Tang, Tianbao Xie, Xiachong Feng, Xiang Li, Ben Kao, Wenhai Wang, Biqing Qi, Lingpeng Kong, and Zhiyong Wu. Scienceboard: Evaluating multimodal autonomous agents in realistic scientific workflows. In ICML 2025 Workshop on Computer Use Agents, June 2025. URL https://openreview.net/forum?id=CTtuHMeU5e. +[699] Shengyang Sun, Yian Zhang, Alexander Bukharin, David Mosallanezhad, Jiaqi Zeng, Soumye Singhal, Gerald Shen, Adi Renduchintala, Tugrul Konuk, Yi Dong, et al. Reward-aware preference optimization: A unified mathematical framework for model alignment. arXiv preprint arXiv:2502.00203, 2025. +[700] Wei Sun, Qianlong Du, Fuwei Cui, and Jiajun Zhang. An efficient and precise training data construction framework for process-supervised reward model in mathematical reasoning. arXiv preprint arXiv:2503.02382, 2025. +[701] Yifan Sun, Jingyan Shen, Yibin Wang, Tianyu Chen, Zhendong Wang, Mingyuan Zhou, and Huan Zhang. Improving data efficiency for ltm reinforcement fine-tuning through difficulty-targeted online data selection and rollout replay. arXiv preprint arXiv:2506.05316, 2025. +[702] Yuhong Sun, Zhangyue Yin, Xuanjing Huang, Xipeng Qiu, and Hui Zhao. Error classification of large language models on math word problems: A dynamically adaptive framework. arXiv preprint arXiv:2501.15581, 2025. +[703] Zhongxiang Sun, Qipeng Wang, Weijie Yu, Xiaoxue Zang, Kai Zheng, Jun Xu, Xiao Zhang, Song Yang, and Han Li. Rearter: Retrieval-augmented reasoning with trustworthy process rewarding. arXiv preprint arXiv:2501.07861, 2025. +[704] Richard S Sutton, David McAllester, Satinder Singh, and Yishay Mansour. Policy gradient methods for reinforcement learning with function approximation. In S. Solla, T. Leen, and K. 
Müller, editors, Advances in Neural Information Processing Systems, volume 12. MIT Press, November 1999. URL https://proceedings.neurips.cc/paper_files/paper/1999/file/464d828b85b0bed98e80ade0a5c43b0f-Paper.pdf. +[705] Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc Le, Ed Chi, Denny Zhou, and Jason Wei. Challenging BIG-bench tasks and whether chain-of-thought can solve them. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Findings of the Association for Computational Linguistics: ACL 2023, pages 13003-13051, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.824. URL https://aclanthology.org/2023-findings-acl.824/. +[706] Jihoon Tack, Jack Lanchantin, Jane Yu, Andrew Cohen, Ilia Kulikov, Janice Lan, Shibo Hao, Yuandong Tian, Jason Weston, and Xian Li. Llm pretraining with continuous concepts. arXiv preprint arXiv:2502.08524, 2025. +[707] Huajie Tan, Yuheng Ji, Xiaoshuai Hao, Minglan Lin, Pengwei Wang, Zhongyuan Wang, and Shanghang Zhang. Reason-rft: Reinforcement fine-tuning for visual reasoning. arXiv preprint arXiv:2503.20752, 2025. + +[708] Juanhe (TJ) Tan. Causal abstraction for chain-of-thought reasoning in arithmetic word problems. In Yonatan Belinkov, Sophie Hao, Jaap Jumelet, Najoung Kim, Arya McCarthy, and Hosein Mohebbi, editors, Proceedings of the 6th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP, pages 155–168, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.blackboxnlp-1.12. URL https://aclanthology.org/2023.blackboxnlp-1.12. +[709] Sijun Tan, Siyuan Zhuang, Kyle Montgomery, William Y Tang, Alejandro Cuadron, Chenguang Wang, Raluca Ada Popa, and Ion Stoica. Judgebench: A benchmark for evaluating llm-based judges. arXiv preprint arXiv:2410.12784, 2024. 
+[710] Xiaoyu Tan, Tianchu Yao, Chao Qu, Bin Li, Minghao Yang, Dakuan Lu, Haozhe Wang, Xihe Qiu, Wei Chu, Yinghui Xu, et al. Aurora: Automated training framework of universal process reward models via ensemble prompting and reverse verification. arXiv preprint arXiv:2502.11520, 2025. +[711] Kexian Tang, Junyao Gao, Yanhong Zeng, Haodong Duan, Yanan Sun, Zhening Xing, Wenran Liu, Kaifeng Lyu, and Kai Chen. Lego-puzzles: How good are mllms at multi-step spatial reasoning? arXiv preprint arXiv:2503.19990, 2025. +[712] Yihong Tang, Kehai Chen, Muyun Yang, Zhengyu Niu, Jing Li, Tiejun Zhao, and Min Zhang. Thinking in character: Advancing role-playing agents with role-aware reasoning. arXiv preprint arXiv:2506.01748, 2025. +[713] Zhengyang Tang, Ziniu Li, Zhenyang Xiao, Tian Ding, Ruoyu Sun, Benyou Wang, Dayiheng Liu, Fei Huang, Tianyu Liu, Bowen Yu, et al. Enabling scalable oversight via self-evolving critic. arXiv preprint arXiv:2501.05727, 2025. +[714] Zhengyang Tang, Ziniu Li, Zhenyang Xiao, Tian Ding, Ruoyu Sun, Benyou Wang, Dayiheng Liu, Fei Huang, Tianyu Liu, Bowen Yu, et al. Realcritic: Towards effectiveness-driven evaluation of language model critiques. arXiv preprint arXiv:2501.14492, 2025. +[715] Sree Harsha Tanneru, Dan Ley, Chirag Agarwal, and Himabindu Lakkaraju. On the hardness of faithful chain-of-thought reasoning in large language models. arXiv preprint arXiv:2406.10625, 2024. +[716] Amir Taubenfeld, Tom Sheffer, Eran Ofek, Amir Feder, Ariel Goldstein, Zorik Gekhman, and Gal Yona. Confidence improves self-consistency in llms. arXiv preprint arXiv:2502.06233, 2025. +[717] DolphinR1 Team. Dolphin R1. https://huggingface.co/datasets/cognitivecomputations/dolphin-r1, February 2025. +[718] Fancy-MLLM Team. R1 Onevision. https://huggingface.co/datasets/Fancy-MLLM/R1-Onevision, February 2025. +[719] Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. 
Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024. +[720] Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, et al. Gemma 2: Improving open language models at a practical size. arXiv preprint arXiv:2408.00118, 2024. +[721] Huggingface Team. Open r1. https://github.com/huggingface/open-r1, January 2025. +[722] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. +[723] NovaSky Team. Think less, achieve more: Cut reasoning costs by $50\%$ without sacrificing accuracy. https://novasky-ai.github.io/posts/reduce-overthinking, January 2025. Accessed: 2025-01-23. +[724] NovaSky Team. Sky-t1: Train your own o1 preview model within $ 450. https://novaskyai.github.io/posts/sky-t1, January 2025. Accessed: 2025-01-09. +[725] NVIDIA Team. Mistral-nemo-12b-instruct. https://huggingface.co/nvidia/Mistral-NeMo-12B-Instruct, July 2024. + +[726] OpenDeepResearch Team. Open deep research. https://github.com/nickscamara/open-deepresearch, February 2025. +[727] OpenO1 Team. Open o1. https://github.com/Open-Source-O1/Open-O1, February 2025. +[728] OpenR1 Team. Open r1 math 200k. https://huggingface.co/datasets/open-r1/OpenR1-Math-220k, February 2025. +[729] OpenThoughts Team. Open Thoughts. https://open-thoughts.ai, January 2025. +[730] PowerInfer Team. QwQ LongCoT 500k. https://huggingface.co/datasets/PowerInfer/QWQ-LONGCOT-500K, January 2025. +[731] QwQ Team. Qwq: Reflect deeply on the boundaries of the unknown. https://qwenlm.github.io/blog/qwq-32b-preview/, November 2025. +[732] X-R1 Team. X-r1. https://github.com/dhcode-cpp/X-R1, February 2025. +[733] Fengwei Teng, Zhaoyang Yu, Quan Shi, Jiayi Zhang, Chenglin Wu, and Yuyu Luo. 
Atom of thoughts for markov ltm test-time scaling. arXiv preprint arXiv:2502.12018, 2025. +[734] Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, et al. Llamav-o1: Rethinking step-by-step visual reasoning in llms. arXiv preprint arXiv:2501.06186, 2025. +[735] George Thomas, Alex J Chan, Jikun Kang, Wenqi Wu, Filippos Christianos, Fraser Greenlee, Andy Toulis, and Marvin Purtorab. Webgames: Challenging general-purpose web-browsing ai agents. arXiv preprint arXiv:2502.18356, 2025. +[736] Xiaoyu Tian, Sitong Zhao, Haotian Wang, Shuaiang Chen, Yunjie Ji, Yiping Peng, Han Zhao, and Xiangang Li. Think twice: Enhancing lIm reasoning by scaling multi-round test-time thinking. arXiv preprint arXiv:2503.19855, 2025. +[737] Ye Tian, Baolin Peng, Linfeng Song, Lifeng Jin, Dian Yu, Lei Han, Haitao Mi, and Dong Yu. Toward self-improvement of llms via imagination, searching, and criticizing. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 52723-52748. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/5e5853f35164e434015716a8c2a66543-Paper-Conference.pdf. +[738] Yuxuan Tong, Xiwen Zhang, Rui Wang, Ruidong Wu, and Junxian He. Dart-math: Difficulty-aware rejection tuning for mathematical problem-solving. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 7821-7846. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/0ef1afa0daa888d695dcd5e9513bafa3-Paper-Conference.pdf. +[739] Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. Openmathinstruct-2: Accelerating ai for math with massive open-source instruction data. 
arXiv preprint arXiv:2410.01560, 2024. +[740] Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. Openmathinstruct-2: Accelerating ai for math with massive open-source instruction data. arXiv preprint arXiv:2410.01560, 2024. +[741] Shubham Toshniwal, Ivan Moshkov, Sean Naresthiran, Daria Gitman, Fei Jia, and Igor Gitman. Openmathinstruct-1: A 1.8 million math instruction tuning dataset. arXiv preprint arXiv: Arxiv-2402.10176, 2024. +[742] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. +[743] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. +[744] Christoph Treude and Raula Gaikovina Kula. Interacting with ai reasoning models: Harnessing "thoughts" for ai-driven software engineering. arXiv preprint arXiv:2503.00483, 2025. + +[745] Luong Trung, Xinbo Zhang, Zhanming Jie, Peng Sun, Xiaoran Jin, and Hang Li. ReFT: Reasoning with reinforced fine-tuning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7601–7614, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.410. URL https://aclanthology.org/2024.acl-long.410/. +[746] Songjun Tu, Jiahao Lin, Qichao Zhang, Xiangyu Tian, Linjing Li, Xiangyuan Lan, and Dongbin Zhao. Learning when to think: Shaping adaptive reasoning in r1-style models via multi-stage rl. arXiv preprint arXiv:2505.10832, 2025. +[747] Benjamin Turtel, Danny Franklin, and Philipp Schoenegger. 
Llms can teach themselves to better predict the future. arXiv preprint arXiv:2502.05253, 2025. +[748] Martin Tutek, Fateme Hashemi Chaleshtori, Ana Marasović, and Yonatan Belinkov. Measuring faithfulness of chains of thought by unlearning reasoning steps. arXiv preprint arXiv:2502.14829, 2025. +[749] Jonathan Uesato, Nate Kushner, Ramana Kumar, Francis Song, Noah Siegel, Lisa Wang, Antonia Creswell, Geoffrey Irving, and Irina Higgins. Solving math word problems with process- and outcome-based feedback. arXiv preprint arXiv:2211.14275, 2022. +[750] Robert Vacareanu, Anurag Pratik, Evangelia Spiliopoulou, Zheng Qi, Giovanni Paolini, Neha Anna John, Jie Ma, Yassine Benajiba, and Miguel Ballesteros. General purpose verification for chain of thought prompting. arXiv preprint arXiv:2405.00204, 2024. +[751] Karthik Valmeekam, Kaya Stechly, and Subbarao Kambhampati. LLMs still can't plan; can LRMs? a preliminary evaluation of openAI's o1 on planbench. In NeurIPS 2024 Workshop on Open-World Agents, October 2024. URL https://openreview.net/forum?id=Gcr1Lx4Koz. +[752] Jean Vassoyan, Nathanaël Beau, and Roman Plaud. Ignore the kl penalty! boosting exploration on critical tokens to enhance rl fine-tuning. arXiv preprint arXiv:2502.06533, 2025. +[753] Tu Vu, Kalpesh Krishna, Salaheddin Alzubi, Chris Tar, Manaal Faruqui, and Yun-Hsuan Sung. Foundational autorators: Taming large language models for better automatic evaluation. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 17086-17105, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10. 18653/v1/2024.emnlp-main.949. URL https://aclanthology.org/2024.emnlp-main.949/. +[754] Guangya Wan, Yuqi Wu, Jie Chen, and Sheng Li. Cot rerailer: Enhancing the reliability of large language models in complex reasoning tasks through error detection and correction. arXiv preprint arXiv:2408.13940, 2024. 
+[755] Ziyu Wan, Xidong Feng, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. Alphazero-like tree-search can guide large language model decoding and training. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=C4OpREezgj. +[756] Ziyu Wan, Yunxiang Li, Yan Song, Hanjing Wang, Linyi Yang, Mark Schmidt, Jun Wang, Weinan Zhang, Shuyue Hu, and Ying Wen. Rema: Learning to meta-think for llms with multi-agent reinforcement learning. arXiv preprint arXiv:2503.09501, 2025. +[757] Ante Wang, Linfeng Song, Ye Tian, Baolin Peng, Dian Yu, Haitao Mi, Jinsong Su, and Dong Yu. Litesearch: Efficacious tree search for lIm. arXiv preprint arXiv:2407.00320, 2024. +[758] Ante Wang, Linfeng Song, Ye Tian, Dian Yu, Haitao Mi, Xiangyu Duan, Zhaopeng Tu, Jinsong Su, and Dong Yu. Don't get lost in the trees: Streamlining llm reasoning by overcoming tree search exploration pitfalls. arXiv preprint arXiv:2502.11183, 2025. +[759] Boshi Wang, Sewon Min, Xiang Deng, Jiaming Shen, You Wu, Luke Zettlemoyer, and Huan Sun. Towards understanding chain-of-thought prompting: An empirical study of what matters. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2717–2739, Toronto, Canada, July 2023. Association for Computational + +Linguistics. doi: 10.18653/v1/2023.acl-long.153. URL https://aclanthology.org/2023.acl-long.153/. +[760] Chao Wang, Luning Zhang, Zheng Wang, and Yang Zhou. Can large language models unveil the mysteries? an exploration of their ability to unlock information in complex scenarios. arXiv preprint arXiv:2502.19973, 2025. +[761] Chaojie Wang, Yanchen Deng, Zhiyi Lyu, Liang Zeng, Jujie He, Shuicheng Yan, and Bo An. Q*: Improving multi-step reasoning for llms with deliberative planning. arXiv preprint arXiv:2406.14283, 2024. 
+[762] Chenlong Wang, Yuanning Feng, Dongping Chen, Zhaoyang Chu, Ranjay Krishna, and Tianyi Zhou. Wait, we don't need to" wait!! removing thinking tokens improves reasoning efficiency. arXiv preprint arXiv:2506.08343, 2025. +[763] Clinton J Wang, Dean Lee, Cristina Menghini, Johannes Mols, Jack Doughty, Adam Khoja, Jayson Lynch, Sean Hendryx, Summer Yue, and Dan Hendrycks. Enigmaeval: A benchmark of long multimodal reasoning challenges. arXiv preprint arXiv:2502.08859, 2025. +[764] Danqing Wang, Zhuorui Ye, Fei Fang, and Lei Li. Cooperative strategic planning enhances reasoning capabilities in large language models. arXiv preprint arXiv:2410.20007, 2024. +[765] Evan Z Wang, Federico Cassano, Catherine Wu, Yunfeng Bai, William Song, Vaskar Nath, Ziwen Han, Sean M. Hendryx, Summer Yue, and Hugh Zhang. Planning in natural language improves LLM search for code generation. In The First Workshop on System-2 Reasoning at Scale, NeurIPS'24, October 2024. URL https://openreview.net/forum?id=B2iSfPNj49. +[766] Guoxin Wang, Minyu Gao, Shuai Yang, Ya Zhang, Lizhi He, Liang Huang, Hanlin Xiao, Yexuan Zhang, Wanyue Li, Lu Chen, et al. Citrus: Leveraging expert cognitive pathways in a medical language model for advanced medical decision support. arXiv preprint arXiv:2502.18274, 2025. +[767] Hanbin Wang, Xiaoxuan Zhou, Zhipeng Xu, Keyuan Cheng, Yuxin Zuo, Kai Tian, Jingwei Song, Junting Lu, Wenhui Hu, and Xueyang Liu. Code-vision: Evaluating multimodal llms logic understanding and code generation capabilities. arXiv preprint arXiv:2502.11829, 2025. +[768] Hanlin Wang, Jian Wang, Chak Tou Leong, and Wenjie Li. Steca: Step-level trajectory calibration for lIm agent learning. arXiv preprint arXiv:2502.14276, 2025. +[769] Hanyin Wang, Zhenbang Wu, Gururaj Kolar, Hariprasad Korsapati, Brian Bartlett, Bryan Hull, and Jimeng Sun. Reinforcement learning for out-of-distribution reasoning in llms: An empirical study on diagnosis-related group coding. arXiv preprint arXiv:2505.21908, 2025. 
+[770] Hao Wang, Boyi Liu, Yufeng Zhang, and Jie Chen. Seed-cts: Unleashing the power of tree search for superior performance in competitive coding tasks. arXiv preprint arXiv:2412.12544, 2024. +[771] Haoxiang Wang, Wei Xiong, Tengyang Xie, Han Zhao, and Tong Zhang. Interpretable preferences via multi-objective reward modeling and mixture-of-experts. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 10582-10592, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.620. URL https://aclanthology.org/2024/findings-emnlp.620/. +[772] Haoyu Wang, Zeyu Qin, Li Shen, Xueqian Wang, Minhao Cheng, and Dacheng Tao. Leveraging reasoning with guidelines to elicit and utilize knowledge for enhancing safety alignment. arXiv preprint arXiv:2502.04040, 2025. +[773] Huaijie Wang, Shibo Hao, Hanze Dong, Shenao Zhang, Yilin Bao, Ziran Yang, and Yi Wu. Offline reinforcement learning for llm multi-step reasoning. arXiv preprint arXiv:2412.16145, 2024. +[774] Jiaan Wang, Fandong Meng, Yunlong Liang, and Jie Zhou. Drt-o1: Optimized deep reasoning translation via long chain-of-thought. arXiv preprint arXiv:2412.17498, 2024. +[775] Jiaan Wang, Fandong Meng, and Jie Zhou. Extrans: Multilingual deep reasoning translation via exemplar-enhanced reinforcement learning. arXiv preprint arXiv:2505.12996, 2025. + +[776] Jiaqi WANG, Yuhang Zhou, Zhixiong Zhang, Qiguang Chen, Yongqiang Chen, and James Cheng. DivIL: Unveiling and addressing over-invariance for out-of-distribution generalization. Transactions on Machine Learning Research, February 2025. ISSN 2835-8856. URL https://openreview.net/forum?id=2Zan4ATYsh. +[777] Jun Wang, Meng Fang, Ziyu Wan, Muning Wen, Jiachen Zhu, Anjie Liu, Ziqin Gong, Yan Song, Lei Chen, Lionel M Ni, et al. Openr: An open source framework for advanced reasoning with large language models. 
arXiv preprint arXiv:2410.09671, 2024. +[778] Junlin Wang, Jue Wang, Ben Athiwaratkun, Ce Zhang, and James Zou. Mixture-of-agents enhances large language model capabilities. arXiv preprint arXiv:2406.04692, 2024. +[779] Junxiong Wang, Wen-Ding Li, Daniele Paliotta, Daniel Ritter, Alexander M Rush, and Tri Dao. M1: Towards scalable test-time compute with mamba reasoning models. arXiv preprint arXiv:2504.10449, 2025. +[780] Junyang Wang, Haiyang Xu, Xi Zhang, Ming Yan, Ji Zhang, Fei Huang, and Jitao Sang. Mobile-agent-v: Learning mobile device operation through video-guided multi-agent collaboration. arXiv preprint arXiv:2502.17110, 2025. +[781] Ke Wang, Houxing Ren, Aojun Zhou, Zimu Lu, Sichun Luo, Weikang Shi, Renrui Zhang, Linqi Song, Mingjie Zhan, and Hongsheng Li. Mathcoder: Seamless code integration in llms for enhanced mathematical reasoning. arXiv preprint arXiv:2310.03731, 2023. +[782] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Houxing Ren, Aojun Zhou, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with MATH-vision dataset. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=QWTCcxMpPA. +[783] Ke Wang, Houxing Ren, Aojun Zhou, Zimu Lu, Sichun Luo, Weikang Shi, Renrui Zhang, Linqi Song, Mingjie Zhan, and Hongsheng Li. Mathcoder: Seamless code integration in LLMs for enhanced mathematical reasoning. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=z8TW0ttBPp. +[784] Kevin Wang, Junbo Li, Neel P Bhatt, Yihan Xi, Qiang Liu, Ufuk Topcu, and Zhangyang Wang. On the planning abilities of openai's o1 models: Feasibility, optimality, and generalizability. arXiv preprint arXiv:2409.19924, 2024. +[785] Kun Wang, Guibin Zhang, Zhenhong Zhou, Jiahao Wu, Miao Yu, Shiqian Zhao, Chenlong Yin, Jinhu Fu, Yibo Yan, Hanjun Luo, et al. 
A comprehensive survey in llm (-agent) full stack safety: Data, training and deployment. arXiv preprint arXiv:2504.15585, 2025. +[786] Liang Wang, Haonan Chen, Nan Yang, Xiaolong Huang, Zhicheng Dou, and Furu Wei. Chain-of-retrieval augmented generation. arXiv preprint arXiv:2501.14342, 2025. +[787] Libo Wang. Dynamic chain-of-thought: Towards adaptive deep reasoning. arXiv preprint arXiv:2502.10428, 2025. +[788] Mengru Wang, Xingyu Chen, Yue Wang, Zhiwei He, Jiahao Xu, Tian Liang, Qizhhi Liu, Yunzhi Yao, Wenxuan Wang, Ruotian Ma, et al. Two experts are all you need for steering thinking: Reinforcing cognitive effort in moe reasoning models without additional training. arXiv preprint arXiv:2505.14681, 2025. +[789] Mingyang Wang, Lukas Lange, Heike Adel, Yunpu Ma, Jannik Strötgen, and Hinrich Schütze. Language mixing in reasoning language models: Patterns, impact, and internal causes. arXiv preprint arXiv:2505.14815, 2025. +[790] Minzheng Wang, Yongbin Li, Haobo Wang, Xinghua Zhang, Nan Xu, Bingli Wu, Fei Huang, Haiyang Yu, and Wenji Mao. Adaptive thinking via mode policy optimization for social language agents. arXiv preprint arXiv:2505.02156, 2025. +[791] Peifeng Wang, Austin Xu, Yilun Zhou, Caiming Xiong, and Shafiq Joty. Direct judgement preference optimization. arXiv preprint arXiv:2409.14664, 2024. +[792] Peiyi Wang, Lei Li, Zhihong Shao, Runxin Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce LLMs step-by-step without human annotations. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long + +Papers), pages 9426-9439, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.510. URL https://aclanthology.org/2024.acl-long.510/. +[793] Peng Wang, Xuesi Hu, Jiageng Wu, Yuntao Zou, Qiancheng Zhang, and Dagang Li. 
What factors affect llms and rllms in financial question answering? arXiv preprint arXiv:2507.08339, 2025. +[794] Peng Wang, Ruihan Tao, Qiguang Chen, Mengkang Hu, and Libo Qin. X-webagentbench: A multilingual interactive web benchmark for evaluating global agentic system. arXiv preprint arXiv:2505.15372, 2025. +[795] Peng-Yuan Wang, Tian-Shuo Liu, Chenyang Wang, Yi-Di Wang, Shu Yan, Cheng-Xing Jia, Xu-Hui Liu, Xin-Wei Chen, Jia-Cheng Xu, Ziniu Li, et al. A survey on large language models for mathematical reasoning. arXiv preprint arXiv:2506.08446, 2025. +[796] Ru Wang, Wei Huang, Selena Song, Haoyu Zhang, Yusuke Iwasawa, Yutaka Matsuo, and Jiaxian Guo. Beyond in-distribution success: Scaling curves of cot granularity for language model generalization. arXiv preprint arXiv:2502.18273, 2025. +[797] Ruida Wang, Rui Pan, Yuxin Li, Jipeng Zhang, Yizhen Jia, Shizhe Diao, Renjie Pi, Junjie Hu, and Tong Zhang. Ma-lot: Model-collaboration lean-based long chain-of-thought reasoning enhances formal theorem proving. arXiv preprint arXiv:2503.03205, 2025. +[798] Ruoyao Wang, Peter Jansen, Marc-Alexandre Côté, and Prithviraj Ammanabrolu. Science-World: Is your agent smarter than a 5th grader? In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang, editors, Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 11279–11298, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.775. URL https://aclanthology.org/2022.emnlp-main.775/. +[799] Siyuan Wang, Enda Zhao, Zhongyu Wei, and Xiang Ren. Stepwise informativeness search for improving llm reasoning. arXiv preprint arXiv:2502.15335, 2025. +[800] Song Wang, Gongfan Fang, Lingdong Kong, Xiangtai Li, Jianyun Xu, Sheng Yang, Qiang Li, Jianke Zhu, and Xinchao Wang. Pixelthink: Towards efficient chain-of-pixel reasoning. arXiv preprint arXiv:2505.23727, 2025. +[801] Tianlong Wang, Junzhe Chen, Xueting Han, and Jing Bai. 
Cpl: Critical plan step learning boosts llm generalization in reasoning tasks. arXiv preprint arXiv:2409.08642, 2024. +[802] Tianlu Wang, Ping Yu, Xiaoqing Ellen Tan, Sean O'Brien, Ramakanth Pasunuru, Jane Dwivedi-Yu, Olga Golovneva, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Shepherd: A critic for language model generation. arXiv preprint arXiv:2308.04592, 2023. +[803] Tianlu Wang, Ilia Kulikov, Olga Golovneva, Ping Yu, Weizhe Yuan, Jane Dwivedi-Yu, Richard Yuanzhe Pang, Maryam Fazel-Zarandi, Jason Weston, and Xian Li. Self-taught evaluators. arXiv preprint arXiv:2408.02666, 2024. +[804] Weixuan Wang, Minghao Wu, Barry Haddow, and Alexandra Birch. Demystifying multilingual chain-of-thought in process reward modeling. arXiv preprint arXiv:2502.12663, 2025. +[805] Weixun Wang, Shaopan Xiong, Gengru Chen, Wei Gao, Sheng Guo, Yancheng He, Ju Huang, Jiaheng Liu, Zhendong Li, Xiaoyang Li, et al. Reinforcement learning optimization for large-scale learning: An efficient and user-friendly scaling library. arXiv preprint arXiv:2506.06122, 2025. +[806] Weiyun Wang, Zhe Chen, Wenhai Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Jinguo Zhu, Xizhou Zhu, Lewei Lu, Yu Qiao, et al. Enhancing the reasoning ability of multimodal large language models via mixed preference optimization. arXiv preprint arXiv:2411.10442, 2024. +[807] Weiyun Wang, Zhangwei Gao, Lianjie Chen, Zhe Chen, Jinguo Zhu, Xiangyu Zhao, Yangzhou Liu, Yue Cao, Shenglong Ye, Xizhou Zhu, et al. Visualprm: An effective process reward model for multimodal reasoning. arXiv preprint arXiv:2503.10291, 2025. + +[808] Xiaoqiang Wang, Suyuchen Wang, Yun Zhu, and Bang Liu. System-1.5 reasoning: Traversal in language and latent spaces with dynamic shortcuts. arXiv preprint arXiv:2505.18962, 2025. +[809] Xiaoxuan Wang, Yihe Deng, Mingyu Derek Ma, and Wei Wang. Entropy-based adaptive weighting for self-training. arXiv preprint arXiv:2503.23913, 2025. 
+[810] Xinyi Wang, Lucas Caccia, Oleksiy Ostapenko, Xingdi Yuan, William Yang Wang, and Alessandro Sordoni. Guiding language model reasoning with planning tokens. arXiv preprint arXiv:2310.05707, 2023. +[811] Xinyi Wang, Alfonso Amayuelas, Kexun Zhang, Liangming Pan, Wenhu Chen, and William Yang Wang. Understanding reasoning ability of language models from the perspective of reasoning paths aggregation. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 50026-50042. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/wang24a.html. +[812] Xinyi Wang, Shawn Tan, Mingyu Jin, William Yang Wang, Rameswar Panda, and Yikang Shen. Do larger language models imply better reasoning? a pretraining scaling law for reasoning. arXiv preprint arXiv:2504.03635, 2025. +[813] Xiyao Wang, Jiuhai Chen, Zhaoyang Wang, Yuhang Zhou, Yiyang Zhou, Huaxiu Yao, Tianyi Zhou, Tom Goldstein, Parminder Bhatia, Furong Huang, et al. Enhancing visual-language modality alignment in large vision language models via self-improvement. arXiv preprint arXiv:2405.15973, 2024. +[814] Xiyao Wang, Linfeng Song, Ye Tian, Dian Yu, Baolin Peng, Haitao Mi, Furong Huang, and Dong Yu. Towards self-improvement of llms via mcts: Leveraging stepwise knowledge with curriculum preference learning. arXiv preprint arXiv:2410.06508, 2024. +[815] Xuezhi Wang and Denny Zhou. Chain-of-thought reasoning without prompting. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=4Zt7S0B0Jp. +[816] Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. 
In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=1PL1NIMMrw. +[817] Yao Wang, Mingxuan Cui, and Arthur Jiang. Enabling ai scientists to recognize innovation: A domain-agnostic algorithm for assessing novelty. arXiv preprint arXiv:2503.01508, 2025. +[818] Yifei Wang, Yuyang Wu, Zeming Wei, Stefanie Jegelka, and Yisen Wang. A theoretical understanding of self-correction through in-context alignment. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=OtvNLTWYww. +[819] Yiqun Wang, Sile Hu, Yonggang Zhang, Xiang Tian, Xuesong Liu, Yaowu Chen, Xu Shen, and Jieping Ye. How large language models implement chain-of-thought? September 2023. URL https://openreview.net/pdf?id=b2XfOm3RJa. +[820] Yu Wang, Nan Yang, Liang Wang, and Furu Wei. Examining false positives under inference scaling for mathematical reasoning. arXiv preprint arXiv:2502.06217, 2025. +[821] Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, Tianle Li, Max Ku, Kai Wang, Alex Zhuang, Rongqi Fan, Xiang Yue, and Wenhu Chen. MMLU-pro: A more robust and challenging multi-task language understanding benchmark. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=y10DM6R2r3. +[822] Yubo Wang, Xiang Yue, and Wenhu Chen. Critique fine-tuning: Learning to critique is more effective than learning to imitate. arXiv preprint arXiv:2501.17703, 2025. +[823] Yue Wang, Qiuzhi Liu, Jiahao Xu, Tian Liang, Xingyu Chen, Zhiwei He, Linfeng Song, Dian Yu, Juntao Li, Zhuosheng Zhang, et al. Thoughts are all over the place: On the underthinking of o1-like llms. arXiv preprint arXiv:2501.18585, 2025. + +[824] Yuhang Wang, Youhe Jiang, Bin Cui, and Fangcheng Fu. 
Thinking short and right over thinking long: Serving llm reasoning efficiently and accurately.
+[832] Zixiao Wang, Yuxin Wang, Xiaorui Wang, Mengting Xing, Jie Gao, Jianjun Xu, Guangcan Liu, Chenhui Jin, Zhuo Wang, Shengzhuo Zhang, et al. Test-time scaling with reflective generative model. arXiv preprint arXiv:2507.01951, 2025. +[833] Anjiang Wei, Jiannan Cao, Ran Li, Hongyu Chen, Yuhui Zhang, Ziheng Wang, Yaofeng Sun, Yuan Liu, Thiago SFX Teixeira, Diyi Yang, et al. Equibench: Benchmarking code reasoning capabilities of large language models via equivalence checking. arXiv preprint arXiv:2502.12466, 2025. +[834] Hao Wei. Medthoughts-8k: A medical question answering dataset, feb 2025. URL https://huggingface.co/datasets/hw-hwei/MedThoughts-8K. +[835] Haoran Wei, Youyang Yin, Yumeng Li, Jia Wang, Liang Zhao, Jianjian Sun, Zheng Ge, and Xiangyu Zhang. Slow perception: Let's perceive geometric figures step-by-step. arXiv preprint arXiv:2412.20631, 2024. +[836] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, brian richter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 24824-24837. Curran Associates, Inc., November 2022. URL https://proceedings.neurips.cc/paper_files/paper/2022/file/9d5609613524ecf4f15af0f7b31abca4-Paper-Conference.pdf. +[837] Shuyue Wei, Yongxin Tong, Zimu Zhou, Yi Xu, Jingkai Gao, Tongyu Wei, Tianran He, and Weifeng Lv. Federated reasoning llms: a survey. Frontiers of Computer Science, 19(12): 1-23, jun 2025. +[838] Ting-Ruen Wei, Haowei Liu, Xuyang Wu, and Yi Fang. A survey on feedback-based multi-step reasoning for large language models on mathematics. arXiv preprint arXiv:2502.14333, 2025. +[839] Yana Wei, Liang Zhao, Jianjian Sun, Kangheng Lin, Jisheng Yin, Jingcheng Hu, Yinmin Zhang, En Yu, Haoran Lv, Zejia Weng, et al. Open vision reasoner: Transferring linguistic cognitive behavior for visual reasoning. 
URL https://openreview.net/forum?id=eskQMcIbMS.
URL https://aclanthology.org/2023.findings-emnlp.167/.
Beyond examples: High-level automated reasoning paradigm in in-context learning via mcts. arXiv preprint arXiv:2411.18478, 2024. + +[854] Jinyang Wu, Mingkuan Feng, Shuai Zhang, Ruihan Jin, Feihu Che, Zengqi Wen, and Jianhua Tao. Boosting multimodal reasoning with mcts-automated structured thinking. arXiv preprint arXiv:2502.02339, 2025. +[855] Jinyang Wu, Chonghua Liao, Mingkuan Feng, Shuai Zhang, Zhengqi Wen, Pengpeng Shao, Huazhe Xu, and Jianhua Tao. Thought-augmented policy optimization: Bridging external guidance and internal capabilities. arXiv preprint arXiv:2505.15692, 2025. +[856] Junde Wu, Jiayuan Zhu, and Yuyuan Liu. Agentic reasoning: Reasoning llms with tools for the deep research. arXiv preprint arXiv:2502.04644, 2025. +[857] Qiong Wu, Xiangcong Yang, Yiyi Zhou, Chenxin Fang, Baiyang Song, Xiaoshuai Sun, and Rongrong Ji. Grounded chain-of-thought for multimodal large language models. arXiv preprint arXiv:2503.12799, 2025. +[858] Siwei Wu, Zhongyuan Peng, Xinrun Du, Tuney Zheng, Minghao Liu, Jialong Wu, Jiachen Ma, Yizhi Li, Jian Yang, Wangchunshu Zhou, et al. A comparative study on reasoning patterns of openai's o1 model. arXiv preprint arXiv:2410.13639, 2024. +[859] Siye Wu, Jian Xie, Yikai Zhang, Aili Chen, Kai Zhang, Yu Su, and Yanghua Xiao. Arm: Adaptive reasoning model. arXiv preprint arXiv:2505.20258, 2025. +[860] Tianhao Wu, Janice Lan, Weizhe Yuan, Jiantao Jiao, Jason Weston, and Sainbayar Sukhbaatar. Thinking llms: General instruction following with thought generation. arXiv preprint arXiv:2410.10630, 2024. +[861] Wenjie Wu, Yongcheng Jing, Yingjie Wang, Wenbin Hu, and Dacheng Tao. Graph-augmented reasoning: Evolving step-by-step knowledge graph retrieval for llm reasoning. arXiv preprint arXiv:2503.01642, 2025. +[862] Xiaobao Wu. Sailing by the stars: A survey on reward models and learning strategies for learning from rewards. arXiv preprint arXiv:2505.02686, 2025. 
When more is less: Understanding chain-of-thought length in llms.
arXiv preprint arXiv:2502.10858, 2025. +[872] Zhiheng Xi, Wenxiang Chen, Boyang Hong, Senjie Jin, Rui Zheng, Wei He, Yiwen Ding, Shichun Liu, Xin Guo, Junzhe Wang, et al. Training large language models for reasoning through reverse curriculum reinforcement learning. arXiv preprint arXiv:2402.05808, 2024. + +[873] Zhiheng Xi, Dingwen Yang, Jixuan Huang, Jiafu Tang, Guanyu Li, Yiwen Ding, Wei He, Boyang Hong, Shihan Do, Wenyu Zhan, et al. Enhancing llm reasoning via critique models with test-time and training-time supervision. arXiv preprint arXiv:2411.16579, 2024. +[874] Zhiheng Xi, Guanyu Li, Yutao Fan, Honglin Guo, Yufang Liu, Xiaoran Fan, Jiaqi Liu, Jingchao Ding, Wangmeng Zuo, Zhenfei Yin, et al. Bmmr: A large-scale bilingual multimodal multi-discipline reasoning dataset. arXiv preprint arXiv:2507.03483, 2025. +[875] Fanzeng Xia, Yidong Luo, Tinko Sebastian Bartels, Yaqi Xu, and Tongxin Li. Rethinking the unsolvable: When in-context search meets test-time scaling. arXiv preprint arXiv:2505.22290, 2025. +[876] Heming Xia, Yongqi Li, Chak Tou Leong, Wenjie Wang, and Wenjie Li. Tokenskip: Controllable chain-of-thought compression in lms. arXiv preprint arXiv:2502.12067, 2025. +[877] Shijie Xia, Xuefeng Li, Yixin Liu, Tongshuang Wu, and Pengfei Liu. Evaluating mathematical reasoning beyond accuracy. arXiv preprint arXiv:2404.05692, 2024. +[878] Yunhui Xia, Wei Shen, Yan Wang, Jason Klein Liu, Huifeng Sun, Siyue Wu, Jian Hu, and Xiaolong Xu. Leetcodedataset: A temporal dataset for robust evaluation and efficient training of code llms. arXiv preprint arXiv:2504.14655, 2025. +[879] Kun Xiang, Zhili Liu, Zihao Jiang, Yunshuang Nie, Runhui Huang, Haoxiang Fan, Hanhui Li, Weiran Huang, Yihan Zeng, Jianhua Han, et al. Atomthink: A slow thinking framework for multimodal mathematical reasoning. arXiv preprint arXiv:2411.11930, 2024. +[880] Violet Xiang, Chase Blagden, Rafael Rafailov, Nathan Lile, Sang Truong, Chelsea Finn, and Nick Haber. 
Towards system 2 reasoning in llms: Learning how to think with meta chain-of-thought.
In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=tN61DTr4Ed. +[888] Yuxi Xie, Kenji Kawaguchi, Yiran Zhao, Xu Zhao, Min-Yen Kan, Junxian He, and Qizhe Xie. Self-evaluation guided beam search for reasoning. In Thirty-seventh Conference on Neural Information Processing Systems, September 2023. URL https://openreview.net/forum?id=Bw82hwg5Q3. + +[889] Yuxi Xie, Anirudh Goyal, Wenyue Zheng, Min-Yen Kan, Timothy P Lillicrap, Kenji Kawaguchi, and Michael Shieh. Monte carlo tree search boosts reasoning via iterative preference learning. arXiv preprint arXiv:2405.00451, 2024. +[890] Zhifei Xie, Mingbao Lin, Zihang Liu, Pengcheng Wu, Shuicheng Yan, and Chunyan Miao. Audio-reasoner: Improving reasoning capability in large audio language models. arXiv preprint arXiv:2503.02318, 2025. +[891] Zhihui Xie, Liyu Chen, Weichao Mao, Jingjing Xu, Lingpeng Kong, et al. Teaching language models to critique via reinforcement learning. arXiv preprint arXiv:2502.03492, 2025. +[892] Siheng Xiong, Ali Payani, Yuan Yang, and Faramarz Fekri. Deliberate reasoning for llms as structure-aware planning with accurate world model. arXiv preprint arXiv:2410.03136, 2024. +[893] Wei Xiong, Chengshuai Shi, Jiaming Shen, Aviv Rosenberg, Zhen Qin, Daniele Calandriello, Misha Khalman, Rishabh Joshi, Bilal Piot, Mohammad Saleh, et al. Building math agents with multi-turn iterative preference learning. arXiv preprint arXiv:2409.02392, 2024. +[894] Wang Xiyao, Yang Zhengyuan, Li Linjie, Lu Hongjin, Xu Yuancheng, Lin Chung-Ching Lin, Lin Kevin, Huang Furong, and Wang Lijuan. Scaling inference-time search with vision value model for improved visual comprehension. arXiv preprint arXiv:2412.03704, 2024. +[895] Austin Xu, Yilun Zhou, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. J4r: Learning to judge with equivalent initial state group relative policy optimization. arXiv preprint arXiv:2505.13346, 2025. 
Llava-o1: Let vision language models reason step-by-step.
+[905] Pusheng Xu, Yue Wu, Kai Jin, Xiaolan Chen, Mingguang He, and Danli Shi. Deepseek-r1 outperforms gemini 2.0 pro, openai o1, and o3-mini in bilingual complex ophthalmology reasoning. arXiv preprint arXiv:2502.17947, 2025. +[906] Rongwu Xu, Xiaojian Li, Shuo Chen, and Wei Xu. "nuclear deployed!": Analyzing catastrophic risks in decision-making of autonomous llm agents. arXiv preprint arXiv:2502.11355, 2025. + +[907] Silei Xu, Wenhao Xie, Lingxiao Zhao, and Pengcheng He. Chain of draft: Thinking faster by writing less. arXiv preprint arXiv:2502.18600, 2025. +[908] Wenda Xu, Guanglei Zhu, Xuandong Zhao, Liangming Pan, Lei Li, and William Wang. Pride and prejudice: LLM amplifies self-bias in self-refinement. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15474–15492, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.826. URL https://aclanthology.org/2024.acl-long.826/. +[909] Xiaoang Xu, Shuo Wang, Xu Han, Zhenghao Liu, Huijia Wu, Peipei Li, Zhiyuan Liu, Maosong Sun, and Zhaofeng He. A\*thought: Efficient reasoning via bidirectional compression for low-resource settings. arXiv preprint arXiv:2505.24550, 2025. +[910] Xin Xu, Shizhe Diao, Can Yang, and Yang Wang. Can we verify step by step for incorrect answer detection? arXiv preprint arXiv:2402.10528, 2024. +[911] Yao Xu, Mingyu Xu, Fangyu Lei, Wangtao Sun, Xiangrong Zeng, Bingning Wang, Guang Liu, Shizhu He, Jun Zhao, and Kang Liu. Amplify adjacent token differences: Enhancing long chain-of-thought reasoning with shift-ffn. arXiv preprint arXiv:2505.17153, 2025. +[912] Yi Xu, Chengzhu Li, Han Zhou, Xingchen Wan, Caiqi Zhang, Anna Korhonen, and Ivan Vulić. Visual planning: Let's think only with images. In Workshop on Foundation Models Meet Embodied Agents at CVPR 2025, may 2025. URL https://openreview.net/forum?id=ELIt3v3S1J. 
Magpie: Alignment data synthesis from scratch by prompting aligned llms with nothing.
S$^{3}$c-math: Spontaneous step-level self-correction makes large language models better mathematical reasoners.
In Forty-second International Conference on Machine Learning, may 2025.
Towards thinking-optimal scaling of test-time compute for llm reasoning.
Truly assessing fluid intelligence of large language models through dynamic reasoning evaluation. arXiv preprint arXiv:2506.02648, 2025. +[948] Yuqing Yang, Yan Ma, and Pengfei Liu. Weak-to-strong reasoning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 8350-8367, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.490. URL https://aclanthology.org/2024.findings-emnlp.490/. +[949] Zeyuan Yang, Xueyang Yu, Delin Chen, Maohao Shen, and Chuang Gan. Machine mental imagery: Empower multimodal reasoning with latent visual tokens. arXiv preprint arXiv:2506.17218, 2025. +[950] Zhe Yang, Yichang Zhang, Yudong Wang, Ziyao Xu, Junyang Lin, and Zhifang Sui. Confidence vs critique: A decomposition of self-correction capability for llms. arXiv preprint arXiv:2412.19513, 2024. +[951] Zonghan Yang, Peng Li, Ming Yan, Ji Zhang, Fei Huang, and Yang Liu. React meets actre: Autonomous annotation of agent trajectories for contrastive self-training. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=0VLBwQGWpA. +[952] Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, et al. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. arXiv preprint arXiv:2412.18319, 2024. +[953] Huanjin Yao, Jiaxing Huang, Yawen Qiu, Michael K Chen, Wenzheng Liu, Wei Zhang, Wenjie Zeng, Xikun Zhang, Jingyi Zhang, Yuxin Song, et al. Mmreason: An open-ended multi-modal multi-step reasoning benchmark for mllms toward agi. arXiv preprint arXiv:2506.23563, 2025. +[954] Shunyu Yao, Howard Chen, John Yang, and Karthik R Narasimhan. Webshop: Towards scalable real-world web interaction with grounded language agents. In Alice H. 
Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho, editors, Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=R9KnuFlvnU. +[955] Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 11809-11822. Curran Associates, Inc., September 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/271db9922b8d1f4dd7aaef84ed5ac703-Paper-Conference.pdf. +[956] Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R Narasimhan, and Yuan Cao. React: Synergizing reasoning and acting in language models. In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=WE_vluYUL-X. + +[957] Xinhao Yao, Ruifeng Ren, Yun Liao, and Yong Liu. Unveiling the mechanisms of explicit cot training: How chain-of-thought enhances reasoning generalization. arXiv preprint arXiv:2502.04667, 2025. +[958] Yang Yao, Xuan Tong, Ruofan Wang, Yixu Wang, Lujundong Li, Liang Liu, Yan Teng, and Yingchun Wang. A mousetrap: Fooling large reasoning models for jailbreak with chain of iterative chaos. arXiv preprint arXiv:2502.15806, 2025. +[959] Wang Yaoting, Wu Shengqiong, Zhang Yuechen, Yan Shuicheng, Liu Ziwei, Luo Jiebo, and Fei Hao. Multimodal chain-of-thought reasoning: A comprehensive survey. arXiv preprint arXiv:2503.12605, 2025. +[960] Michihiro Yasunaga, Luke Zettlemoyer, and Marjan Ghazvininejad. Multimodal reward-bench: Holistic evaluation of reward models for vision language models. arXiv preprint arXiv:2502.14191, 2025. +[961] Nicolas Yax, Hernán Anló, and Stefano Palminteri. Studying and improving reasoning in humans and machines. Communications Psychology, 2(1):51, 2024. 
+[962] Guanghao Ye, Khiem Duc Pham, Xinzhi Zhang, Sivakanth Gopi, Baolin Peng, Beibin Li, Janardhan Kulkarni, and Huseyin A Inan. On the emergence of thinking in llms i: Searching for the right intuition. arXiv preprint arXiv:2502.06773, 2025. +[963] Jiaran Ye, Zijun Yao, Zhidian Huang, Liangming Pan, Jinxin Liu, Yushi Bai, Amy Xin, Liu Weichuan, Xiaoyin Che, Lei Hou, et al. How does transformer learn implicit reasoning? arXiv preprint arXiv:2505.23653, 2025. +[964] Rui Ye, Shuo Tang, Rui Ge, Yaxin Du, Zhenfei Yin, Jing Shao, and Siheng Chen. MAS-GPT: Training LLMs to build LLM-based multi-agent systems. In Workshop on Reasoning and Planning for Large Language Models, March 2025. URL https://openreview.net/forum?id=TqHoQIlumy. +[965] Tian Ye, Zicheng Xu, Yuanzhi Li, and Zeyuan Allen-Zhu. Physics of language models: Part 2.2, how to learn from mistakes on grade-school math problems. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=zpDGwcmMV4. +[966] Xinwu Ye, Chengfan Li, Siming Chen, Xiangru Tang, and Wei Wei. Mmscibench: Benchmarking language models on multimodal scientific problems. arXiv preprint arXiv:2503.01891, 2025. +[967] Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning. arXiv preprint arXiv:2502.03387, 2025. +[968] Zihuiwen Ye, Fraser Greenlee-Scott, Max Bartolo, Phil Blunsom, Jon Ander Campos, and Matthias Galle. Improving reward models with synthetic critiques. arXiv preprint arXiv:2405.20850, 2024. +[969] Zihuiwen Ye, Luckeciano Carvalho Melo, Younesse Kaddar, Phil Blunsom, Sam Staton, and Yarin Gal. Uncertainty-aware step-wise verification with generative reward models. arXiv preprint arXiv:2502.11250, 2025. +[970] Hao Yi, Qingyang Li, Yulan Hu, Fuzheng Zhang, Di Zhang, and Yong Liu. Sppd: Self-training with process preference learning using dynamic value margin. arXiv preprint arXiv:2502.13516, 2025. 
+[971] Jingyang Yi, Jiazheng Wang, and Sida Li. Shorterbetter: Guiding reasoning models to find optimal inference length for efficient reasoning. arXiv preprint arXiv:2504.21370, 2025. +[972] Qiyue Yin, Pei Xu, Qiaozhe Li, Shengda Liu, Shengqi Shen, Tong Wang, Yihong Han, Xiaonan Zhao, Likun Yang, Shiyue Cao, et al. Wgsr-bench: Wargame-based game-theoretic strategic reasoning benchmark for large language models. arXiv preprint arXiv:2506.10264, 2025. +[973] Zhangyue Yin, Qiushi Sun, Qipeng Guo, Zhiyuan Zeng, Xiaonan Li, Junqi Dai, Qinyuan Cheng, Xuanjing Huang, and Xipeng Qiu. Reasoning in flux: Enhancing large language models reasoning through uncertainty-aware adaptive guidance. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association + +for Computational Linguistics (Volume 1: Long Papers), pages 2401-2416, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.131. URL https://aclanthology.org/2024.acl-long.131/. +[974] Huaiyuan Ying, Shuo Zhang, Linyang Li, Zhejian Zhou, Yunfan Shao, Zhaoye Fei, Yichuan Ma, Jiawei Hong, Kuikun Liu, Ziyi Wang, et al. InternLM-Math: Open math large language models toward verifiable reasoning. arXiv preprint arXiv:2402.06332, 2024. +[975] Eunseop Yoon, Hee Suk Yoon, SooHwan Eom, Gunsoo Han, Daniel Wontae Nam, Daejin Jo, Kyoung-Woon On, Mark A Hasegawa-Johnson, Sungwoong Kim, and Chang D Yoo. Tlcr: Token-level continuous reward for fine-grained reinforcement learning from human feedback. arXiv preprint arXiv:2407.16574, 2024. +[976] Jaesik Yoon, Hyeonseo Cho, Doojin Baek, Yoshua Bengio, and Sungjin Ahn. Monte carlo tree diffusion for system 2 planning. arXiv preprint arXiv:2502.07202, 2025. +[977] Bin Yu, Hang Yuan, Haotian Li, Xueyin Xu, Yuliang Wei, Bailing Wang, Weizhen Qi, and Kai Chen. Long-short chain-of-thought mixture supervised fine-tuning eliciting efficient reasoning in large language models. 
arXiv preprint arXiv:2505.03469, 2025. +[978] Dian Yu, Baolin Peng, Ye Tian, Linfeng Song, Haitao Mi, and Dong Yu. Siam: Self-improving code-assisted mathematical reasoning of large language models. arXiv preprint arXiv:2408.15565, 2024. +[979] Fei Yu, Anningzhe Gao, and Benyou Wang. OVM, outcome-supervised value models for planning in mathematical reasoning. In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Findings of the Association for Computational Linguistics: NAACL 2024, pages 858-875, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-naacl.55. URL https://aclanthology.org/2024.findings-naacl.55/. +[980] Fei Yu, Hongbo Zhang, Prayag Tiwari, and Benyou Wang. Natural language reasoning, a survey. ACM Comput. Surv., 56(12), October 2024. ISSN 0360-0300. doi: 10.1145/3664194. URL https://doi.org/10.1145/3664194. +[981] Fei Yu, Yingru Li, and Benyou Wang. Uncertainty-aware search and value models: Mitigating search scaling flaws in llms. arXiv preprint arXiv:2502.11155, 2025. +[982] Hongli Yu, Tinghong Chen, Jiangtao Feng, Jiangjie Chen, Weinan Dai, Qiying Yu, YaQin Zhang, Wei-Ying Ma, Jingjing Liu, Mingxuan Wang, et al. Memagent: Reshaping long-context llm with multi-conv rl-based memory agent. arXiv preprint arXiv:2507.02259, 2025. +[983] Longhui Yu, Weisen Jiang, Han Shi, Jincheng YU, Zhengying Liu, Yu Zhang, James Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=N8N0hgNDRt. +[984] Ping Yu, Jing Xu, Jason Weston, and Ilia Kulikov. Distilling system 2 into system 1. arXiv preprint arXiv:2407.06023, 2024. +[985] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. 
arXiv preprint arXiv:2503.14476, 2025. +[986] Tianyu Yu, Bo Ji, Shouli Wang, Shu Yao, Zefan Wang, Ganqu Cui, Lifan Yuan, Ning Ding, Yuan Yao, Zhiyuan Liu, et al. Rlpr: Extrapolating rlvr to general domains without verifiers. arXiv preprint arXiv:2506.18254, 2025. +[987] Tong Yu, Yongcheng Jing, Xikun Zhang, Wentao Jiang, Wenjie Wu, Yingjie Wang, Wenbin Hu, Bo Du, and Dacheng Tao. Benchmarking reasoning robustness in large language models. arXiv preprint arXiv:2503.04550, 2025. +[988] Xiao Yu, Baolin Peng, Vineeth Vajipey, Hao Cheng, Michel Galley, Jianfeng Gao, and Zhou Yu. ExACT: Teaching AI agents to explore with reflective-MCTS and exploratory learning. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=GBIUbwW9D8. + +[989] Yahan Yu, Yuyang Dong, and Masafumi Oyamada. Learning deliberately, acting intuitively: Unlocking test-time reasoning in multimodal llms. arXiv preprint arXiv:2507.06999, 2025. +[990] Yiyao Yu, Yuxiang Zhang, Dongdong Zhang, Xiao Liang, Hengyuan Zhang, Xingxing Zhang, Ziyi Yang, Mahmoud Khademi, Hany Awadalla, Junjie Wang, et al. Chain-of-reasoning: Towards unified mathematical reasoning in large language models via a multi-paradigm perspective. arXiv preprint arXiv:2501.11110, 2025. +[991] Yue Yu, Zhengxing Chen, Aston Zhang, Liang Tan, Chenguang Zhu, Richard Yuanzhe Pang, Yundi Qian, Xuewei Wang, Suchin Gururangan, Chao Zhang, et al. Self-generated critiques boost reward modeling for language models. arXiv preprint arXiv:2411.16646, 2024. +[992] Zeping Yu, Yonatan Belinkov, and Sophia Ananiadou. Back attention: Understanding and enhancing multi-hop reasoning in large language models. arXiv preprint arXiv:2502.10835, 2025. +[993] Zhaojian Yu, Yilun Zhao, Arman Cohan, and Xiao-Ping Zhang. Humaneval pro and mbpp pro: Evaluating large language models on self-invoking code generation. arXiv preprint arXiv:2412.21199, 2024. 
+[994] Zhaojian Yu, Yinghao Wu, Yilun Zhao, Arman Cohan, and Xiao-Ping Zhang. Z1: Efficient test-time scaling with code. arXiv preprint arXiv:2504.00810, 2025. +[995] Zhouliang Yu, Yuhuan Yuan, Tim Z Xiao, Fuxiang Frank Xia, Jie Fu, Ge Zhang, Ge Lin, and Weiyang Liu. Generating symbolic world models via test-time scaling of large language models. arXiv preprint arXiv:2502.04728, 2025. +[996] Zhuohao Yu, Weizheng Gu, Yidong Wang, Zhengran Zeng, Jindong Wang, Wei Ye, and Shikun Zhang. Outcome-refining process supervision for code generation. arXiv preprint arXiv:2412.15118, 2024. +[997] Zishun Yu, Tengyu Xu, Di Jin, Karthik Abinav Sankararaman, Yun He, Wenxuan Zhou, Zhouhao Zeng, Eryk Helenowski, Chen Zhu, Sinong Wang, et al. Think smarter not harder: Adaptive reasoning with inference aware optimization. arXiv preprint arXiv:2501.17974, 2025. +[998] Hang Yuan, Bin Yu, Haotian Li, Shijun Yang, Christina Dan Wang, Zhou Yu, Xueyin Xu, Weizhen Qi, and Kai Chen. Not all tokens are what you need in thinking. arXiv preprint arXiv:2505.17827, 2025. +[999] Jiahao Yuan, Dehui Du, Hao Zhang, Zixiang Di, and Usman Naseem. Reversal of thought: Enhancing large language models with preference-guided reverse reasoning warm-up. arXiv preprint arXiv:2410.12323, 2024. +[1000] Lifan Yuan, Wendi Li, Huayu Chen, Ganqu Cui, Ning Ding, Kaiyan Zhang, Bowen Zhou, Zhiyuan Liu, and Hao Peng. Free process rewards without process labels. arXiv preprint arXiv:2412.01981, 2024. +[1001] Lifan Yuan, Ganqu Cui, Hanbin Wang, Ning Ding, Xingyao Wang, Boji Shan, Zeyuan Liu, Jia Deng, Huimin Chen, Ruobing Xie, Yankai Lin, Zhenghao Liu, Bowen Zhou, Hao Peng, Zhiyuan Liu, and Maosong Sun. Advancing LLM reasoning generalists with preference trees. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=2ea5TNVR0c. 
+[1002] Michelle Yuan, Elman Mansimov, Katerina Margatina, Anurag Pratik, Daniele Bonadiman, Monica Sunkara, Yi Zhang, Yassine Benajiba, et al. A study on leveraging search and self-feedback for agent reasoning. arXiv preprint arXiv:2502.12094, 2025. +[1003] Siyu Yuan, Zehui Chen, Zhiheng Xi, Junjie Ye, Zhengyin Du, and Jiecao Chen. Agentr: Training language model agents to reflect via iterative self-training. arXiv preprint arXiv:2501.11425, 2025. +[1004] Weizhe Yuan, Jane Yu, Song Jiang, Karthik Padthe, Yang Li, Dong Wang, Ilia Kulikov, Kyunghyun Cho, Yuandong Tian, Jason E Weston, and Xian Li. Naturalreasoning: Reasoning in the wild with 2.8m challenging questions, 2025. +[1005] Yige Yuan, Teng Xiao, Shuchang Tao, Xue Wang, Jinyang Gao, Bolin Ding, and Bingbing Xu. Incentivizing reasoning from weak supervision. arXiv preprint arXiv:2505.20072, 2025. + +[1006] Xiang Yue, Xingwei Qu, Ge Zhang, Yao Fu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. Mammoth: Building math generalist models through hybrid instruction tuning. arXiv preprint arXiv:2309.05653, 2023. +[1007] Xiang Yue, Tianyu Zheng, Ge Zhang, and Wenhu Chen. Mammoth2: Scaling instructions from the web. Advances in Neural Information Processing Systems, 37:90629-90660, 2025. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/a4ca07aa108036f80cbb5b82285fd4b1-Paper-Conference.pdf. +[1008] Zhenrui Yue, Bowen Jin, Huimin Zeng, Honglei Zhuang, Zhen Qin, Jinsung Yoon, Lanyu Shang, Jiawei Han, and Dong Wang. Hybrid latent reasoning via reinforcement learning. arXiv preprint arXiv:2505.18454, 2025. +[1009] Mert Yuksekgonul, Federico Bianchi, Joseph Boen, Sheng Liu, Pan Lu, Zhi Huang, Carlos Guestrin, and James Zou. Optimizing generative ai by backpropagating language model feedback. Nature, 639(8055):609-616, March 2025. URL https://www.nature.com/articles/s41586-025-08661-4. 
+[1010] Yu Yue, Yufeng Yuan, Qiying Yu, Xiaochen Zuo, Ruofei Zhu, Wenyuan Xu, Jiaze Chen, Chengyi Wang, TianTian Fan, Zhengyin Du, Xiangpeng Wei, Gaohong Liu, Juncai Liu, Lingjun Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Ru Zhang, Xin Liu, Mingxuan Wang, Yonghui Wu, and Lin Yan. Vapo: Efficient and reliable reinforcement learning for advanced reasoning tasks. arXiv preprint arXiv:2504.05118, 2025. +[1011] Yuhang Zang, Xiaoyi Dong, Pan Zhang, Yuhang Cao, Ziyu Liu, Shengyuan Ding, Shenxi Wu, Yubo Ma, Haodong Duan, Wenwei Zhang, et al. Internlm-xcomposer2.5-reward: A simple yet effective multi-modal reward model. arXiv preprint arXiv:2501.12368, 2025. +[1012] Eric Zelikman, Yuhuai Wu, Jesse Mu, and Noah Goodman. Star: Bootstrapping reasoning with reasoning. Advances in Neural Information Processing Systems, 35:15476-15488, November 2022. URL https://openreview.net/pdf?id=3ELRdg2sqI. +[1013] Eric Zelikman, Georges Harik, Yijia Shao, Varuna Jayasiri, Nick Haber, and Noah D Goodman. Quiet-star: Language models can teach themselves to think before speaking. arXiv preprint arXiv:2403.09629, 2024. +[1014] Huaye Zeng, Dongfu Jiang, Haozhe Wang, Ping Nie, Xiaotong Chen, and Wenhu Chen. Acecoder: Acing coder rl via automated test-case synthesis. arXiv preprint arXiv:2502.01718, 2025. +[1015] Thomas Zeng, Shuibai Zhang, Shutong Wu, Christian Classen, Daewon Chae, Ethan Ewer, Minjae Lee, Heeju Kim, Wonjun Kang, Jackson Kunde, et al. Versaprm: Multi-domain process reward model via synthetic reasoning data. arXiv preprint arXiv:2502.06737, 2025. +[1016] Weihao Zeng, Yuzhen Huang, Lulu Zhao, Yijun Wang, Zifei Shan, and Junxian He. B-star: Monitoring and balancing exploration and exploitation in self-taught reasoners. arXiv preprint arXiv:2412.17256, 2024. +[1017] Weihao Zeng, Yuzhen Huang, Qian Liu, Wei Liu, Keqing He, Zejun Ma, and Junxian He. 
Simplerl-zoo: Investigating and taming zero reinforcement learning for open base models in the wild, 2025. +[1018] Yongcheng Zeng, Xinyu Cui, Xuanfa Jin, Guoqing Liu, Zexu Sun, Quan He, Dong Li, Ning Yang, Jianye Hao, Haifeng Zhang, et al. Aries: Stimulating self-refinement of large language models by iterative preference optimization. arXiv preprint arXiv:2502.05605, 2025. +[1019] Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. arXiv preprint arXiv:2412.14135, 2024. +[1020] Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Yunhua Zhou, and Xipeng Qiu. Revisiting the test-time scaling of o1-like models: Do they truly possess test-time scaling capabilities? arXiv preprint arXiv:2502.12215, 2025. +[1021] Zhongshen Zeng, Yinhong Liu, Yingjia Wan, Jingyao Li, Pengguang Chen, Jianbo Dai, Yuxuan Yao, Rongwu Xu, Zehan Qi, Wanru Zhao, Linling Shen, Jianqiao Lu, Haochen Tan, Yukang Chen, Hao Zhang, Zhan Shi, Bailin Wang, Zhijiang Guo, and Jiaya Jia. MR-ben: + +A meta-reasoning benchmark for evaluating system-2 thinking in LLMs. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, June 2024. URL https://openreview.net/forum?id=GN2qbxZ1ni. +[1022] Zihao Zeng, Xuyao Huang, Boxiu Li, and Zhijie Deng. Sift: Grounding llm reasoning in contexts via stickers. arXiv preprint arXiv:2502.14922, 2025. +[1023] Yuexiang Zhai, Hao Bai, Zipeng Lin, Jiayi Pan, Shengbang Tong, Yifei Zhou, Alane Suhr, Saining Xie, Yann LeCun, Yi Ma, and Sergey Levine. Fine-tuning large vision-language models as decision-making agents via reinforcement learning. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=nBjmMF2IZU. +[1024] Zaifu Zhan, Shuang Zhou, Huixue Zhou, Jiawen Deng, Yu Hou, Jeremy Yeung, and Rui Zhang. 
An evaluation of deepseek models in biomedical natural language processing. arXiv preprint arXiv:2503.00624, 2025. +[1025] Alexander Zhang, Marcus Dong, Jiaheng Liu, Wei Zhang, Yejie Wang, Jian Yang, Ge Zhang, Tianyu Liu, Zhongyuan Peng, Yingshui Tan, et al. Codecriticbench: A holistic code critique benchmark for large language models. arXiv preprint arXiv:2502.16614, 2025. +[1026] Beichen Zhang, Yuhong Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Haodong Duan, Yuhang Cao, Dahua Lin, and Jiaqi Wang. Booststep: Boosting mathematical capability of large language models via improved single-step reasoning. arXiv preprint arXiv:2501.03226, 2025. +[1027] Bohan Zhang, Xiaokang Zhang, Jing Zhang, Jifan Yu, Sijia Luo, and Jie Tang. Cot-based synthesizer: Enhancing llm performance through answer synthesis. arXiv preprint arXiv:2501.01668, 2025. +[1028] Che Zhang, Zhenyang Xiao, Chengcheng Han, Yixin Lian, and Yuejian Fang. Learning to check: Unleashing potentials for self-correction in large language models. arXiv preprint arXiv:2402.13035, 2024. +[1029] Chi Zhang, Jiajun Song, Siyu Li, Yitao Liang, Yuxi Ma, Wei Wang, Yixin Zhu, and Song-Chun Zhu. Proposing and solving olympiad geometry with guided tree search. arXiv preprint arXiv:2412.10673, 2024. +[1030] Chunhui Zhang, Zhongyu Ouyang, Kwonjoon Lee, Nakul Agarwal, Sean Dae Houlihan, Soroush Vosoughi, and Shao-Yuan Lo. Overcoming multi-step complexity in multimodal theory-of-mind reasoning: A scalable bayesian planner. In *Forty-second International Conference on Machine Learning*, 2025. URL https://openreview.net/forum?id=2dz6psiiA0. +[1031] Dalong Zhang, Jun Xu, Jun Zhou, Lei Liang, Lin Yuan, Ling Zhong, Mengshu Sun, Peilong Zhao, QiWei Wang, Xiaorui Wang, et al. Kag-thinker: Teaching large language models to think with human-like reasoning process. arXiv preprint arXiv:2506.17728, 2025. +[1032] Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. 
ReST-MCTS*: LLM self-training via process reward guided tree search. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=8rcFOqEud5. +[1033] Di Zhang, Xiaoshui Huang, Dongzhan Zhou, Yuqiang Li, and Wanli Ouyang. Accessing gpt-4 level mathematical olympiad solutions via monte carlo tree self-refine with llama-3 8b. arXiv preprint arXiv:2406.07394, 2024. +[1034] Di Zhang, Jianbo Wu, Jingdi Lei, Tong Che, Jiatong Li, Tong Xie, Xiaoshui Huang, Shufei Zhang, Marco Pavone, Yuqiang Li, et al. Llama-berry: Pairwise optimization for o1-like olympiad-level mathematical reasoning. arXiv preprint arXiv:2410.02884, 2024. +[1035] Fengji Zhang, Linquan Wu, Huiyu Bai, Guancheng Lin, Xiao Li, Xiao Yu, Yue Wang, Bei Chen, and Jacky Keung. Humaneval-v: Evaluating visual understanding and reasoning abilities of large multimodal models through coding tasks. arXiv preprint arXiv:2410.12381, 2024. +[1036] Hanning Zhang, Pengcheng Wang, Shizhe Diao, Yong Lin, Rui Pan, Hanze Dong, Dylan Zhang, Pavlo Molchanov, and Tong Zhang. Entropy-regularized process reward model. arXiv preprint arXiv:2412.11006, 2024. + +[1037] Haoyue Zhang, Hualei Zhang, Xiaosong Ma, Jie Zhang, and Song Guo. Lazyeviction: Lagged kv eviction with attention pattern observation for efficient long reasoning. arXiv preprint arXiv:2506.15969, 2025. +[1038] Hongbo Zhang, Han Cui, Guangsheng Bao, Linyi Yang, Jun Wang, and Yue Zhang. Direct value optimization: Improving chain-of-thought reasoning in llms with refined values. arXiv preprint arXiv:2502.13723, 2025. +[1039] Jiayi Zhang, Jinyu Xiang, Zhaoyang Yu, Fengwei Teng, Xionghui Chen, Jiaqi Chen, Mingchen Zhuge, Xin Cheng, Sirui Hong, Jinlin Wang, et al. Aflow: Automating agentic workflow generation. arXiv preprint arXiv:2410.10762, 2024. +[1040] Jinghan Zhang, Xiting Wang, Fengran Mo, Yeyang Zhou, Wanfu Gao, and Kunpeng Liu. Entropy-based exploration conduction for multi-step reasoning. 
arXiv preprint arXiv:2503.15848, 2025. +[1041] Jintian Zhang, Yuqi Zhu, Mengshu Sun, Yujie Luo, Shuofei Qiao, Lun Du, Da Zheng, Huajun Chen, and Ningyu Zhang. Lighthinker: Thinking step-by-step compression. arXiv preprint arXiv:2502.15589, 2025. +[1042] Kaiyi Zhang, Ang Lv, Jinpeng Li, Yongbo Wang, Feng Wang, Haoyuan Hu, and Rui Yan. Stephint: Multi-level stepwise hints enhance reinforcement learning to reason. arXiv preprint arXiv:2507.02841, 2025. +[1043] Kechi Zhang, Ge Li, Jia Li, Yihong Dong, and Zhi Jin. Focused-dpo: Enhancing code generation through focused preference optimization on error-prone points. arXiv preprint arXiv:2502.11475, 2025. +[1044] Kechi Zhang, Ge Li, Jia Li, Huangzhao Zhang, Jingjing Xu, Hao Zhu, Lecheng Wang, Yihong Dong, Jing Mai, Bin Gu, et al. Computational thinking reasoning in large language models. arXiv preprint arXiv:2506.02658, 2025. +[1045] Kexun Zhang, Shang Zhou, Danqing Wang, William Yang Wang, and Lei Li. Scaling llm inference with optimized sample compute allocation. arXiv preprint arXiv:2410.22480, 2024. +[1046] Kongcheng Zhang, Qi Yao, Baisheng Lai, Jiaxing Huang, Wenkai Fang, Dacheng Tao, Mingli Song, and Shunyu Liu. Reasoning with reinforced functional token tuning. arXiv preprint arXiv:2502.13389, 2025. +[1047] Kongcheng Zhang, Qi Yao, Shunyu Liu, Yingjie Wang, Baisheng Lai, Jieping Ye, Mingli Song, and Dacheng Tao. Consistent paths lead to truth: Self-rewarding reinforcement learning for llm reasoning. arXiv preprint arXiv:2506.08745, 2025. +[1048] Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. arXiv preprint arXiv:2408.15240, 2024. +[1049] Ming Zhang, Yujiong Shen, Zelin Li, Huayu Sha, Binze Hu, Yuhui Wang, Chenhao Huang, Shichun Liu, Jingqi Tong, Changhao Jiang, et al. Llmeval-med: A real-world clinical benchmark for medical llms with physician validation. arXiv preprint arXiv:2506.04078, 2025. 
+[1050] Ming-Liang Zhang, Fei yin, and Cheng-Lin Liu. A multi-modal neural geometric solver with textual clauses parsed from diagram. In Edith Elkind, editor, Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence, IJCAI-23, pages 3374-3382. International Joint Conferences on Artificial Intelligence Organization, 8 2023. doi: 10.24963/ijcai.2023/376. URL https://doi.org/10.24963/ijcai.2023/376. Main Track. +[1051] Qingjie Zhang, Han Qiu, Di Wang, Haoting Qian, Yiming Li, Tianwei Zhang, and Minlie Huang. Understanding the dark side of llms' intrinsic self-correction. arXiv preprint arXiv:2412.14959, 2024. +[1052] Qiyuan Zhang, Fuyuan Lyu, Zexu Sun, Lei Wang, Weixu Zhang, Zhihan Guo, Yufei Wang, Irwin King, Xue Liu, and Chen Ma. What, how, where, and how well? a survey on test-time scaling in large language models. arXiv preprint arXiv:2503.24235, 2025. +[1053] Qiyuan Zhang, Fuyuan Lyu, Zexu Sun, Lei Wang, Weixu Zhang, Wenyue Hua, Haolun Wu, Zhihan Guo, Yufei Wang, Niklas Muennighoff, et al. A survey on test-time scaling in large language models: What, how, where, and how well? arXiv preprint arXiv:2503.24235, 2025. + +[1054] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Yu Qiao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? In European Conference on Computer Vision, pages 169-186. Springer, October 2024. URL https://link.springer.com/chapter/10.1007/978-3-031-73242-3_10. +[1055] Shaowei Zhang and Deyi Xiong. BackMATH: Towards backward reasoning for solving math problems step by step. In Owen Rambow, Leo Wanner, Marianna Apidianaki, Hend Al-Khalifa, Barbara Di Eugenio, Steven Schockaert, Kareem Darwish, and Apoorv Agarwal, editors, Proceedings of the 31st International Conference on Computational Linguistics: Industry Track, pages 466-482, Abu Dhabi, UAE, January 2025. Association for Computational Linguistics. 
URL https://aclanthology.org/2025.coling-industry.40/. +[1056] Shenao Zhang, Yaqing Wang, Yinxiao Liu, Tianqi Liu, Peter Grabowski, Eugene Ie, Zhaoran Wang, and Yunxuan Li. Beyond markovian: Reflective exploration via bayes-adaptive rl for llm reasoning. arXiv preprint arXiv:2505.20561, 2025. +[1057] Shengjia Zhang, Junjie Wu, Jiawei Chen, Changwang Zhang, Xingyu Lou, Wangchunshu Zhou, Sheng Zhou, Can Wang, and Jun Wang. Othink-r1: Intrinsic fast/slow thinking mode switching for over-reasoning mitigation. arXiv preprint arXiv:2506.02397, 2025. +[1058] Shengyu Zhang, Linfeng Dong, Xiaoya Li, Sen Zhang, Xiaofei Sun, Shuhe Wang, Jiwei Li, Runyi Hu, Tianwei Zhang, Fei Wu, et al. Instruction tuning for large language models: A survey. arXiv preprint arXiv:2308.10792, 2023. +[1059] Shimao Zhang, Xiao Liu, Xin Zhang, Junxiao Liu, Zheheng Luo, Shujian Huang, and Yeyun Gong. Process-based self-rewarding language models. arXiv preprint arXiv:2503.03746, 2025. +[1060] Weizhi Zhang, Yangning Li, Yuanchen Bei, Junyu Luo, Guancheng Wan, Liangwei Yang, Chenxuan Xie, Yuyao Yang, Wei-Chieh Huang, Chunyu Miao, et al. From web search towards agentic deep research: Incentivizing search with reasoning agents. arXiv preprint arXiv:2506.18959, 2025. +[1061] Wenjing Zhang, Xuejiao Lei, Zhaoxiang Liu, Ning Wang, Zhenhong Long, Peijun Yang, Jiaojiao Zhao, Minjie Hua, Chaoyang Ma, Kai Wang, et al. Safety evaluation of deepseek models in Chinese contexts. arXiv preprint arXiv:2502.11137, 2025. +[1062] Wenqi Zhang, Yongliang Shen, Linjuan Wu, Qiuying Peng, Jun Wang, Yueting Zhuang, and Weiming Lu. Self-contrast: Better reflection through inconsistent solving perspectives. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 3602–3622, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.197. 
URL https://aclanthology.org/2024.acl-long.197/. +[1063] Xiaoyun Zhang, Jingqing Ruan, Xing Ma, Yawen Zhu, Haodong Zhao, Hao Li, Jiansong Chen, Ke Zeng, and Xunliang Cai. When to continue thinking: Adaptive thinking mode switching for efficient reasoning. arXiv preprint arXiv:2505.15400, 2025. +[1064] Xinyu Zhang, Yuxuan Dong, Yanrui Wu, Jiaxing Huang, Chengyou Jia, Basura Fernando, Mike Zheng Shou, Lingling Zhang, and Jun Liu. Physreason: A comprehensive benchmark towards physics-based reasoning. arXiv preprint arXiv:2502.12054, 2025. +[1065] Xuan Zhang, Chao Du, Tianyu Pang, Qian Liu, Wei Gao, and Min Lin. Chain of preference optimization: Improving chain-of-thought reasoning in llms. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 333-356. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/00d80722b756de0166523a87805dd00f-Paper-Conference.pdf. +[1066] Xuanliang Zhang, Dingzirui Wang, Keyan Xu, Qingfu Zhu, and Wanxiang Che. Rot: Enhancing table reasoning with iterative row-wise traversals. arXiv preprint arXiv:2505.15110, 2025. +[1067] Yifan Zhang, Yang Yuan, and Andrew Chi-Chih Yao. On the diagram of thought. arXiv preprint arXiv:2409.10038, 2024. + +[1068] Yifan Zhang, Wenyu Du, Dongming Jin, Jie Fu, and Zhi Jin. Finite state automata inside transformers with chain-of-thought: A mechanistic study on state tracking. arXiv preprint arXiv:2502.20129, 2025. +[1069] Yong Zhang, Bingyuan Zhang, Zhitao Li, Ming Li, Ning Cheng, Minchuan Chen, Tao Wei, Jun Ma, Shaojun Wang, and Jing Xiao. Self-enhanced reasoning training: Activating latent reasoning in small models for enhanced reasoning distillation. arXiv preprint arXiv:2502.12744, 2025. +[1070] Yongheng Zhang, Qiguang Chen, Min Li, Wanxiang Che, and Libo Qin. AutoCAP: Towards automatic cross-lingual alignment planning for zero-shot chain-of-thought. 
In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 9191–9200, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.546. URL https://aclanthology.org/2024.findings-acl.546/. +[1071] Yongheng Zhang, Qiguang Chen, Jingxuan Zhou, Peng Wang, Jiasheng Si, Jin Wang, Wenpeng Lu, and Libo Qin. Wrong-of-thought: An integrated reasoning framework with multi-perspective verification and wrong information. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 6644-6653, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.388. URL https://aclanthology.org/2024.findings-emnlp.388/. +[1072] Yongheng Zhang, Xu Liu, Ruihan Tao, Qiguang Chen, Hao Fei, Wanxiang Che, and Libo Qin. Vitcot: Video-text interleaved chain-of-thought for boosting video understanding in large language models. arXiv preprint arXiv:2507.09876, 2025. +[1073] Yongheng Zhang, Xu Liu, Ruoxi Zhou, Qiguang Chen, Hao Fei, Wenpeng Lu, and Libo Qin. Cchall: A novel benchmark for joint cross-lingual and cross-modal hallucinations detection in large language models. arXiv preprint arXiv:2505.19108, 2025. +[1074] Yudi Zhang, Lu Wang, Meng Fang, Yali Du, Chenghua Huang, Jun Wang, Qingwei Lin, Mykola Pechenizkiy, Dongmei Zhang, Saravan Rajmohan, et al. Distill not only data but also rewards: Can smaller language models surpass larger ones? arXiv preprint arXiv:2502.19557, 2025. +[1075] Yunxiang Zhang, Muhammad Khalifa, Lajanugen Logeswaran, Jaekyeom Kim, Moontae Lee, Honglak Lee, and Lu Wang. Small language models need strong verifiers to self-correct reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 15637–15653, Bangkok, Thailand, August 2024. 
Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.924. URL https://aclanthology.org/2024.findings-acl.924/. +[1076] Yuxiang Zhang, Shangxi Wu, Yuqi Yang, Jiangming Shu, Jinlin Xiao, Chao Kong, and Jitao Sang. o1-coder: an o1 replication for coding. arXiv preprint arXiv:2412.00154, 2024. +[1077] Yuxiang Zhang, Yuqi Yang, Jiangming Shu, Yuhang Wang, Jinlin Xiao, and Jitao Sang. Openrft: Adapting reasoning foundation model for domain-specific tasks with reinforcement fine-tuning. arXiv preprint arXiv:2412.16849, 2024. +[1078] Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning. arXiv preprint arXiv:2501.07301, 2025. +[1079] Zhihao Zhang, Qiaole Dong, Qi Zhang, Jun Zhao, Enyu Zhou, Zhiheng Xi, Senjie Jin, Xiaoran Fan, Yuhao Zhou, Yanwei Fu, et al. Reinforcement fine-tuning enables mllms learning novel tasks stably. arXiv preprint arXiv:2506.23508, 2025. +[1080] Zhongwang Zhang, Pengxiao Lin, Zhiwei Wang, Yaoyu Zhang, and Zhi-Qin John Xu. Complexity control facilitates reasoning-based compositional generalization in transformers. arXiv preprint arXiv:2501.08537, 2025. +[1081] Zhuosheng Zhang, Aston Zhang, Mu Li, hai zhao, George Karypis, and Alex Smola. Multi-modal chain-of-thought reasoning in language models. Transactions on Machine Learning Research, June 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=y1pPWFVfvR. + +[1082] Deji Zhao, Donghong Han, Jia Wu, Zhongjiang He, Bo Ning, Ye Yuan, Yongxiang Li, Chao Wang, and Shuangyong Song. Enhancing math reasoning ability of large language models via computation logic graphs. Knowledge-Based Systems, page 113905, 2025. +[1083] Eric Zhao, Pranjal Awasthi, and Sreenivas Gollapudi. Sample, scrutinize and scale: Effective inference-time search by scaling verification. arXiv preprint arXiv:2502.01839, 2025. 
+[1084] Han Zhao, Haotian Wang, Yiping Peng, Sitong Zhao, Xiaoyu Tian, Shuaiying Chen, Yunjie Ji, and Xiangang Li. 1.4 million open-source distilled reasoning dataset to empower large language model training. arXiv preprint arXiv:2503.19633, 2025. +[1085] Jun Zhao, Jingqi Tong, Yurong Mou, Ming Zhang, Qi Zhang, and Xuanjing Huang. Exploring the compositional deficiency of large language models in mathematical reasoning through trap problems. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 16361-16376, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.915. URL https://aclanthology.org/2024.emnlp-main.915/. +[1086] Lili Zhao, Yang Wang, Qi Liu, Mengyun Wang, Wei Chen, Zhichao Sheng, and Shijin Wang. Evaluating large language models through role-guide and self-reflection: A comparative study. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=E36NHwe7Zc. +[1087] Shangziqi Zhao, Jiahao Yuan, Guisong Yang, and Usman Naseem. Can pruning improve reasoning? revisiting long-cot compression with capability in mind for better reasoning. arXiv preprint arXiv:2505.14582, 2025. +[1088] Weixiang Zhao, Jiahe Guo, Yang Deng, Xingyu Sui, Yulin Hu, Yanyan Zhao, Wanxiang Che, Bing Qin, Tat-Seng Chua, and Ting Liu. Exploring and exploiting the inherent efficiency within large reasoning models for self-guided efficiency enhancement. arXiv preprint arXiv:2506.15647, 2025. +[1089] Xuandong Zhao, Zhewei Kang, Aosong Feng, Sergey Levine, and Dawn Song. Learning to reason without external rewards. arXiv preprint arXiv:2505.19590, 2025. +[1090] Xueliang Zhao, Wei Wu, Jian Guan, and Lingpeng Kong. Promptcot: Synthesizing olympiad-level problems for mathematical reasoning in large language models. arXiv preprint arXiv:2503.02324, 2025. 
+[1091] Xufeng Zhao, Mengdi Li, Wenhao Lu, Cornelius Weber, Jae Hee Lee, Kun Chu, and Stefan Wermter. Enhancing zero-shot chain-of-thought reasoning in large language models through logic. In Nicoletta Calzolari, Min-Yen Kan, Veronique Hoste, Alessandro Lenci, Sakriani Sakti, and Nianwen Xue, editors, Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 6144-6166, Torino, Italia, May 2024. ELRA and ICCL. URL https://aclanthology.org/2024.lrec-main.543/. +[1092] Yachao Zhao, Bo Wang, and Yan Wang. Explicit vs. implicit: Investigating social bias in large language models through self-reflection. arXiv preprint arXiv:2501.02295, 2025. +[1093] Yang Zhao, Kai Xiong, Xiao Ding, Li Du, Zhouhao Sun, Jiannan Guan, Wenbin Zhang, Bin Liu, Dong Hu, Bing Qin, et al. Ufo-rl: Uncertainty-focused optimization for efficient reinforcement learning data selection. arXiv preprint arXiv:2505.12457, 2025. +[1094] Yichong Zhao and Susumu Goto. Can frontier llms replace annotators in biomedical text mining? analyzing challenges and exploring solutions. arXiv preprint arXiv:2503.03261, 2025. +[1095] Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. Marco-o1: Towards open reasoning models for open-ended solutions. arXiv preprint arXiv:2411.14405, 2024. +[1096] Yurui Zhao, Xiang Wang, Jiahong Liu, Irwin King, and Zhitao Huang. Towards geometry problem solving in the large model era: A survey. arXiv preprint arXiv:2506.02690, 2025. +[1097] Zhonghan Zhao, Wenwei Zhang, Haian Huang, Kuikun Liu, Jianfei Gao, Gaoang Wang, and Kai Chen. Rig: Synergizing reasoning and imagination in end-to-end generalist policy. arXiv preprint arXiv:2503.24388, 2025. + +[1098] Zilong Zhao, Yao Rong, Dongyang Guo, Emek Gözlüklü, Emir Gülboy, and Enkelejda Kasneci. Stepwise self-consistent mathematical reasoning with large language models. 
arXiv preprint arXiv:2402.17786, 2024. +[1099] Zirui Zhao, Wee Sun Lee, and David Hsu. Large language models as commonsense knowledge for large-scale task planning. Advances in Neural Information Processing Systems, 36:31967-31987, December 2023. URL https://openreview.net/pdf?id=ted747HURfX. +[1100] Bowen Zheng, Xiaolei Wang, Enze Liu, Xi Wang, Lu Hongyu, Yu Chen, Wayne Xin Zhao, and Ji-Rong Wen. Deeprec: Towards a deep dive into the item space with large language model based recommendation. arXiv preprint arXiv:2505.16810, 2025. +[1101] Chuanyang Zheng, Zhengying Liu, Enze Xie, Zhenguo Li, and Yu Li. Progressive-hint prompting improves reasoning in large language models. In AI for Math Workshop @ ICML 2024, June 2024. URL https://openreview.net/forum?id=UkFEs3ciz8. +[1102] Chujie Zheng, Zhenru Zhang, Beichen Zhang, Runji Lin, Keming Lu, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. Processbench: Identifying process errors in mathematical reasoning. arXiv preprint arXiv:2412.06559, 2024. +[1103] Da Zheng, Lun Du, Junwei Su, Yuchen Tian, Yuqi Zhu, Jintian Zhang, Lanning Wei, Ningyu Zhang, and Huajun Chen. Knowledge augmented complex problem solving with large language models: A survey. arXiv preprint arXiv:2505.03418, 2025. +[1104] Ge Zheng, Bin Yang, Jiajin Tang, Hong-Yu Zhou, and Sibei Yang. Ddcot: Duty-distinct chain-of-thought prompting for multimodal reasoning in language models. Advances in Neural Information Processing Systems, 36:5168-5191, 2023. +[1105] Hang Zheng, Hongshen Xu, Yuncong Liu, Lu Chen, Pascale Fung, and Kai Yu. Enhancing llm reliability via explicit knowledge boundary modeling. arXiv preprint arXiv:2503.02233, 2025. +[1106] Jiani Zheng, Lu Wang, Fangkai Yang, Chaoyun Zhang, Lingrui Mei, Wenjie Yin, Qingwei Lin, Dongmei Zhang, Saravan Rajmohan, and Qi Zhang. Vem: Environment-free exploration for training gui agent with value environment model. arXiv preprint arXiv:2502.18906, 2025. 
+[1107] Kunhao Zheng, Juliette Decugis, Jonas Gehring, Taco Cohen, benjamin negrevergne, and Gabriel Synnaeve. What makes large language models reason in (multi-turn) code generation? In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=Zk9guO19NS. +[1108] Tianyu Zheng, Ge Zhang, Tianhao Shen, Xueling Liu, Bill Yuchen Lin, Jie Fu, Wenhu Chen, and Xiang Yue. OpenCodeInterpreter: Integrating code generation with execution and refinement. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 12834–12859, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.762. URL https://aclanthology.org/2024.findings-acl.762/. +[1109] Xin Zheng, Jie Lou, Boxi Cao, Xueru Wen, Yuqiu Ji, Hongyu Lin, Yaojie Lu, Xianpei Han, Debing Zhang, and Le Sun. Critic-cot: Boosting the reasoning abilities of large language model via chain-of-thoughts critic. arXiv preprint arXiv:2408.16326, 2024. +[1110] Zhi Zheng, Zhuoliang Xie, Zhenkun Wang, and Bryan Hooi. Monte carlo tree search for comprehensive exploration in llm-based automatic heuristic design. arXiv preprint arXiv:2501.08603, 2025. +[1111] Jianyuan Zhong, Zeju Li, Zhijian Xu, Xiangyu Wen, and Qiang Xu. Dyve: Thinking fast and slow for dynamic process verification. arXiv preprint arXiv:2502.11157, 2025. +[1112] Qihuang Zhong, Kang Wang, Ziyang Xu, Juhua Liu, Liang Ding, and Bo Du. Achieving >97% on gsm8k: Deeply understanding the problems makes llms better solvers for math word problems. arXiv preprint arXiv:2404.14963, 2024. +[1113] Tianyang Zhong, Zhengliang Liu, Yi Pan, Yutong Zhang, Yifan Zhou, Shizhe Liang, Zihao Wu, Yanjun Lyu, Peng Shu, Xiaowei Yu, et al. Evaluation of openai o1: Opportunities and challenges of agi. arXiv preprint arXiv:2409.18486, 2024. 
+ +[1114] Andy Zhou, Kai Yan, Michal Shlapentokh-Rothman, Haohan Wang, and Yu-Xiong Wang. Language agent tree search unifies reasoning, acting, and planning in language models. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=njwv9BsGHF. +[1115] Aojun Zhou, Ke Wang, Zimu Lu, Weikang Shi, Sichun Luo, Zipeng Qin, Shaoqing Lu, Anya Jia, Linqi Song, Mingjie Zhan, and Hongsheng Li. Solving challenging math word problems using GPT-4 code interpreter with code-based self-verification. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=c8McWs4Av0. +[1116] Changzhi Zhou, Xinyu Zhang, Dandan Song, Xiancai Chen, Wanli Gu, Huipeng Ma, Yuhang Tian, Mengdi Zhang, and Linmei Hu. Refinecoder: Iterative improving of large language models via adaptive critique refinement for code generation. arXiv preprint arXiv:2502.09183, 2025. +[1117] Denny Zhou, Nathanael Schärli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc V Le, and Ed H. Chi. Least-to-most prompting enables complex reasoning in large language models. In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=WZH7099tgfM. +[1118] Fan Zhou, Haoyu Dong, Qian Liu, Zhoujun Cheng, Shi Han, and Dongmei Zhang. Reflection of thought: Inversely eliciting numerical reasoning in language models via solving linear systems. arXiv preprint arXiv:2210.05075, 2022. +[1119] Hengguang Zhou, Xirui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's "aha moment" in visual reasoning on a 2b non-sft model. arXiv preprint arXiv:2503.05132, 2025. +[1120] Jin Peng Zhou, Charles E Staats, Wenda Li, Christian Szegedy, Kilian Q Weinberger, and Yuhuai Wu. Don't trust: Verify – grounding LLM quantitative reasoning with autoformalization. 
In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=V5tdi14ple. +[1121] Jin Peng Zhou, Kaiwen Wang, Jonathan Chang, Zhaolin Gao, Nathan Kallus, Kilian Q Weinberger, Kianté Brantley, and Wen Sun. q#: Provably optimal distributional rl for llm post-training. arXiv preprint arXiv:2502.20548, 2025. +[1122] Kaiwen Zhou, Chengzhi Liu, Xuandong Zhao, Shreedhar Jangam, Jayanth Srinivasa, Gaowen Liu, Dawn Song, and Xin Eric Wang. The hidden risks of large reasoning models: A safety assessment of r1. arXiv preprint arXiv:2502.12659, 2025. +[1123] Lexin Zhou, Wout Schellaert, Fernando Martínez-Plumed, Yael Moros-Daval, César Ferri, and José Hernández-Orallo. Larger and more instructable language models become less reliable. Nature, 634(8032):61–68, 2024. URL https://www.nature.com/articles/s41586-024-07930-y. +[1124] Li Zhou, Ruijie Zhang, Xunlian Dai, Daniel Hershcovich, and Haizhou Li. Large language models penetration in scholarly writing and peer review. arXiv preprint arXiv:2502.11193, 2025. +[1125] Ruochen Zhou, Minrui Xu, Shiqi Chen, Junteng Liu, Yunqi Li, LIN Xinxin, Zhengyu Chen, and Junxian He. AI for math or math for AI? on the generalization of learning mathematical problem solving. In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, 2024. URL https://openreview.net/forum?id=xlnvZ85CSo. +[1126] Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. Webarena: A realistic web environment for building autonomous agents. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=oKn9c6ytLx. +[1127] Xiangxin Zhou, Zichen Liu, Anya Sims, Haonan Wang, Tianyu Pang, Chongxuan Li, Liang Wang, Min Lin, and Chao Du. Reinforcing general reasoning without verifiers. arXiv preprint arXiv:2505.21493, 2025. 
+ +[1128] Xiaofeng Zhou, Heyan Huang, and Lizi Liao. Debate, reflect, and distill: Multi-agent feedback with tree-structured preference optimization for efficient language model enhancement. arXiv preprint arXiv:2506.03541, 2025. +[1129] Xin Zhou, Yiwen Guo, Ruotian Ma, Tao Gui, Qi Zhang, and Xuanjing Huang. Self-consistency of the internal reward models improves self-rewarding language models. arXiv preprint arXiv:2502.08922, 2025. +[1130] Yang Zhou, Hongyi Liu, Zhuoming Chen, Yuandong Tian, and Beidi Chen. Gsm-infinite: How do your llms behave over infinitely increasing context length and reasoning complexity? arXiv preprint arXiv:2502.05252, 2025. +[1131] Yifei Zhou, Song Jiang, Yuandong Tian, Jason Weston, Sergey Levine, Sainbayar Sukhbaatar, and Xian Li. Sweet-rl: Training multi-turn llm agents on collaborative reasoning tasks. arXiv preprint arXiv:2503.15478, 2025. +[1132] Yufa Zhou, Shaobo Wang, Xingyu Dong, Xiangqi Jin, Yifang Chen, Yue Min, Kexin Yang, Xingzhang Ren, Dayiheng Liu, and Linfeng Zhang. Reasoning like an economist: Posttraining on economic problems induces strategic generalization in llms. arXiv preprint arXiv:2506.00577, 2025. +[1133] Zhanke Zhou, Zhaocheng Zhu, Xuan Li, Mikhail Galkin, Xiao Feng, Sanmi Koyejo, Jian Tang, and Bo Han. Landscape of thoughts: Visualizing the reasoning process of large language models. arXiv preprint arXiv:2503.22165, 2025. +[1134] Zhi Zhou, Tan Yuhao, Zenan Li, Yuan Yao, Lan-Zhe Guo, Xiaoxing Ma, and Yu-Feng Li. Bridging internal probability and self-consistency for effective and efficient llm reasoning. arXiv preprint arXiv:2502.00511, 2025. +[1135] Bin Zhu, Hailong Yin, Jingjing Chen, and Yu-Gang Jiang. Reasoning models are more easily gaslighted than you think. arXiv preprint arXiv:2506.09677, 2025. +[1136] Dawei Zhu, Xiyu Wei, Guangxiang Zhao, Wenhao Wu, Haosheng Zou, Junfeng Ran, Xun Wang, Lin Sun, Xiangzheng Zhang, and Sujian Li. 
Chain-of-thought matters: Improving long-context language models with reasoning path supervision. arXiv preprint arXiv:2502.20790, 2025. +[1137] Jason Zhu and Hongyu Li. Towards concise and adaptive thinking in large reasoning models: A survey. arXiv preprint arXiv:2507.09662, 2025. +[1138] Junda Zhu, Lingyong Yan, Shuaiqiang Wang, Dawei Yin, and Lei Sha. Reasoning-to-defend: Safety-aware reasoning can defend large language models from jailbreaking. arXiv preprint arXiv:2502.12970, 2025. +[1139] King Zhu, Hanhao Li, Siwei Wu, Tianshun Xing, Dehua Ma, Xiangru Tang, Minghao Liu, Jian Yang, Jiaheng Liu, Yuchen Eleanor Jiang, et al. Scaling test-time compute for llm agents. arXiv preprint arXiv:2506.12928, 2025. +[1140] Kunlun Zhu, Hongyi Du, Zhaochen Hong, Xiaocheng Yang, Shuyi Guo, Zhe Wang, Zhenhailong Wang, Cheng Qian, Xiangru Tang, Heng Ji, et al. Multiagentbench: Evaluating the collaboration and competition of llm agents. arXiv preprint arXiv:2503.01935, 2025. +[1141] Rongzhi Zhu, Yi Liu, Zequn Sun, Yiwei Wang, and Wei Hu. When can large reasoning models save thinking? mechanistic analysis of behavioral divergence in reasoning. arXiv preprint arXiv:2505.15276, 2025. +[1142] Tinghui Zhu, Kai Zhang, Jian Xie, and Yu Su. Deductive beam search: Decoding deducible rationale for chain-of-thought reasoning. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=S1XnUsqwr7. +[1143] Xinyu Zhu, Junjie Wang, Lin Zhang, Yuxiang Zhang, Yongfeng Huang, Ruyi Gan, Jiaxing Zhang, and Yujiu Yang. Solving math word problems via cooperative reasoning induced language models. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 4471–4485, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.245. URL https://aclanthology.org/2023.acl-long.245/. 
+ +[1144] Zihao Zhu, Hongbao Zhang, Ruotong Wang, Ke Xu, Siwei Lyu, and Baoyuan Wu. To think or not to think: Exploring the unthinking vulnerability in large reasoning models. arXiv preprint arXiv:2502.12202, 2025. +[1145] Zihao Zhu, Hongbao Zhang, Mingda Zhang, Ruotong Wang, Guanzong Wu, Ke Xu, and Baoyuan Wu. Bot: Breaking long thought processes of o1-like large language models through backdoor attack. arXiv preprint arXiv:2502.12202, 2025. +[1146] Ren Zhuang, Ben Wang, and Shuifa Sun. Accelerating chain-of-thought reasoning: When goal-gradient importance meets dynamic skipping. arXiv preprint arXiv:2505.08392, 2025. +[1147] Ziyu Zhuang, Qiguang Chen, Longxuan Ma, Mingda Li, Yi Han, Yushan Qian, Haopeng Bai, Weinan Zhang, and Liu Ting. Through the lens of core competency: Survey on evaluation of large language models. In Proceedings of the 22nd Chinese National Conference on Computational Linguistics (Volume 2: Frontier Forum), pages 88–109, Harbin, China, August 2023. Chinese Information Processing Society of China. URL https://aclanthology.org/2023.ccl-2.8/. +[1148] Alireza S Ziabari, Nona Ghazizadeh, Zhivar Sourati, Farzan Karimi-Malekabadi, Payam Piray, and Morteza Dehghani. Reasoning on a spectrum: Aligning llms to system 1 and system 2 thinking. arXiv preprint arXiv:2502.12470, 2025. +[1149] Henry Peng Zou, Zhengyao Gu, Yue Zhou, Yankai Chen, Weizhi Zhang, Liancheng Fang, Yibo Wang, Yangning Li, Kay Liu, and Philip S Yu. Testnuc: Enhancing test-time computing approaches through neighboring unlabeled data consistency. arXiv preprint arXiv:2502.19163, 2025. +[1150] Yuxin Zuo, Shang Qu, Yifei Li, Zhangren Chen, Xuekai Zhu, Ermo Hua, Kaiyan Zhang, Ning Ding, and Bowen Zhou. Medxpertqa: Benchmarking expert-level medical reasoning and understanding. arXiv preprint arXiv:2501.18362, 2025. 
\ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09567/images/04fef9422d7990eb4d902d3c902905109bd7fe0911512bee51a344a37488531e.jpg b/data/2025/2503_09xxx/2503.09567/images/04fef9422d7990eb4d902d3c902905109bd7fe0911512bee51a344a37488531e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd5287d271e388f34fe8a21418737840be770f4e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/04fef9422d7990eb4d902d3c902905109bd7fe0911512bee51a344a37488531e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:346503f8c4a3c7309f9ad3e944a3184e11ed34e9d6bf2bed63d858b92f68c5bd +size 22480 diff --git a/data/2025/2503_09xxx/2503.09567/images/09f798353b1e4615f84c4d824a90ec1e55d3d23579c20ec2917c35a81ade4452.jpg b/data/2025/2503_09xxx/2503.09567/images/09f798353b1e4615f84c4d824a90ec1e55d3d23579c20ec2917c35a81ade4452.jpg new file mode 100644 index 0000000000000000000000000000000000000000..966fd53bfb544df62004c3520d844dd33de7e5d2 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/09f798353b1e4615f84c4d824a90ec1e55d3d23579c20ec2917c35a81ade4452.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5b9f6671a2cb46de0857caa202df315a4401bf874f97901b5563f0f999cece2 +size 16296 diff --git a/data/2025/2503_09xxx/2503.09567/images/0a51aaca5f29b7a1155025d64a0e6be21201b8129c276ddc53c0a7fa47545014.jpg b/data/2025/2503_09xxx/2503.09567/images/0a51aaca5f29b7a1155025d64a0e6be21201b8129c276ddc53c0a7fa47545014.jpg new file mode 100644 index 0000000000000000000000000000000000000000..61a53d86f166bdf035f1fa7d98ad156f46ce0542 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/0a51aaca5f29b7a1155025d64a0e6be21201b8129c276ddc53c0a7fa47545014.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ba0fd868a05009474ca04feb0bc38545fb5d0390e5d08f6b27f482ab71ec6b8 +size 180666 diff --git 
a/data/2025/2503_09xxx/2503.09567/images/1c2231d3720d10adabb9b0e0eb328582cefd25a3bc7b9de3b2b5d8e38fcb3e48.jpg b/data/2025/2503_09xxx/2503.09567/images/1c2231d3720d10adabb9b0e0eb328582cefd25a3bc7b9de3b2b5d8e38fcb3e48.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d0961f1ad939c399eb7753320b9a492af2aa933 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/1c2231d3720d10adabb9b0e0eb328582cefd25a3bc7b9de3b2b5d8e38fcb3e48.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39b541f709d7a7fa97f4c0b863e3c9ef51a57546543ed90da2eaf291fc77ef26 +size 6555 diff --git a/data/2025/2503_09xxx/2503.09567/images/229175aa5f40cea2d4b91811dde0c78deb3d0da81008eac080070bf43c375633.jpg b/data/2025/2503_09xxx/2503.09567/images/229175aa5f40cea2d4b91811dde0c78deb3d0da81008eac080070bf43c375633.jpg new file mode 100644 index 0000000000000000000000000000000000000000..776976d895e5d09f317912070d9dfdf0c4428768 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/229175aa5f40cea2d4b91811dde0c78deb3d0da81008eac080070bf43c375633.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d80d709a951ecedff5a214ef9db6eeb19fb48449e00fb2f4f558207e95c09de4 +size 21797 diff --git a/data/2025/2503_09xxx/2503.09567/images/23630b42c465d84d800277ffb7ad33291ea526c1dea42266eee59f4ed6d6ce9b.jpg b/data/2025/2503_09xxx/2503.09567/images/23630b42c465d84d800277ffb7ad33291ea526c1dea42266eee59f4ed6d6ce9b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c1897935b3a10c9139349397139da3e9d3f84dce --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/23630b42c465d84d800277ffb7ad33291ea526c1dea42266eee59f4ed6d6ce9b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be07d38cf322df93798cdfa6f04355fee2dd8b4fc1f256964b7df98190c11877 +size 3232 diff --git a/data/2025/2503_09xxx/2503.09567/images/302920f94ae85e94ce64fd964759f21a7a4160de1d28055d6f3573f758563039.jpg 
b/data/2025/2503_09xxx/2503.09567/images/302920f94ae85e94ce64fd964759f21a7a4160de1d28055d6f3573f758563039.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e46c84ac0d18f3d7e5d8bfc4155293c4b154b23 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/302920f94ae85e94ce64fd964759f21a7a4160de1d28055d6f3573f758563039.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da85ac127b075b2017c874a04fc86c4ca8217ec3be294eb13281fbb8cb0e1a67 +size 55620 diff --git a/data/2025/2503_09xxx/2503.09567/images/367cca6990189dfda7e049a2d562809a0e9869ca5351f1d2d1d1e74c0f9bcafd.jpg b/data/2025/2503_09xxx/2503.09567/images/367cca6990189dfda7e049a2d562809a0e9869ca5351f1d2d1d1e74c0f9bcafd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7aecc7918a69d9eec23f3fbea39dd6ffff6f651b --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/367cca6990189dfda7e049a2d562809a0e9869ca5351f1d2d1d1e74c0f9bcafd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:407a0f672559d25df625914af9cbc69e6ed85b99743cd78bf4dfa74b996e1d89 +size 15548 diff --git a/data/2025/2503_09xxx/2503.09567/images/3ad5130812a0e24167c7dfa551d883eb38722de0d7f62be67b48e96ae2092acf.jpg b/data/2025/2503_09xxx/2503.09567/images/3ad5130812a0e24167c7dfa551d883eb38722de0d7f62be67b48e96ae2092acf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0207766cd9d7b434e78c3fe573f2acc0e5181312 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/3ad5130812a0e24167c7dfa551d883eb38722de0d7f62be67b48e96ae2092acf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0eee75419a1951aff2eaae1ca339e7e96a24ab591713d54e3d55c89e900aa64e +size 279836 diff --git a/data/2025/2503_09xxx/2503.09567/images/3bcec7826e1fbcdb6dfe89578c968d54d801a27312caf0fc018f86ba59fa632d.jpg b/data/2025/2503_09xxx/2503.09567/images/3bcec7826e1fbcdb6dfe89578c968d54d801a27312caf0fc018f86ba59fa632d.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..a2e4fa1bca5546a0ca235abaf03408c53eafa2c5 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/3bcec7826e1fbcdb6dfe89578c968d54d801a27312caf0fc018f86ba59fa632d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0791bf7b4d3418653e4e99ad50640b38c300adc672f7c349ec337cda28a37132 +size 130746 diff --git a/data/2025/2503_09xxx/2503.09567/images/3effed9f4c545b03b2ac2c365b4d87fe9724f58c4dddd4294def63e3d2f5672e.jpg b/data/2025/2503_09xxx/2503.09567/images/3effed9f4c545b03b2ac2c365b4d87fe9724f58c4dddd4294def63e3d2f5672e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..91541baa6bd6c77166b3d5e0534f5d76584cda06 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/3effed9f4c545b03b2ac2c365b4d87fe9724f58c4dddd4294def63e3d2f5672e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9644af9affaab9623fe64997edacb112dd45ea9d42bfbb740a571e83da0de2d5 +size 14333 diff --git a/data/2025/2503_09xxx/2503.09567/images/51fec61d82ab2a769606104af5832df56e4604f317836d062424f65c9e9866bf.jpg b/data/2025/2503_09xxx/2503.09567/images/51fec61d82ab2a769606104af5832df56e4604f317836d062424f65c9e9866bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c6cd435a3bdf6774398caffc617e558d0917b8ae --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/51fec61d82ab2a769606104af5832df56e4604f317836d062424f65c9e9866bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75b9ff9717a7867ad296eadabfa3a876fad71137416a1d31ae66a144863ab9eb +size 3251 diff --git a/data/2025/2503_09xxx/2503.09567/images/55a2cddee6720d6d5b6d79848689909b6e03f9c8563319f2fff7f35746a40240.jpg b/data/2025/2503_09xxx/2503.09567/images/55a2cddee6720d6d5b6d79848689909b6e03f9c8563319f2fff7f35746a40240.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c085560ccf701f9460668c6a28c207970e16439 --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09567/images/55a2cddee6720d6d5b6d79848689909b6e03f9c8563319f2fff7f35746a40240.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cf2756bbaf417c7ccb17174b625180ba8b37ad1fde92e38104cbe973b765deb +size 53623 diff --git a/data/2025/2503_09xxx/2503.09567/images/5627b8fe0330637f914d05c2ea3b75f4df43c678ec6ae3f0e9b7da8f94f4f43f.jpg b/data/2025/2503_09xxx/2503.09567/images/5627b8fe0330637f914d05c2ea3b75f4df43c678ec6ae3f0e9b7da8f94f4f43f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c14e049095aabf4a73f064ef830f0879de9f7cb5 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/5627b8fe0330637f914d05c2ea3b75f4df43c678ec6ae3f0e9b7da8f94f4f43f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78dc6d22f0055a7fee9a7c02714d176105b85087716e2a5047da24ba1b7b749c +size 1447 diff --git a/data/2025/2503_09xxx/2503.09567/images/5a5b622a5ef9a492838399c45ff5d29022e17e93ea38f8784aa310a395d4009d.jpg b/data/2025/2503_09xxx/2503.09567/images/5a5b622a5ef9a492838399c45ff5d29022e17e93ea38f8784aa310a395d4009d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..84b0e787dfddb8a70b617026566c63206fd4ebb4 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/5a5b622a5ef9a492838399c45ff5d29022e17e93ea38f8784aa310a395d4009d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ce4acaaf7d41d3156db3c1c15d498c8f5571df3ea21e7282ea5fd1a3168f02f +size 27595 diff --git a/data/2025/2503_09xxx/2503.09567/images/6186a168b180947a0489ea06e2588913d69a4a6c8207832b97251d4c7cdb7e9f.jpg b/data/2025/2503_09xxx/2503.09567/images/6186a168b180947a0489ea06e2588913d69a4a6c8207832b97251d4c7cdb7e9f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6dff9bee9309f83fe68740639aa9b17e63e5cda5 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/6186a168b180947a0489ea06e2588913d69a4a6c8207832b97251d4c7cdb7e9f.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:7448782c470315bb35529b78bb764a044a9fbef4d1ba0173ce4ceffa4f4cc5ee +size 16146 diff --git a/data/2025/2503_09xxx/2503.09567/images/6462f102f8623b3fc4af62f2c0f413f3392b4362b8f808630fa2fdef3362d761.jpg b/data/2025/2503_09xxx/2503.09567/images/6462f102f8623b3fc4af62f2c0f413f3392b4362b8f808630fa2fdef3362d761.jpg new file mode 100644 index 0000000000000000000000000000000000000000..76b2bfb2a24443d9ee46074daad8bf7ff0b16c2e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/6462f102f8623b3fc4af62f2c0f413f3392b4362b8f808630fa2fdef3362d761.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a9f8f9ba5140debb7c8f28a49227bb476ee22fb9d9572e92977321b36e4635b +size 42840 diff --git a/data/2025/2503_09xxx/2503.09567/images/651c3a02f7c05e2fa7e8a9730a03db50638cef9382a4885f455c35d277bec9cc.jpg b/data/2025/2503_09xxx/2503.09567/images/651c3a02f7c05e2fa7e8a9730a03db50638cef9382a4885f455c35d277bec9cc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..748bf17d600d7c8f8127fa99b0f21dd4d1a7756a --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/651c3a02f7c05e2fa7e8a9730a03db50638cef9382a4885f455c35d277bec9cc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7ffce7b6e61b7cd62ca15e1a0bd29ec5f848101d338f31c600a51433bb6bc49 +size 38290 diff --git a/data/2025/2503_09xxx/2503.09567/images/67120b715e01e4e0a5691bdc5abb971e8ebe54977751e351a5a6b2de0ae0cf33.jpg b/data/2025/2503_09xxx/2503.09567/images/67120b715e01e4e0a5691bdc5abb971e8ebe54977751e351a5a6b2de0ae0cf33.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a91ae9c1a62125fee5b192a04166d35cada526f8 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/67120b715e01e4e0a5691bdc5abb971e8ebe54977751e351a5a6b2de0ae0cf33.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e3c2b645304e83e1ced5baa520a71fe14b4877e666bdc4b35b9724e54397dec +size 155929 diff --git 
a/data/2025/2503_09xxx/2503.09567/images/67966a02d40f9abd83c46d1aa2a00109654912dd25dd4c03cf00063a6a48b186.jpg b/data/2025/2503_09xxx/2503.09567/images/67966a02d40f9abd83c46d1aa2a00109654912dd25dd4c03cf00063a6a48b186.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5fec9e3a02bf16a31cff9a14383d5e6619da09f8 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/67966a02d40f9abd83c46d1aa2a00109654912dd25dd4c03cf00063a6a48b186.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88063a4d0774b87c995e9e6d08f0fe69bc9bedbdee5ab557de0f6f45e5193928 +size 18706 diff --git a/data/2025/2503_09xxx/2503.09567/images/6914fc78c8aeece2af825dabacd242f08c842b612001c13322264246623afb04.jpg b/data/2025/2503_09xxx/2503.09567/images/6914fc78c8aeece2af825dabacd242f08c842b612001c13322264246623afb04.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8191fc93c6724171f6f4114a0c6cc78e134ed684 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/6914fc78c8aeece2af825dabacd242f08c842b612001c13322264246623afb04.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4a6b9252f399b04bfbaa24c16d96ee92a0425fee306a372a3373426505b38c1 +size 19865 diff --git a/data/2025/2503_09xxx/2503.09567/images/6b9af6579bd26e04c798016e01125ccc0cc0c837723baed594fe92c9e6c31804.jpg b/data/2025/2503_09xxx/2503.09567/images/6b9af6579bd26e04c798016e01125ccc0cc0c837723baed594fe92c9e6c31804.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a6083e518bd15db5c25e209d79fbd7cb1375f0e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/6b9af6579bd26e04c798016e01125ccc0cc0c837723baed594fe92c9e6c31804.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a39c87824b3c5166a6ce177ccca7d2bb51aa9ba8678a7543f34454dff3016aa +size 26129 diff --git a/data/2025/2503_09xxx/2503.09567/images/748e7abf84b0255c1331edd540782869194e76185b531fae8e9affbfdea58ee8.jpg 
b/data/2025/2503_09xxx/2503.09567/images/748e7abf84b0255c1331edd540782869194e76185b531fae8e9affbfdea58ee8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a9904f694ee369c359610295d421212778c7fa0b --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/748e7abf84b0255c1331edd540782869194e76185b531fae8e9affbfdea58ee8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a41c7722c1b21eddd4c05c45d6a13e9125619e95c9abbf5275db60bc94918b1c +size 6526 diff --git a/data/2025/2503_09xxx/2503.09567/images/7574cfd5bdc73debbbe23c4cd13dc43c38b3f705661075e1c69e68c8876576bc.jpg b/data/2025/2503_09xxx/2503.09567/images/7574cfd5bdc73debbbe23c4cd13dc43c38b3f705661075e1c69e68c8876576bc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..73bddda46cc325cfbfcdcaf0266116726c216517 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/7574cfd5bdc73debbbe23c4cd13dc43c38b3f705661075e1c69e68c8876576bc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97c21c719987e92fc90fac7514fa9f09076c1f0e43436e4a9f7f8b158ebe466d +size 26178 diff --git a/data/2025/2503_09xxx/2503.09567/images/75779ea3409037b107f99cc61b0546a161e6d6863edc845e12464cd3a1541651.jpg b/data/2025/2503_09xxx/2503.09567/images/75779ea3409037b107f99cc61b0546a161e6d6863edc845e12464cd3a1541651.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f541efe6c8d9db311902f472eb5810fff1e3802e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/75779ea3409037b107f99cc61b0546a161e6d6863edc845e12464cd3a1541651.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4618e0b6ff6131ab77ad182fe21a680999f3187c605a6b37599fb8ad1539e330 +size 5772 diff --git a/data/2025/2503_09xxx/2503.09567/images/75c5bea65e3eccbc79affd34b429b7f444436c52e5988975f4dec0ecb68328a3.jpg b/data/2025/2503_09xxx/2503.09567/images/75c5bea65e3eccbc79affd34b429b7f444436c52e5988975f4dec0ecb68328a3.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..50b911ee424f03c11e7572c5fb458e51fc9d2779 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/75c5bea65e3eccbc79affd34b429b7f444436c52e5988975f4dec0ecb68328a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fb5ceef07a6072d035945d32d162fce351a5f265b71faa64f5691e9f84b0c77 +size 30659 diff --git a/data/2025/2503_09xxx/2503.09567/images/77d6e310225283b575bdc27fdde0f240da93987fe57f7d9af2832fc35ebec190.jpg b/data/2025/2503_09xxx/2503.09567/images/77d6e310225283b575bdc27fdde0f240da93987fe57f7d9af2832fc35ebec190.jpg new file mode 100644 index 0000000000000000000000000000000000000000..88fe846fdee3722034e2062361f66b493072f272 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/77d6e310225283b575bdc27fdde0f240da93987fe57f7d9af2832fc35ebec190.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98ca67ab866155977601207aa9ea78da6f84a716e96486a47ef5395e161c1fe9 +size 10038 diff --git a/data/2025/2503_09xxx/2503.09567/images/787c5674fba7b0ce5e4ca3ac3eefd20babe3c384dc807cab022b3df606b88f7a.jpg b/data/2025/2503_09xxx/2503.09567/images/787c5674fba7b0ce5e4ca3ac3eefd20babe3c384dc807cab022b3df606b88f7a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b99591d895221ac0ee36c69080eddd30c72d3584 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/787c5674fba7b0ce5e4ca3ac3eefd20babe3c384dc807cab022b3df606b88f7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd4405dc8a54c1b76e06093c73f541113680bbec8dc897a9daf6bd12eb97204e +size 22583 diff --git a/data/2025/2503_09xxx/2503.09567/images/7acf371a734b42dff8be38ed39013e080e5d0020e7a712fdcc41abb09ba80b65.jpg b/data/2025/2503_09xxx/2503.09567/images/7acf371a734b42dff8be38ed39013e080e5d0020e7a712fdcc41abb09ba80b65.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6404aa6a54483a068a359ac42ae25689192a5990 --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09567/images/7acf371a734b42dff8be38ed39013e080e5d0020e7a712fdcc41abb09ba80b65.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4e32575694bce51297e1ec696a07c28e1d571e6e3274a2a2848d1e24a749db3 +size 28962 diff --git a/data/2025/2503_09xxx/2503.09567/images/7be1b7daf0c4a94db08288a01268f8d1a38f78cf980f847977a44854f53c8f2a.jpg b/data/2025/2503_09xxx/2503.09567/images/7be1b7daf0c4a94db08288a01268f8d1a38f78cf980f847977a44854f53c8f2a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..605f3d60c0771adb057360c3a10179aafc14d061 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/7be1b7daf0c4a94db08288a01268f8d1a38f78cf980f847977a44854f53c8f2a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9054d4e4935a18af57b0befcaea0faaf24f605e00784d8bd8ccec62122c82704 +size 16517 diff --git a/data/2025/2503_09xxx/2503.09567/images/80703458fe6b97a41337e32d746ae10f1ad5d7ce4cd1e803f369ce673d59c38c.jpg b/data/2025/2503_09xxx/2503.09567/images/80703458fe6b97a41337e32d746ae10f1ad5d7ce4cd1e803f369ce673d59c38c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..47c5d3b50ac060590863899caa9f646ecda98e0c --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/80703458fe6b97a41337e32d746ae10f1ad5d7ce4cd1e803f369ce673d59c38c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17f0d64f10ffc42bf23bdf713529d18c3cd912a1652e5c0731870e55eae79886 +size 4050 diff --git a/data/2025/2503_09xxx/2503.09567/images/81e2e27566788059519cb1c006b61eff3bd312ffd9284b18e9a21fb0bdb56552.jpg b/data/2025/2503_09xxx/2503.09567/images/81e2e27566788059519cb1c006b61eff3bd312ffd9284b18e9a21fb0bdb56552.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5e6119a56b05d776de988438142e908e112feb08 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/81e2e27566788059519cb1c006b61eff3bd312ffd9284b18e9a21fb0bdb56552.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:f0e12742d8627c8cb2acd0757b8df946377f7cc5ace71695bdfb2632d7252cf2 +size 11831 diff --git a/data/2025/2503_09xxx/2503.09567/images/823ca26e30f5429ff0ae86df5e048ed2430dd9bdc62c2d874445fe64c1774d87.jpg b/data/2025/2503_09xxx/2503.09567/images/823ca26e30f5429ff0ae86df5e048ed2430dd9bdc62c2d874445fe64c1774d87.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6680d5bf71448fa6c1ba97a93001880302febb71 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/823ca26e30f5429ff0ae86df5e048ed2430dd9bdc62c2d874445fe64c1774d87.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:316a1a975e95f5c361dc3894b0c2c8e3ec6ce2fc043f9cefcdfa90724fd2d480 +size 3163 diff --git a/data/2025/2503_09xxx/2503.09567/images/8906843300658ba9f577713856804416059d5d5e72ce14c0c9304e8987c15cd2.jpg b/data/2025/2503_09xxx/2503.09567/images/8906843300658ba9f577713856804416059d5d5e72ce14c0c9304e8987c15cd2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ad3b1acbdc40515588c81fa918262305a21b26a4 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/8906843300658ba9f577713856804416059d5d5e72ce14c0c9304e8987c15cd2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e923b32045798331baea30d2b654b4c9c774422a8c83e2574e837c3eb5e1a7d +size 141223 diff --git a/data/2025/2503_09xxx/2503.09567/images/8fd520586ef8e1e9b261fefe8d9414d799cbcc475fa68617bc151b1944824f09.jpg b/data/2025/2503_09xxx/2503.09567/images/8fd520586ef8e1e9b261fefe8d9414d799cbcc475fa68617bc151b1944824f09.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5afb2b86bb6d9a7bc8fcf8549cbd81ac2acd3d8a --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/8fd520586ef8e1e9b261fefe8d9414d799cbcc475fa68617bc151b1944824f09.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:838378be7028b5efa06c7dbd66a631ddcf65e8aa92847376c2bd7b41e18c1db7 +size 43818 diff --git 
a/data/2025/2503_09xxx/2503.09567/images/924dba4b1d5c6d25f0eff62713bafcbf9c36e9cd21483aae275897e288afdd77.jpg b/data/2025/2503_09xxx/2503.09567/images/924dba4b1d5c6d25f0eff62713bafcbf9c36e9cd21483aae275897e288afdd77.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8bf71ca9e24dc414c739263f9434369c344a1e98 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/924dba4b1d5c6d25f0eff62713bafcbf9c36e9cd21483aae275897e288afdd77.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd8cb2cea4df40ccdba98a200b7d8f27b5b4fa30e5aeb39aefbe28575771c317 +size 18325 diff --git a/data/2025/2503_09xxx/2503.09567/images/9628c4a63cf86935164b10c8341472684c37377c14dfa2282ae72c4f75176336.jpg b/data/2025/2503_09xxx/2503.09567/images/9628c4a63cf86935164b10c8341472684c37377c14dfa2282ae72c4f75176336.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46809911d84521755eaa02a37bfdb3831cbeb1a0 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/9628c4a63cf86935164b10c8341472684c37377c14dfa2282ae72c4f75176336.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45b16f63372ab997be69c405701dddf5de1b0c22316cfb39899940b776814480 +size 4469 diff --git a/data/2025/2503_09xxx/2503.09567/images/998c396b19ab3cb07b28b1eb72b14b5078de0675ec62ef33294a59363f34d6e2.jpg b/data/2025/2503_09xxx/2503.09567/images/998c396b19ab3cb07b28b1eb72b14b5078de0675ec62ef33294a59363f34d6e2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ed8c13cc8fa76edb5feedbfb0db59e55e8c4f87 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/998c396b19ab3cb07b28b1eb72b14b5078de0675ec62ef33294a59363f34d6e2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c103c83624d700f6ba7ee525fd91c756770b7d5d02de81b968298d3b96c89686 +size 3540 diff --git a/data/2025/2503_09xxx/2503.09567/images/9c6831bab18873d643e710c5d4ac73f2ffb018fd0b5a6426ad2fad8decd5da47.jpg 
b/data/2025/2503_09xxx/2503.09567/images/9c6831bab18873d643e710c5d4ac73f2ffb018fd0b5a6426ad2fad8decd5da47.jpg new file mode 100644 index 0000000000000000000000000000000000000000..beaab1f066d6032aa9c879ef850cdb98c84cbe0e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/9c6831bab18873d643e710c5d4ac73f2ffb018fd0b5a6426ad2fad8decd5da47.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e261d855eca3539f12f37dfc1a838f47f68f856ffcbdfda159c96b63760a31c +size 5231 diff --git a/data/2025/2503_09xxx/2503.09567/images/a00f7ae26a61e2deed5c2e110f8b5f44aca2021285ec38658a2e306eecd29366.jpg b/data/2025/2503_09xxx/2503.09567/images/a00f7ae26a61e2deed5c2e110f8b5f44aca2021285ec38658a2e306eecd29366.jpg new file mode 100644 index 0000000000000000000000000000000000000000..28453035ab57b39ec9714752cadaa88c6618360d --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/a00f7ae26a61e2deed5c2e110f8b5f44aca2021285ec38658a2e306eecd29366.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80bb476e425177d07968cd9be6cb9066dab052593f0b4775392b9d8131030030 +size 5429 diff --git a/data/2025/2503_09xxx/2503.09567/images/aa202666c0347e30f452afb50132bf46686ba98433600b7ee8d0e4c2f30ad8f5.jpg b/data/2025/2503_09xxx/2503.09567/images/aa202666c0347e30f452afb50132bf46686ba98433600b7ee8d0e4c2f30ad8f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..769665b612f9a12d18f4a7966cb8b7222ac14763 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/aa202666c0347e30f452afb50132bf46686ba98433600b7ee8d0e4c2f30ad8f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ed5962f0f0b10c2e3c00d6575ec7b6a1429088c3c821aaf6e3e7c9f758bc790 +size 14148 diff --git a/data/2025/2503_09xxx/2503.09567/images/ae0384cb2d35989e0913fcc05ec7fe401f4d3acdd492815afce7dcdd64d2789c.jpg b/data/2025/2503_09xxx/2503.09567/images/ae0384cb2d35989e0913fcc05ec7fe401f4d3acdd492815afce7dcdd64d2789c.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..75e3a70ad1a9a948d018c138e4fa8ade675c77a7 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/ae0384cb2d35989e0913fcc05ec7fe401f4d3acdd492815afce7dcdd64d2789c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d0f722d621a4825801db3dee99d1ff79d26c94a39330669bdd6b996818c8016 +size 36733 diff --git a/data/2025/2503_09xxx/2503.09567/images/b3686b17aa6dae7dfb30b34c5e285af765d180305957e5c15bbbeed64d436326.jpg b/data/2025/2503_09xxx/2503.09567/images/b3686b17aa6dae7dfb30b34c5e285af765d180305957e5c15bbbeed64d436326.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec4507ddd108daedb98b1c4f2d64b76550f5c31c --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/b3686b17aa6dae7dfb30b34c5e285af765d180305957e5c15bbbeed64d436326.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13b5a785148e3c4bf0d3008a4bf15c9003c8fe55843b0bb8c4501828534c9da9 +size 23423 diff --git a/data/2025/2503_09xxx/2503.09567/images/b58ea3cfdd162d9e8dc0a98bea568dac497bec49f930188fabfb39d4a8af9188.jpg b/data/2025/2503_09xxx/2503.09567/images/b58ea3cfdd162d9e8dc0a98bea568dac497bec49f930188fabfb39d4a8af9188.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ea0b9195f40d6fac296b1114aed39bd995ede87 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/b58ea3cfdd162d9e8dc0a98bea568dac497bec49f930188fabfb39d4a8af9188.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a9919855c6173b9c1129afc2b3158a88810a65d550b353e655e3085e94057e1 +size 16399 diff --git a/data/2025/2503_09xxx/2503.09567/images/be56637e4c051b7d3d3a17014777899ed5d63d7f01c713db6f624e8d8196114d.jpg b/data/2025/2503_09xxx/2503.09567/images/be56637e4c051b7d3d3a17014777899ed5d63d7f01c713db6f624e8d8196114d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fda34ea5bbf9390374e6930c377f76f60af331eb --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09567/images/be56637e4c051b7d3d3a17014777899ed5d63d7f01c713db6f624e8d8196114d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48d042774a0aa952de0d44ae56d5d8491b630bd629507687232da2b3e5c45418 +size 148999 diff --git a/data/2025/2503_09xxx/2503.09567/images/c8c822bb78952ff9aac5527ba39034f466d82e73c5d2445eeca70e20cc8d4ed2.jpg b/data/2025/2503_09xxx/2503.09567/images/c8c822bb78952ff9aac5527ba39034f466d82e73c5d2445eeca70e20cc8d4ed2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..db7cf525df9b7051c35352fa46989b19e7c2bdf2 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/c8c822bb78952ff9aac5527ba39034f466d82e73c5d2445eeca70e20cc8d4ed2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ddfeaad173c4e244d8cfcadc02535c8a3e6000c9d510875a64737668f180cf1 +size 16859 diff --git a/data/2025/2503_09xxx/2503.09567/images/d5927788e7155f8644d57e414178a3877fe52ffa58b3baf4651f6d732f0d157f.jpg b/data/2025/2503_09xxx/2503.09567/images/d5927788e7155f8644d57e414178a3877fe52ffa58b3baf4651f6d732f0d157f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6d0ccbecb66877c64b30de2c13c12836f3638174 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/d5927788e7155f8644d57e414178a3877fe52ffa58b3baf4651f6d732f0d157f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e4d76b061f75e02868a2369d4e52e899092bd3d61c4ee340f4cae5b4a2e87d2 +size 160294 diff --git a/data/2025/2503_09xxx/2503.09567/images/dd4b7e43b794582020a033da732daf0b1be53e45111b8e9717414d483b50896e.jpg b/data/2025/2503_09xxx/2503.09567/images/dd4b7e43b794582020a033da732daf0b1be53e45111b8e9717414d483b50896e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0fe469aa72f86b8eea5940d9911147f313c7d4f1 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/dd4b7e43b794582020a033da732daf0b1be53e45111b8e9717414d483b50896e.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:d7f666c2f4f211cc7525d17da6ecae5bd7428c6cee164504d1f60d202608aa86 +size 15988 diff --git a/data/2025/2503_09xxx/2503.09567/images/ebcdb7892865c413666c63573d7f974aac12588169f830423b0bd269bf85e3b2.jpg b/data/2025/2503_09xxx/2503.09567/images/ebcdb7892865c413666c63573d7f974aac12588169f830423b0bd269bf85e3b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cd0e37f387571e662e520776828f70eacda2c985 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/ebcdb7892865c413666c63573d7f974aac12588169f830423b0bd269bf85e3b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:916d0b506953a93d5c64c1bb643e9723bdf79cc87d69c79f911364d04ce2abfc +size 182224 diff --git a/data/2025/2503_09xxx/2503.09567/images/f0a50a247a0dd2634591eb3435973b764f754ecd980e41927ea8e8c53cf3b966.jpg b/data/2025/2503_09xxx/2503.09567/images/f0a50a247a0dd2634591eb3435973b764f754ecd980e41927ea8e8c53cf3b966.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4b09ec8119e85f4da3870d4c663368e22531525 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/f0a50a247a0dd2634591eb3435973b764f754ecd980e41927ea8e8c53cf3b966.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2f1575175b4164c6d347c50a9080e882c972852ecc29f076c770a9c24a7ef5c +size 125554 diff --git a/data/2025/2503_09xxx/2503.09567/images/f0eadf51633e7fb658ee5728c1487fe0163f55fb50102d3c0b34bdb3de0da945.jpg b/data/2025/2503_09xxx/2503.09567/images/f0eadf51633e7fb658ee5728c1487fe0163f55fb50102d3c0b34bdb3de0da945.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7b91e34be40be9c286b0ac77874e95200df648c --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/f0eadf51633e7fb658ee5728c1487fe0163f55fb50102d3c0b34bdb3de0da945.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d52627e6fafc0cb20cecd7b3e422d98edeecb64d722a05d30308db6d3cb6902e +size 101429 diff --git 
a/data/2025/2503_09xxx/2503.09567/images/f2f163b377b83a2138812ae0feb6376a9a672ddaaf020fc013620363b5df6fb0.jpg b/data/2025/2503_09xxx/2503.09567/images/f2f163b377b83a2138812ae0feb6376a9a672ddaaf020fc013620363b5df6fb0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..911e5cb8b3b136f20d940c1509c1f83839a4d79b --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/f2f163b377b83a2138812ae0feb6376a9a672ddaaf020fc013620363b5df6fb0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e99d2be6637d52997b73b65fdb01676c99368afe7f213b838f225b34485fdd19 +size 4515 diff --git a/data/2025/2503_09xxx/2503.09567/images/fa1bbeeb7a7a9e97707e957eb9cfc744f2a2eba4ab0e5a7c5f73282936c28213.jpg b/data/2025/2503_09xxx/2503.09567/images/fa1bbeeb7a7a9e97707e957eb9cfc744f2a2eba4ab0e5a7c5f73282936c28213.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5bdbf1a62be2e68465dbfa95e077ab723d86e4a7 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/fa1bbeeb7a7a9e97707e957eb9cfc744f2a2eba4ab0e5a7c5f73282936c28213.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da7abd44de772981622bd11003d1e918df6493c5976bc136d5bed1776437d144 +size 311351 diff --git a/data/2025/2503_09xxx/2503.09567/images/ff164d8152d0e42a100061acaca7da8e5deb82846df7779c8c7d61fa44616288.jpg b/data/2025/2503_09xxx/2503.09567/images/ff164d8152d0e42a100061acaca7da8e5deb82846df7779c8c7d61fa44616288.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0a4a974d3bcffe447b90ba48d3804b2f24444612 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/images/ff164d8152d0e42a100061acaca7da8e5deb82846df7779c8c7d61fa44616288.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c36396a5ffb3ad306208966d918f1c1252e633548abddced7a982bf8873b04e2 +size 3047 diff --git a/data/2025/2503_09xxx/2503.09567/layout.json b/data/2025/2503_09xxx/2503.09567/layout.json new file mode 100644 index 
0000000000000000000000000000000000000000..9e78c571dc153d740e640116a2b103e765a0b10e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09567/layout.json @@ -0,0 +1,64684 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 140, + 208, + 482, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 208, + 482, + 243 + ], + "spans": [ + { + "bbox": [ + 140, + 208, + 482, + 243 + ], + "type": "text", + "content": "Qiguang Chen† Libo Qin‡ Jinhao Liu† Dengyun Peng† Jiannan Guan† Peng Wang‡ Mengkang Hu◇ Yuhang Zhou Te Gao† Wanxiang Che† LARG," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 178, + 245, + 443, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 245, + 443, + 268 + ], + "spans": [ + { + "bbox": [ + 178, + 245, + 443, + 268 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 178, + 245, + 443, + 268 + ], + "type": "text", + "content": " Research Center for Social Computing and Interactive Robotics, " + }, + { + "bbox": [ + 178, + 245, + 443, + 268 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 178, + 245, + 443, + 268 + ], + "type": "text", + "content": " Harbin Institute of Technology" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 164, + 269, + 459, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 269, + 459, + 281 + ], + "spans": [ + { + "bbox": [ + 164, + 269, + 459, + 281 + ], + "type": "inline_equation", + "content": "^{\\ddagger}" + }, + { + "bbox": [ + 164, + 269, + 459, + 281 + ], + "type": "text", + "content": " School of Computer Science and Engineering, Central South University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 246, + 281, + 376, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 281, + 376, + 292 + ], + "spans": [ + { + "bbox": [ + 246, + 281, + 376, + 292 + ], + "type": "text", + "content": "The University of Hong Kong" + } + ] + } + 
], + "index": 5 + }, + { + "bbox": [ + 272, + 292, + 351, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 272, + 292, + 351, + 303 + ], + "spans": [ + { + "bbox": [ + 272, + 292, + 351, + 303 + ], + "type": "text", + "content": "Fudan University" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 184, + 304, + 439, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 304, + 439, + 314 + ], + "spans": [ + { + "bbox": [ + 184, + 304, + 439, + 314 + ], + "type": "text", + "content": "{qgchen,car}@ir.hit.edu.cn,lbqin@csu.edu.cn" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 211, + 324, + 410, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 324, + 410, + 337 + ], + "spans": [ + { + "bbox": [ + 211, + 324, + 410, + 337 + ], + "type": "text", + "content": "Project: https://long-cot.github.io/" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 137, + 346, + 484, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 346, + 484, + 358 + ], + "spans": [ + { + "bbox": [ + 137, + 346, + 484, + 358 + ], + "type": "text", + "content": "Github: LightChen233/Awesome-Long-Chain-of-Thought-Reasoning" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 113, + 390, + 509, + 654 + ], + "blocks": [ + { + "bbox": [ + 113, + 390, + 509, + 654 + ], + "lines": [ + { + "bbox": [ + 113, + 390, + 509, + 654 + ], + "spans": [ + { + "bbox": [ + 113, + 390, + 509, + 654 + ], + "type": "image", + "image_path": "3bcec7826e1fbcdb6dfe89578c968d54d801a27312caf0fc018f86ba59fa632d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 143, + 95, + 183, + 130 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 95, + 183, + 130 + ], + "spans": [ + { + "bbox": [ + 143, + 95, + 183, + 130 + ], + "type": "text", + "content": "#" + } + ] + } + ], + 
"index": 0 + }, + { + "bbox": [ + 188, + 106, + 471, + 168 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 106, + 471, + 168 + ], + "spans": [ + { + "bbox": [ + 188, + 106, + 471, + 168 + ], + "type": "text", + "content": "Towards Reasoning Era: A Survey of Chain-of-Thought for Reasoning Large Language Models" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 14, + 226, + 36, + 563 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 226, + 36, + 563 + ], + "spans": [ + { + "bbox": [ + 14, + 226, + 36, + 563 + ], + "type": "text", + "content": "arXiv:2503.09567v5 [cs.AI] 18 Jul 2025" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 281, + 71, + 331, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 71, + 331, + 85 + ], + "spans": [ + { + "bbox": [ + 281, + 71, + 331, + 85 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 140, + 96, + 471, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 96, + 471, + 339 + ], + "spans": [ + { + "bbox": [ + 140, + 96, + 471, + 339 + ], + "type": "text", + "content": "Recent advancements in reasoning with large language models (RLLMs), such as OpenAI-o1 and DeepSeek-R1, have demonstrated their impressive capabilities in complex domains like mathematics and coding. A central factor in their success lies in the application of long chain-of-thought (Long CoT) characteristics, which enhance reasoning abilities and enable the solution of intricate problems. However, despite these developments, a comprehensive survey on Long CoT is still lacking, limiting our understanding of its distinctions from traditional short chain-of-thought (Short CoT) and complicating ongoing debates on issues like \"overthinking\" and \"inference-time scaling\". 
This survey seeks to fill this gap by offering a unified perspective on Long CoT. Specifically, (1) We first distinguish Long CoT from Short CoT and introduce a novel taxonomy to categorize current reasoning paradigms. (2) Next, we explore the key characteristics of Long CoT: deep reasoning, extensive exploration, and feasible reflection, which enable models to handle more complex tasks and produce more efficient, coherent outcomes compared to the shallower Short CoT. (3) We then investigate key phenomena such as the emergence of Long CoT with these characteristics, including overthinking, and inference-time scaling, offering insights into how these processes manifest in practice. (4) Finally, we identify significant research gaps and highlight promising future directions, including the integration of multi-modal reasoning, efficiency improvements, and enhanced knowledge frameworks. By providing a structured overview, this survey aims to inspire future research and further the development of reasoning large language models1." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 359, + 194, + 372 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 359, + 194, + 372 + ], + "spans": [ + { + "bbox": [ + 105, + 359, + 194, + 372 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 384, + 506, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 384, + 506, + 517 + ], + "spans": [ + { + "bbox": [ + 104, + 384, + 506, + 517 + ], + "type": "text", + "content": "In recent years, as shown in Figure 1, the emergence of reasoning large language models (RLLMs) such as OpenAI o1 [307] and DeepSeek R1 [227] has sparked a growing body of research into Long Chain-of-Thought (Long CoT) reasoning, greatly improving their mathematical reasoning, programming tasks, and multidisciplinary knowledge reasoning capabilities [696, 980, 722, 79, 961, 200, 1113, 793], even passing Turing Test [334]. This shift marks a significant departure from traditional approaches to task handling in large language models (LLMs) [1147, 619, 622, 599]. Unlike the shorter chain-of-thought (Short CoT) used in traditional LLMs, Long CoT reasoning entails a more detailed, iterative process of exploration and reflection within a given problem space by inference-time scaling [419, 733, 524]. This process has led to notable advancements in mathematical and logical reasoning, as well as in exploring how supervised fine-tuning (SFT) and reinforcement learning (RL) techniques can enhance the learning and exploration of extended reasoning chains [623, 550]." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 521, + 506, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 521, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 521, + 506, + 588 + ], + "type": "text", + "content": "However, there is no comprehensive survey to systematically understand the main factors and recent efforts of Long CoT for RLLMs, which hinders the development of RLLMs. As a result, there are ongoing debates about the effectiveness of simple \"inference-time scaling\" for Longer CoT [864, 486] versus the argument that \"over-thinking\" from excessively long scaling can harm LLMs and introduce unnecessary complexity [103, 142, 357]. Moreover, some researchers argue that, when solving specific problems, there is no clear relationship between length and accuracy [886]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 591, + 507, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 591, + 507, + 703 + ], + "spans": [ + { + "bbox": [ + 104, + 591, + 507, + 703 + ], + "type": "text", + "content": "To address this gap, we provide an extensive and comprehensive survey of Long CoT. Specifically, as illustrated in Figure 2, we first define and examine the distinctions between Long CoT and traditional Short CoT, focusing on the following key aspects: (1) Deep Reasoning, which requires a sufficient depth of logical processing to manage an extensive set of logical nodes; (2) Extensive Exploration, which involves generating parallel uncertain nodes and transitioning from known to unknown logic; and (3) Feasible Reflection, which involves feedback and refinement of logical connections. These characteristics enable Long CoT paradigms to integrate more intricate reasoning and accommodate a broader range of logical structures, ultimately leading to more efficient and coherent outcomes. 
Subsequently, we systematically explore the underlying explanations for key phenomena associated with Long CoT, such as its emergence, the overthinking phenomenon," + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 711, + 497, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 711, + 497, + 723 + ], + "spans": [ + { + "bbox": [ + 117, + 711, + 497, + 723 + ], + "type": "text", + "content": "Our logo refers to a cute cartoon image - Snake Puppy. Header Image is inspired by Yaoting et al. [959]" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 73, + 504, + 266 + ], + "blocks": [ + { + "bbox": [ + 111, + 73, + 504, + 266 + ], + "lines": [ + { + "bbox": [ + 111, + 73, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 111, + 73, + 504, + 266 + ], + "type": "image", + "image_path": "be56637e4c051b7d3d3a17014777899ed5d63d7f01c713db6f624e8d8196114d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 271, + 506, + 338 + ], + "lines": [ + { + "bbox": [ + 104, + 271, + 506, + 338 + ], + "spans": [ + { + "bbox": [ + 104, + 271, + 506, + 338 + ], + "type": "text", + "content": "Figure 1: Evolution of selected Long CoT over the past three years, where colored branches 
represent different characteristics: deep reasoning, feasible reflection, and extensive exploration. Each characteristic is further divided into key areas: Deep reasoning includes its format and learning methods. Feasible reflection focuses on feedback and refinement techniques during reflection process as optimization strategies. Extensive exploration addresses scaling, internal, and external exploration as key improvements to Long CoT." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 361, + 504, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 504, + 406 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 504, + 406 + ], + "type": "text", + "content": "inference-time scaling during testing, and the \"Aha Moment,\" among others. To our knowledge, This is the first comprehensive survey dedicated to these specific topics. Finally, considering the extensive body of literature, we highlight promising areas for future research and suggest valuable open-resource frameworks and datasets that can serve as a foundation for future investigations." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 410, + 312, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 410, + 312, + 421 + ], + "spans": [ + { + "bbox": [ + 105, + 410, + 312, + 421 + ], + "type": "text", + "content": "The main contributions of this work are as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 427, + 504, + 543 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 107, + 427, + 504, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 427, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 107, + 427, + 504, + 460 + ], + "type": "text", + "content": "- Systematic Distinction: In this work, we first introduce the concept of Long CoT reasoning and distinguish it from the traditional Short CoT, thereby providing a clear framework for understanding both paradigms and their respective characteristics." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 463, + 504, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 463, + 504, + 496 + ], + "spans": [ + { + "bbox": [ + 107, + 463, + 504, + 496 + ], + "type": "text", + "content": "- Explanation of Hot Phenomena: We systematically investigate the notable phenomena associated with Long CoT reasoning, such as overthinking, inference-time scaling, and the \"Aha Moment\", offering valuable insights into the cognitive processes involved in complex reasoning." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 498, + 504, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 498, + 504, + 543 + ], + "spans": [ + { + "bbox": [ + 107, + 498, + 504, + 543 + ], + "type": "text", + "content": "- Emerging Challenges and Frontiers: We explore the emerging challenges within the field of Long CoT reasoning and identify key research frontiers. 
Given the vast body of literature, we highlight areas where further inquiry could significantly advance the development of Long CoT methodologies." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 561, + 324, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 561, + 324, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 561, + 324, + 574 + ], + "type": "text", + "content": "2 Discussion of Long CoT v.s. Short CoT" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 586, + 506, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 506, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 506, + 654 + ], + "type": "text", + "content": "This section formalizes the key differences between Long Chain-of-Thought (Long CoT) and Short Chain-of-Thought (Short CoT), emphasizing reasoning depth, revisiting connections, and logical node exploration [858]. These distinctions are clearly separate from System 1 and System 2 thinking. The comparison between Long CoT and Short CoT is framed within System 2, with Long CoT involving more thorough reasoning, reflection, and exploration, while Short CoT generally prioritizes shallow and efficient logic over exhaustive reasoning." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 668, + 230, + 679 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 668, + 230, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 230, + 679 + ], + "type": "text", + "content": "2.1 Overview of Short CoT" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 689, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 506, + 723 + ], + "type": "text", + "content": "As illustrated by Figure 2, Short CoT is typically characterized by a shallow, linear reasoning process, where conclusions are drawn sequentially, often relying on a limited number of logical nodes [551]. This reasoning is usually rapid and straightforward, with simple, surface-level transitions and minimal" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": 
"text", + "content": "3" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 147, + 75, + 474, + 85 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 75, + 474, + 85 + ], + "spans": [ + { + "bbox": [ + 147, + 75, + 474, + 85 + ], + "type": "text", + "content": "Proof of Number Theory Problem: For any positive integer " + }, + { + "bbox": [ + 147, + 75, + 474, + 85 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 147, + 75, + 474, + 85 + ], + "type": "text", + "content": ", there exists a positive integer " + }, + { + "bbox": [ + 147, + 75, + 474, + 85 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 147, + 75, + 474, + 85 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 147, + 75, + 474, + 85 + ], + "type": "inline_equation", + "content": "m^2 + 1" + }, + { + "bbox": [ + 147, + 75, + 474, + 85 + ], + "type": "text", + "content": " is divisible by " + }, + { + "bbox": [ + 147, + 75, + 474, + 85 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 147, + 75, + 474, + 85 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 111, + 90, + 500, + 215 + ], + "blocks": [ + { + "bbox": [ + 111, + 90, + 500, + 215 + ], + "lines": [ + { + "bbox": [ + 111, + 90, + 500, + 215 + ], + "spans": [ + { + "bbox": [ + 111, + 90, + 500, + 215 + ], + "type": "image", + "image_path": "f0eadf51633e7fb658ee5728c1487fe0163f55fb50102d3c0b34bdb3de0da945.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 220, + 506, + 254 + ], + "lines": [ + { + "bbox": [ + 104, + 220, + 506, + 254 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 506, + 254 + ], + "type": "text", + "content": "Figure 2: The differences between advanced Long CoT and traditional Short CoT are characterized by three key characteristics: deep reasoning, feasible reflection, and extensive exploration. Moreover, Long CoT integrates all these characteristics to achieve substantial logical efficacy." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 277, + 504, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 277, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 504, + 300 + ], + "type": "text", + "content": "exploration of alternative paths, which restricts its generalizability [683]. 
Formally, given a reasoning model " + }, + { + "bbox": [ + 104, + 277, + 504, + 300 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 104, + 277, + 504, + 300 + ], + "type": "text", + "content": ", we can define the rationale of Short CoT " + }, + { + "bbox": [ + 104, + 277, + 504, + 300 + ], + "type": "inline_equation", + "content": "(\\mathsf{C}\\circ \\mathsf{T}_S)" + }, + { + "bbox": [ + 104, + 277, + 504, + 300 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 308, + 505, + 322 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 308, + 505, + 322 + ], + "spans": [ + { + "bbox": [ + 130, + 308, + 505, + 322 + ], + "type": "interline_equation", + "content": "\\mathrm {C o T} _ {S} = \\mathcal {R} \\left(\\left\\{n _ {i} \\right\\} _ {i = 1} ^ {k} | (k \\leq \\mathcal {B} _ {s}) \\wedge (j = 1 \\Leftrightarrow \\forall i \\leq k, n _ {i} \\rightarrow n _ {i + j}) \\wedge (\\forall i \\neq j \\leq k, n _ {i} \\neq n _ {j})\\right), \\tag {1}", + "image_path": "77d6e310225283b575bdc27fdde0f240da93987fe57f7d9af2832fc35ebec190.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 329, + 506, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 329, + 506, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 329, + 506, + 374 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 329, + 506, + 374 + ], + "type": "inline_equation", + "content": "n_1" + }, + { + "bbox": [ + 104, + 329, + 506, + 374 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 329, + 506, + 374 + ], + "type": "inline_equation", + "content": "n_k" + }, + { + "bbox": [ + 104, + 329, + 506, + 374 + ], + "type": "text", + "content": " represent a sequence of logical nodes, which naturally satisfy that " + }, + { + "bbox": [ + 104, + 329, + 506, + 374 + ], + "type": "inline_equation", + "content": 
"\\forall i, n_i \\to n_{i+1}" + }, + { + "bbox": [ + 104, + 329, + 506, + 374 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 104, + 329, + 506, + 374 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_s" + }, + { + "bbox": [ + 104, + 329, + 506, + 374 + ], + "type": "text", + "content": " denotes the upper boundary on the number of logical nodes, as defined by Chen et al. [90]. In this paradigm, the reasoning progresses sequentially from one node to the next, with minimal revisitation of previous nodes and little exploration of alternative logical paths." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 389, + 229, + 401 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 389, + 229, + 401 + ], + "spans": [ + { + "bbox": [ + 105, + 389, + 229, + 401 + ], + "type": "text", + "content": "2.2 Overview of Long CoT" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 411, + 506, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 411, + 506, + 466 + ], + "spans": [ + { + "bbox": [ + 104, + 411, + 506, + 466 + ], + "type": "text", + "content": "In contrast, Long CoT involves deeper reasoning, reflective analysis, and a broader exploration of logical structures. It facilitates reasoning across a wider range of logical steps, addressing both known and unknown elements of a problem [194, 858]. Building on this, Long CoT expands upon the constraints presented in Equation 1 based on tree structures by incorporating three critical components: deep reasoning, exploration, and reflection." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 471, + 506, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 471, + 506, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 471, + 506, + 559 + ], + "type": "text", + "content": "These components play distinct yet complementary roles in enhancing cognitive processes. 
Deep reasoning ensures each logical step is executed rigorously, even within complex structures, fostering robust logic across intricate relationships. Exploration encourages the identification of new pathways, revealing potential avenues that may not be immediately obvious. Reflection enables iterative analysis and reassessment of conclusions, allowing reasoning to evolve throughout problem-solving. By distinguishing these three categories, Long CoT enhances its ability to address a broader range of problems with precision and depth. As shown in Figure 3, we will now discuss these key differences in detail." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 573, + 268, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 573, + 268, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 573, + 268, + 586 + ], + "type": "text", + "content": "2.2.1 Deep Reasoning for Long CoT" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 594, + 506, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 506, + 649 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 506, + 649 + ], + "type": "text", + "content": "As shown by Figure 2, deep reasoning refers to the capability to perform deep and thorough logical analysis across multiple interconnected logical nodes, where Short CoT generally can never achieve. This capability is essential when tackling complex problems that require a massive number of logical deductions to arrive at a valid conclusion. 
To better define and understand deep reasoning, we frame it as a capability that primarily relaxes the first constraint in Equation 1, as expressed by the following:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 240, + 657, + 504, + 669 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 657, + 504, + 669 + ], + "spans": [ + { + "bbox": [ + 240, + 657, + 504, + 669 + ], + "type": "interline_equation", + "content": "k \\leq \\mathcal {B} _ {s} \\mapsto k \\leq \\mathcal {B} _ {l} \\wedge \\mathcal {B} _ {s} \\ll \\mathcal {B} _ {l}, \\tag {2}", + "image_path": "9628c4a63cf86935164b10c8341472684c37377c14dfa2282ae72c4f75176336.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_l" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": " represents the upper boundary for Long CoT reasoning, which can accommodate much more intricate logical nodes compared to the smaller boundary " + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_s" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": " for Short CoT. The larger boundary " + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_l" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": " alleviates issues related to insufficient depth in reasoning, thereby reducing the risk of generating unresolved answers or hallucinated responses in short-form reasoning." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "spans": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 62, + 509, + 427 + ], + "blocks": [ + { + "bbox": [ + 106, + 62, + 509, + 427 + ], + "lines": [ + { + "bbox": [ + 106, + 62, + 509, + 427 + ], + "spans": [ + { + "bbox": [ + 106, + 62, + 509, + 427 + ], + "type": "image", + "image_path": "fa1bbeeb7a7a9e97707e957eb9cfc744f2a2eba4ab0e5a7c5f73282936c28213.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 431, + 504, + 
454 + ], + "lines": [ + { + "bbox": [ + 104, + 431, + 504, + 454 + ], + "spans": [ + { + "bbox": [ + 104, + 431, + 504, + 454 + ], + "type": "text", + "content": "Figure 3: Taxonomy of Long CoT, which includes deep reasoning, feasible reflection, and extensive exploration methodologies." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 125, + 479, + 263, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 479, + 263, + 491 + ], + "spans": [ + { + "bbox": [ + 125, + 479, + 263, + 491 + ], + "type": "text", + "content": "Key Difference: Reasoning Depth" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 122, + 497, + 490, + 545 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 122, + 497, + 490, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 497, + 490, + 520 + ], + "spans": [ + { + "bbox": [ + 122, + 497, + 490, + 520 + ], + "type": "text", + "content": "- Short CoT typically addresses a limited set of logical nodes, involving shallow reasoning, and struggles with problems requiring complex or intricate logical structures." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 122, + 521, + 490, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 521, + 490, + 545 + ], + "spans": [ + { + "bbox": [ + 122, + 521, + 490, + 545 + ], + "type": "text", + "content": "- Long CoT is designed to accommodate a significantly larger set of logical nodes, allowing for deeper logic and more thorough analysis during the reasoning process." 
+ } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 567, + 293, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 567, + 293, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 293, + 578 + ], + "type": "text", + "content": "2.2.2 Extensive Exploration for Long CoT" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 587, + 504, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 587, + 504, + 652 + ], + "spans": [ + { + "bbox": [ + 104, + 587, + 504, + 652 + ], + "type": "text", + "content": "As shown by Figure 2, Long CoT encourages branching out to extensively explore uncertain or unknown logical nodes, thereby expanding the potential set of reasoning paths. This exploration is particularly critical when solving problems characterized by ambiguity, incomplete information, or multiple possible solutions [43, 1016, 871]. More specifically, we describe how extensive exploration primarily addresses the relaxation of the second constraint in Equation 1, which can be formalized as follows:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 178, + 658, + 504, + 671 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 658, + 504, + 671 + ], + "spans": [ + { + "bbox": [ + 178, + 658, + 504, + 671 + ], + "type": "interline_equation", + "content": "j = 1 \\Leftrightarrow \\forall i \\leq k, n _ {i} \\rightarrow n _ {i + j} \\mapsto \\exists m, \\forall i, \\forall j \\leq m, n _ {i} \\rightarrow n _ {i + j}, \\tag {3}", + "image_path": "1c2231d3720d10adabb9b0e0eb328582cefd25a3bc7b9de3b2b5d8e38fcb3e48.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "content": "where the condition indicates that for a logical node 
" + }, + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "inline_equation", + "content": "n_i" + }, + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "content": ", there are " + }, + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "content": " nodes that are explored in parallel. The acceptability of parallel exploration allows for a more systematic approach, enabling the exploration of previously unconsidered logical paths. This, in turn, helps maximize the understanding of all possible solutions, ultimately leading to the correct final answer." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 52 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 52 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 52 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 52, + 187, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 52, + 187, + 55 + ], + "spans": [ + { + "bbox": [ + 141, + 52, + 187, + 55 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 
308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 125, + 72, + 313, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 72, + 313, + 84 + ], + "spans": [ + { + "bbox": [ + 125, + 72, + 313, + 84 + ], + "type": "text", + "content": "Key Difference: Exploration of Logical Nodes" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 122, + 91, + 488, + 139 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 122, + 91, + 488, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 91, + 488, + 114 + ], + "spans": [ + { + "bbox": [ + 122, + 91, + 488, + 114 + ], + "type": "text", + "content": "- Short CoT generally restricts exploration to a fixed set of logical nodes, often resulting in oversimplified reasoning and limited exploration." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 123, + 116, + 488, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 116, + 488, + 139 + ], + "spans": [ + { + "bbox": [ + 123, + 116, + 488, + 139 + ], + "type": "text", + "content": "- Long CoT explores more various paths, including uncertain or uncharted areas, fostering more nuanced and comprehensive problem-solving." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 157, + 279, + 170 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 157, + 279, + 170 + ], + "spans": [ + { + "bbox": [ + 105, + 157, + 279, + 170 + ], + "type": "text", + "content": "2.2.3 Feasible Reflection for Long CoT" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 177, + 504, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 177, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 177, + 504, + 232 + ], + "type": "text", + "content": "As shown by Figure 2, Long CoT involves revisiting previous logical nodes to verify their connections are valid and accurate, and then correcting them or selecting an alternative logical path. Formally, feasible reflection relaxes the third constraint in Equation 1, which originally requires acyclic reasoning such that " + }, + { + "bbox": [ + 104, + 177, + 504, + 232 + ], + "type": "inline_equation", + "content": "n_i \\neq n_j" + }, + { + "bbox": [ + 104, + 177, + 504, + 232 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 104, + 177, + 504, + 232 + ], + "type": "inline_equation", + "content": "i \\neq j \\leq k" + }, + { + "bbox": [ + 104, + 177, + 504, + 232 + ], + "type": "text", + "content": ". 
In contrast, feasible reflection permits the reasoning path to return to a previously visited node, captured as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 209, + 239, + 505, + 252 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 239, + 505, + 252 + ], + "spans": [ + { + "bbox": [ + 209, + 239, + 505, + 252 + ], + "type": "interline_equation", + "content": "\\forall i \\neq j \\leq k, n _ {i} \\neq n _ {j} \\mapsto \\exists i < j \\leq k, n _ {i} = n _ {j}, \\tag {4}", + "image_path": "a00f7ae26a61e2deed5c2e110f8b5f44aca2021285ec38658a2e306eecd29366.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 258, + 504, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 504, + 293 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 504, + 293 + ], + "type": "text", + "content": "where this condition indicates that, for a logical node " + }, + { + "bbox": [ + 104, + 258, + 504, + 293 + ], + "type": "inline_equation", + "content": "n_{j-1}" + }, + { + "bbox": [ + 104, + 258, + 504, + 293 + ], + "type": "text", + "content": ", the subsequent node is not limited to the original next node " + }, + { + "bbox": [ + 104, + 258, + 504, + 293 + ], + "type": "inline_equation", + "content": "\\hat{n}_j" + }, + { + "bbox": [ + 104, + 258, + 504, + 293 + ], + "type": "text", + "content": ". 
Instead, it may transition to " + }, + { + "bbox": [ + 104, + 258, + 504, + 293 + ], + "type": "inline_equation", + "content": "n_i" + }, + { + "bbox": [ + 104, + 258, + 504, + 293 + ], + "type": "text", + "content": " (i.e., the next logical node becomes " + }, + { + "bbox": [ + 104, + 258, + 504, + 293 + ], + "type": "inline_equation", + "content": "n_j" + }, + { + "bbox": [ + 104, + 258, + 504, + 293 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 258, + 504, + 293 + ], + "type": "inline_equation", + "content": "n_j = n_i" + }, + { + "bbox": [ + 104, + 258, + 504, + 293 + ], + "type": "text", + "content": "). Practically, reflection implementation consists of two components:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 300, + 505, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 505, + 366 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 505, + 366 + ], + "type": "text", + "content": "Feedback refers to evaluating both overall and intermediate outputs for correctness and quality, also known as critique or verification. It can be derived from external sources, validation checks, or by reflecting on prior conclusions within the reasoning process. Formally, at each step " + }, + { + "bbox": [ + 104, + 300, + 505, + 366 + ], + "type": "inline_equation", + "content": "n_i" + }, + { + "bbox": [ + 104, + 300, + 505, + 366 + ], + "type": "text", + "content": ", a verification process " + }, + { + "bbox": [ + 104, + 300, + 505, + 366 + ], + "type": "inline_equation", + "content": "\\mathcal{V}_i" + }, + { + "bbox": [ + 104, + 300, + 505, + 366 + ], + "type": "text", + "content": " ensures the correctness, feasibility, and consistency of the reasoning. 
If an issue is identified, the process redirects " + }, + { + "bbox": [ + 104, + 300, + 505, + 366 + ], + "type": "inline_equation", + "content": "n_i" + }, + { + "bbox": [ + 104, + 300, + 505, + 366 + ], + "type": "text", + "content": " to the nearest correct node " + }, + { + "bbox": [ + 104, + 300, + 505, + 366 + ], + "type": "inline_equation", + "content": "n_j" + }, + { + "bbox": [ + 104, + 300, + 505, + 366 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 300, + 505, + 366 + ], + "type": "inline_equation", + "content": "j < i" + }, + { + "bbox": [ + 104, + 300, + 505, + 366 + ], + "type": "text", + "content": ". This relationship is formalized as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 247, + 367, + 504, + 380 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 367, + 504, + 380 + ], + "spans": [ + { + "bbox": [ + 247, + 367, + 504, + 380 + ], + "type": "interline_equation", + "content": "\\mathcal {F} _ {i}, n _ {j} \\leftarrow \\operatorname {F e e d b a c k} \\left(\\mathrm {C o T} _ {L} ^ {i}\\right) \\tag {5}", + "image_path": "f2f163b377b83a2138812ae0feb6376a9a672ddaaf020fc013620363b5df6fb0.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 384, + 504, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 384, + 504, + 407 + ], + "spans": [ + { + "bbox": [ + 104, + 384, + 504, + 407 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 384, + 504, + 407 + ], + "type": "inline_equation", + "content": "\\mathrm{CoT}_L^i = \\{n_1,\\dots ,n_i\\}" + }, + { + "bbox": [ + 104, + 384, + 504, + 407 + ], + "type": "text", + "content": " represents the current logical path up to the " + }, + { + "bbox": [ + 104, + 384, + 504, + 407 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 384, + 504, + 407 + ], + "type": "text", + "content": " -th logical node for Long CoT." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 416, + 506, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 416, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 416, + 506, + 449 + ], + "type": "text", + "content": "Refinement involves adjusting intermediate steps or modifying the logical flow to correct inconsistencies or address gaps based on the given feedback. This process can be expressed mathematically as follows:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 228, + 449, + 504, + 463 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 449, + 504, + 463 + ], + "spans": [ + { + "bbox": [ + 228, + 449, + 504, + 463 + ], + "type": "interline_equation", + "content": "\\widetilde {n} _ {i + 1} \\leftarrow \\operatorname {R e f i n e} \\left(n _ {i + 1} \\mid \\mathrm {C o T} _ {L} ^ {i}, \\mathcal {F} _ {i}, n _ {j}\\right), \\tag {6}", + "image_path": "9c6831bab18873d643e710c5d4ac73f2ffb018fd0b5a6426ad2fad8decd5da47.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 466, + 504, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 466, + 504, + 490 + ], + "spans": [ + { + "bbox": [ + 104, + 466, + 504, + 490 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 466, + 504, + 490 + ], + "type": "inline_equation", + "content": "\\widetilde{n}_{i+1}" + }, + { + "bbox": [ + 104, + 466, + 504, + 490 + ], + "type": "text", + "content": " represents the refined version of the subsequent logical node " + }, + { + "bbox": [ + 104, + 466, + 504, + 490 + ], + "type": "inline_equation", + "content": "n_{i+1}" + }, + { + "bbox": [ + 104, + 466, + 504, + 490 + ], + "type": "text", + "content": ", according to the current logic " + }, + { + "bbox": [ + 104, + 466, + 504, + 490 + ], + "type": "inline_equation", + "content": "\\mathbb{C} \\circ \\mathbb{T}_L^i" + }, + { + "bbox": [ + 104, + 466, + 504, + 490 + ], + 
"type": "text", + "content": ", feedback result " + }, + { + "bbox": [ + 104, + 466, + 504, + 490 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_i" + }, + { + "bbox": [ + 104, + 466, + 504, + 490 + ], + "type": "text", + "content": ", and previous logical node " + }, + { + "bbox": [ + 104, + 466, + 504, + 490 + ], + "type": "inline_equation", + "content": "n_j" + }, + { + "bbox": [ + 104, + 466, + 504, + 490 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 494, + 505, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 494, + 505, + 528 + ], + "spans": [ + { + "bbox": [ + 104, + 494, + 505, + 528 + ], + "type": "text", + "content": "Overall, incorporating reflection ensures that errors are identified and corrected promptly. This capability enables LLMs to quickly shift to alternative reasoning paths or correct their current trajectory. By doing so, error propagation is minimized, resulting in more accurate conclusions." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 125, + 535, + 291, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 535, + 291, + 547 + ], + "spans": [ + { + "bbox": [ + 125, + 535, + 291, + 547 + ], + "type": "text", + "content": "Key Difference: Feedback & Refinement" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 123, + 553, + 489, + 601 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 123, + 553, + 488, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 553, + 488, + 577 + ], + "spans": [ + { + "bbox": [ + 123, + 553, + 488, + 577 + ], + "type": "text", + "content": "- Short CoT typically moves in a straightforward, non-repetitive manner from one node to the next, so that cannot correct their logic." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 123, + 578, + 489, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 578, + 489, + 601 + ], + "spans": [ + { + "bbox": [ + 123, + 578, + 489, + 601 + ], + "type": "text", + "content": "- Long CoT allows for revisiting and revising earlier decisions by feedback and refinement, ensuring that estimizable and prior logical conclusions during the reasoning progress." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 620, + 425, + 632 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 620, + 425, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 620, + 425, + 632 + ], + "type": "text", + "content": "2.2.4 Unified Application and Development History of Three Capabilities" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 639, + 505, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 639, + 505, + 684 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 505, + 684 + ], + "type": "text", + "content": "The Long CoT discussed here represents a unified reasoning system that seamlessly integrates and applies three key capabilities: deep reasoning, reflective mechanisms, and exploration capabilities. In contrast, during the Short CoT era, these capabilities developed independently, each evolving in isolation." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 104, + 689, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 505, + 723 + ], + "type": "text", + "content": "As shown in Figure 2, early efforts primarily focused on enhancing deep reasoning within traditional CoT paradigms. This was followed by the gradual introduction of reflective mechanisms, which were initially based on human-designed pipelines. 
Over time, exploration capabilities were added, and" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "spans": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "content": "these components were ultimately merged, giving rise to the modern concept of Long CoT, a unified approach to reasoning that seeks to enhance all three capabilities in harmony." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 100, + 506, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 100, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 100, + 506, + 189 + ], + "type": "text", + "content": "The progression of Long CoT is gradual, rather than a sudden emergence through isolated models like o1 [307] and R1 [227]. Instead, it develops gradually. For example, earlier systems, such as ToT [955], enhance exploration but lack reflective mechanisms, disqualifying them as Long CoT [95]. While GoT [48] incorporates self-reflection based on ToT, its original model still lacked robust deep reasoning, preventing it from qualifying as Long CoT at that time. It is also notable that modern Long CoT systems, often neglect earlier technologies. This article addresses this gap by tracing the evolution of each capability, with the final section offering a comprehensive analysis of the integrated Long CoT system." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 193, + 506, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 193, + 506, + 238 + ], + "spans": [ + { + "bbox": [ + 104, + 193, + 506, + 238 + ], + "type": "text", + "content": "In summary, Long CoT and Short CoT represent distinct paradigms. Long CoT features a deeper, broader, and more reflective reasoning process, enhancing both accuracy and coherence. Short CoT, by contrast, is better suited to simpler, well-defined problems. This distinction highlights the scalability and adaptability of Long CoT, making it particularly effective for more complex reasoning." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 125, + 244, + 362, + 257 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 244, + 362, + 257 + ], + "spans": [ + { + "bbox": [ + 125, + 244, + 362, + 257 + ], + "type": "text", + "content": "Key Difference: Unified Application of Three Capabilities" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 119, + 261, + 490, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 261, + 490, + 295 + ], + "spans": [ + { + "bbox": [ + 119, + 261, + 490, + 295 + ], + "type": "text", + "content": "It is important to highlight that Long CoT integrates these three distinct capabilities to perform complex reasoning. In contrast, traditional Short CoT optimization typically focuses on only one of these characteristics." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 315, + 296, + 329 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 315, + 296, + 329 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 296, + 329 + ], + "type": "text", + "content": "3 Long CoT Analysis & Evaluation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 339, + 293, + 352 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 339, + 293, + 352 + ], + "spans": [ + { + "bbox": [ + 105, + 339, + 293, + 352 + ], + "type": "text", + "content": "3.1 Analysis & Explanation for Long CoT" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 360, + 506, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 360, + 506, + 426 + ], + "spans": [ + { + "bbox": [ + 104, + 360, + 506, + 426 + ], + "type": "text", + "content": "Research on Long CoT has significantly enhanced RLLMs by improving reasoning accuracy, reducing errors, and supporting dynamic decision-making. However, several phenomena and their corresponding mechanisms remain inadequately summarized. 
This section addresses key topics, including the mechanisms of Long CoT and their underlying principles [644, 63, 545, 642]. Methodologically, two main perspectives have emerged to explain Long CoT: (1) External Behavior Analysis (§ 3.1.1) and (2) Internal Mechanism Analysis (§ 3.1.2)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 437, + 301, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 437, + 301, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 301, + 449 + ], + "type": "text", + "content": "3.1.1 Long CoT External Behavior Analysis" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 456, + 504, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 504, + 479 + ], + "type": "text", + "content": "The primary research stream focuses on explaining RLLM behaviors for Long CoT [25]. As illustrated in Figure 4, six key phenomena are identified and discussed for Long CoT in this part." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 487, + 506, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 487, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 506, + 597 + ], + "type": "text", + "content": "Long CoT Emergence Phenomenon Research shows that contextual examples improve large models' generative abilities by guiding the formation of reasoning chains [1012, 671, 417, 343, 532, 846, 1017, 1141]. Wang et al. [759] and Lippmann and Yang [461] demonstrate that these examples standardize reasoning chain generation relevant to the answers both in in-context-learning and supervised-finetuning. In an experiment by Madaan et al. 
[538], removing problem-specific entities from contextual examples, while retaining only the logical structure, led to similar performance as using complete examples, highlighting the logical structure imitation of Long CoT during inference. From a learning perspective, Ye et al. [963] analyzes and reveals the three-stage developmental trajectory of Long CoT: early memorization, followed by in-distribution generalization, and ultimately cross-distribution generalization, thereby enabling the model to exhibit Long CoT capabilities." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 601, + 507, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 507, + 723 + ], + "type": "text", + "content": "More recently, Stechly et al. [688] and Wang and Zhou [815] have shown that modifying the decoding process or designing specific prompts can activate the Long CoT within pre-trained models. They propose that CoT is embedded during pre-training and requires specific activation [941]. Further, Sadr et al. [642] focus the Long CoT source from the training data, and build on this with the notion of \"model attribution\", to specifically identify the training data most influential for specific outputs. Building on this, Guo et al. [227] and Xie et al. [886] investigate using rule-based reinforcement learning to directly activate Long CoT during pre-training, aiming to enhance performance [881]. Furthermore, Gandhi et al. [194] identify four key cognitive behaviors, including verification, backtracking, sub-target setting, and backward chaining, which successfully facilitate Long CoT. Qwen series models [926] inherently demonstrate these behaviors, which can be easily triggered by rule-based reinforcement. 
In contrast, the models of Llama series [168] lack these" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 143, + 53, + 184, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 53, + 184, + 56 + ], + "spans": [ + { + "bbox": [ + 143, + 53, + 184, + 56 + ], + "type": "text", + "content": "REASONING GROU" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 71, + 207, + 190 + ], + "blocks": [ + { + "bbox": [ + 111, + 71, + 207, + 190 + ], + "lines": [ + { + "bbox": [ + 111, + 71, + 207, + 190 + ], + "spans": [ + { + "bbox": [ + 111, + 71, + 207, + 190 + ], + "type": "image", + "image_path": "5a5b622a5ef9a492838399c45ff5d29022e17e93ea38f8784aa310a395d4009d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": 
"image_body" + }, + { + "bbox": [ + 104, + 198, + 506, + 265 + ], + "lines": [ + { + "bbox": [ + 104, + 198, + 506, + 265 + ], + "spans": [ + { + "bbox": [ + 104, + 198, + 506, + 265 + ], + "type": "text", + "content": "Figure 4: Analysis of the six classic phenomena of Long CoT external behavior: (a) emergence of Long CoT in current RLLMs; (b) reasoning boundaries and limitations of current Long CoT systems; (c) overthinking caused by scaling beyond RLLMs' reasoning boundaries, leading to performance decay; (d) inference-time scaling, discussing mainstream scaling methods, corresponding scaling laws and their limitations; (e) use of process reward model (PRM) or outcome reward model (ORM); (f) exploration of the \"aha\" moment and its underlying causes." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 210, + 72, + 290, + 190 + ], + "blocks": [ + { + "bbox": [ + 210, + 72, + 290, + 190 + ], + "lines": [ + { + "bbox": [ + 210, + 72, + 290, + 190 + ], + "spans": [ + { + "bbox": [ + 210, + 72, + 290, + 190 + ], + "type": "image", + "image_path": "b58ea3cfdd162d9e8dc0a98bea568dac497bec49f930188fabfb39d4a8af9188.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 294, + 72, + 402, + 190 + ], + "blocks": [ + { + "bbox": [ + 294, + 72, + 402, + 190 + ], + "lines": [ + { + "bbox": [ + 294, + 72, + 402, + 190 + ], + "spans": [ + { + "bbox": [ + 294, + 72, + 402, + 190 + ], + "type": "image", + "image_path": "7574cfd5bdc73debbbe23c4cd13dc43c38b3f705661075e1c69e68c8876576bc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 405, + 72, + 498, + 190 + ], + "blocks": [ + { + "bbox": [ + 405, + 72, + 498, + 190 + ], + "lines": [ + { + "bbox": [ + 405, + 72, + 498, + 190 + ], + "spans": [ + { + "bbox": [ + 405, + 72, + 498, + 
190 + ], + "type": "image", + "image_path": "7acf371a734b42dff8be38ed39013e080e5d0020e7a712fdcc41abb09ba80b65.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 291, + 504, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 291, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 104, + 291, + 504, + 346 + ], + "type": "text", + "content": "capabilities and thus requires example-based reinforcement learning to improve significantly [65]. Moreover, Wang et al. [812] identify a pretraining scaling law that explains how increasing calculation size in RLLMs enhances their reasoning capabilities. Wang et al. [796] further explore the scaling law of Long CoT, showing that more fine-grained Long CoT granularity leads to more efficient and effective generalization performance." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 360, + 506, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 360, + 506, + 469 + ], + "spans": [ + { + "bbox": [ + 104, + 360, + 506, + 469 + ], + "type": "text", + "content": "Reasoning Boundary Phenomenon Recent research has highlighted the upper bounds and limitations of RLLMs across various reasoning tasks [303, 283, 684, 261, 185, 252]. Specifically, Bi et al. [53] investigate these bounds in code generation, showing that RLLMs struggle with tasks that exceed certain complexity thresholds [600], especially when imitating Long CoT samples of varying complexity. In the context of upper-bound performance, Merrill and Sabharwal [548] and Li et al. [430] focus on single-step arithmetic tasks, concluding that model performance is constrained by input length. Moreover, Feng et al. [177] proposes a mathematical model indicating that fixed-size models cannot produce accurate numerical answers beyond specific limits. 
However, increasing the number of reasoning steps improves a model's capability requirements to solve more complex problems." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 475, + 506, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 506, + 596 + ], + "type": "text", + "content": "Inspired by these explorations, Chen et al. [90] first define the \"reasoning boundary\" phenomenon and quantify these limits, showing that surpassing an RLLM's reasoning capacity leads to performance decline [92]. Similarly, Zhou et al. [1130] introduce GSM-Infinite, linking different upper limits to accuracy levels. Chen et al. [90] also examine the interaction between these boundaries across tasks of varying complexity, providing insights into the effectiveness of Long CoT strategies [1085]. Moreover, Amiri et al. [12] propose a \"tight lower bound\" for Long CoT further guiding reasoning error reductions. Further, Baeumel et al. [28] suggest that due to its reliance on a single-digit lookahead heuristic, there are inherent boundaries in performing addition with multiple operands, which thus hinders the fundamental limitation of LLMs in scaling to more complex numerical reasoning. Liu et al. [483] further investigate the role of reinforcement learning in expanding these reasoning boundaries instead of relying solely on pretraining capabilities." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "content": "Overthinking Phenomenon Research has highlighted the overthinking phenomenon [103, 330, 574, 142, 357, 595], where performance improves with longer reasoning chains up to a threshold, after which it declines. In contrast, Xie et al. [886] and Ma et al. 
[534] find no significant correlation between reasoning length and accuracy. To explain this, one line of research suggests that Long CoT strategies [21, 441], like avoiding \"snowball errors\" [192]. Alternatively, Chen et al. [90], Wolf et al. [851] highlight a performance drop when the reasoning boundaries are exceeded, providing an explanation for the overthinking phenomenon. This suggests that reasoning length and logical complexity should be kept below a certain boundary [1080]. Building on this, Wu et al. [867] mathematically determine the feasible reasoning length for Long CoT. Finally, Chen et al. [93] introduces Ohm's law of Long CoT, which accurately predicts and controls performance." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 142, + 47, + 187, + 52 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 47, + 187, + 52 + ], + "spans": [ + { + "bbox": [ + 142, + 47, + 187, + 52 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 142, + 52, + 187, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 52, + 187, + 55 + ], + "spans": [ + { + "bbox": [ + 142, + 52, + 187, + 55 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 
741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 139 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 139 + ], + "type": "text", + "content": "Inference-Time Scaling Phenomenon Recent advances in inference-time scaling algorithms [524, 843] have garnered significant attention, particularly for their ability to extend reasoning length and improve performance [524, 455, 875]. Specifically, Brown et al. [57] identify a phenomenon called \"Large Language Monkeys\", in which a series of reasoning tasks show that with enough trials, a correct result can be achieved. Additionally, o1 [307] and R1 [227] demonstrated that directly scaling the length of model inference improves final performance." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "text", + "content": "To understand inference-time scaling, we will discuss these two paradigms: (1) Sequential Scaling: Sequential scaling involves increasing the reasoning path length. While this can enhance performance, studies by Jin et al. [330] show that, beyond a certain point, longer reasoning paths can degrade performance due to error accumulation. They suggest an optimal path length that depends on the model's capabilities and task complexity [15, 652, 31]. Furthermore, Chen et al. [90] and Wu et al. [867] explain that excessive exploration lengths beyond the RLLM's inherent reasoning boundary lead to performance decay, which guides RLLMs for deeper reasoning capabilities [32]. 
(2) Parallel Scaling: Parallel scaling involves performing multiple reasoning steps and verifying the results. While it shows promise, Parashar et al. [583] and Wang et al. [820] argue that simply increasing inference time does not guarantee improved performance. Wu et al. [864] show that the computational FLOPs " + }, + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "text", + "content": " of inference are correlated with the lower bound of performance error, which scales with " + }, + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "inline_equation", + "content": "\\log N" + }, + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "text", + "content": ". Additionally, Chen et al. [93] establish an upper bound for parallel scaling, showing that RLLMs cannot exceed Pass@k verification through various verifiers. They further argue that sampling optimization cannot exceed the model's internal reasoning limitations, demonstrating that for " + }, + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "text", + "content": " samples, accuracy is proportional to " + }, + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "inline_equation", + "content": "\\frac{m}{(k / \\log N + b)^2}" + }, + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "inline_equation", + "content": "b" + 
}, + { + "bbox": [ + 104, + 144, + 506, + 312 + ], + "type": "text", + "content": " are model-dependent constants." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 319, + 506, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 319, + 506, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 319, + 506, + 506 + ], + "type": "text", + "content": "PRM & ORM Selection Phenomenon As RLLMs evolve, it is crucial to navigate the debate around the selection between process supervision and outcome supervision, two key reinforcement learning paradigms for complex reasoning tasks. The phenomenon of choosing between these two approaches has become a pivotal issue, as it is essential to differentiate and decide which supervision strategy is more suitable for specific tasks [899, 187, 1059]. While process supervision is intuitively advantageous for long-term reward assignments, the exact relationship between the two approaches remains unclear. It is commonly believed that process supervision is more challenging due to the trajectory-level coverage problem, which demands significant effort to collect fine-grained supervision data [1102, 679]. Additionally, PRM faces the issue of reward hacking [13, 152, 573, 30, 399], where agents exploit flaws in the reward function to produce unintended behaviors [227]. Addressing this to surpass rule-based reward systems has become an important research area [227, 886, 594]. Furthermore, Lampinen et al. [368] and Tan [708] establish a causal link between intermediate steps and final answers in qualitative experiments. Building on this, Jia et al. [317] demonstrate that, under the standard data coverage assumption, reinforcement learning with outcome supervision is not statistically more challenging than process supervision, aside from polynomial factors. More strictly, He et al. [247] mathematically demonstrate that outcome-level rewards suffice for online reinforcement learning in RLLMs." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 513, + 506, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 513, + 506, + 679 + ], + "spans": [ + { + "bbox": [ + 104, + 513, + 506, + 679 + ], + "type": "text", + "content": "Aha Moment Phenomenon Earlier, Guo et al. [227] demonstrated that direct RL using rule-based rewards can trigger the aha moment, fostering natural self-reflection without supervision [172]. Following this, Team [721], Xie et al. [886] replicate this phenomenon. Further, Zhou et al. [1119] and Meng et al. [547] further extend this phenomenon to multimodal scenarios. However, Liu et al. [498] argue that the aha moment may not emerge in R1-Zero-like training. Instead, they observe that self-reflection patterns, such as superficial self-reflection (SSR), appear at epoch 0, the stage of base models. In this case, self-reflections do not necessarily lead to correct answers. Upon closer examination of R1-Zero training via RL, they find that the increasing response length results not from self-reflection, but from RL optimizing well-designed rule-based rewards. Moreover, Yang et al. [939] demonstrate that the \"aha moment\" is externally marked by increased use of anthropomorphic language during self-reflection and a dynamic adjustment of uncertainty in response to problem difficulty. This process enables the model to maintain reasoning without succumbing to \"Reasoning Collapse.\" Internally, it is characterized by a clear distinction between anthropomorphic traits and logical reasoning, with anthropomorphic language intensifying as the problem becomes more complex." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 689, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 505, + 723 + ], + "type": "text", + "content": "Reinforcement Learning Entropy Phenomenon In reinforcement learning for Long CoT, the entropy mechanism is a crucial factor influencing the performance of RLLMs. Policy entropy measures the diversity and exploratory strength of a model's outputs. By managing this entropy" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 172 + ], + "type": "text", + "content": "effectively, a model preserves exploration and thus excels on complex reasoning tasks. Earlier, Jang and Kim [310] investigate how initial entropy affects exploration in deep RL and proposed an entropy-aware initialization to encourage effective exploration. Building on this, Zhang et al. [1036] developed an Entropy-Regularized PRM that balances policy updates against large deviations from the starting distribution, thereby improving reasoning. Cheng et al. 
[116] found that high-entropy regions correlate positively with three exploratory reasoning behaviors: (1) key tokens linking logical steps, (2) self-verification and correction, and (3) rare behaviors underrepresented in the base model. Most recently, Agarwal et al. [5] introduced an Entropy Minimization method and demonstrated its strong impact on LLM performance in mathematical, physical, and coding tasks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 176, + 506, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 176, + 506, + 276 + ], + "spans": [ + { + "bbox": [ + 104, + 176, + 506, + 276 + ], + "type": "text", + "content": "However, recent research indicates that, during early training, policy entropy declines sharply, causing the model to converge prematurely on specific output patterns and limiting further reasoning improvement [144]. In reinforcement learning, policy entropy " + }, + { + "bbox": [ + 104, + 176, + 506, + 276 + ], + "type": "inline_equation", + "content": "(H)" + }, + { + "bbox": [ + 104, + 176, + 506, + 276 + ], + "type": "text", + "content": " and downstream task performance " + }, + { + "bbox": [ + 104, + 176, + 506, + 276 + ], + "type": "inline_equation", + "content": "(R)" + }, + { + "bbox": [ + 104, + 176, + 506, + 276 + ], + "type": "text", + "content": " follow an exponential relation: " + }, + { + "bbox": [ + 104, + 176, + 506, + 276 + ], + "type": "inline_equation", + "content": "R = -a\\cdot e^{H} + b" + }, + { + "bbox": [ + 104, + 176, + 506, + 276 + ], + "type": "text", + "content": ", so a drop in entropy produces a rapid performance decline until saturation. This \"policy entropy collapse\" is common without entropy control, as reduced entropy constrains exploration and stalls reasoning gains [144]. To counter this collapse, two methods, Clip-Cov and KL-Cov, regulate entropy by constraining updates on high-covariance tokens. 
Clip-Cov clips their update magnitudes, whereas KL-Cov imposes a Kullback-Leibler penalty. Empirical results show both techniques prevent collapse and enhance reasoning performance [144]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 288, + 309, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 288, + 309, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 288, + 309, + 300 + ], + "type": "text", + "content": "3.1.2 Long CoT Internal Mechanism Analysis" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 307, + 499, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 307, + 499, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 307, + 499, + 319 + ], + "type": "text", + "content": "The second stream of research investigates the internal mechanisms of Long CoT-related RLLMs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 329, + 506, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 329, + 506, + 461 + ], + "spans": [ + { + "bbox": [ + 104, + 329, + 506, + 461 + ], + "type": "text", + "content": "Reasoning Internal Mechanism Recent studies have explored the internal mechanisms underlying the coherent rationale outputs of Long CoT, with particular emphasis on attention mechanisms [675, 632]. These studies primarily examine neural substructures in RLLMs, framing CoT reasoning from a white-box perspective [819, 992, 233, 169]. Weston and Sukhbaatar [849] introduces the concept of System 2 Attention (S2A), which demonstrates Long CoT generation by selectively focusing attention on relevant information. Additionally, Li et al. [407] explore gradient distributions between direct output and Long CoT layers, revealing that Long CoT layers help maintain stability by distinguishing relevant from irrelevant reasoning [840]. Finally, Zhang et al. 
[1068] conceptualize RLLMs as finite state automata, offering further insight into how internal dynamics influence external behavior. Despite Short CoT's struggles with self-correction, Bertolazzi et al. [47] show that these models rely on consistency heads (attention heads) to assess the alignment of numerical values in arithmetic solutions through internal shortcuts." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 470, + 506, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 470, + 506, + 615 + ], + "spans": [ + { + "bbox": [ + 104, + 470, + 506, + 615 + ], + "type": "text", + "content": "Knowledge Incorporating Mechanism Current RLLMs primarily focus on mathematics and coding but have shown potential for generalization to other knowledge-rich domains, sparking growing interest in the mechanism for integrating domain-specific knowledge into Long CoT [860, 886, 1105]. Prystawski et al. [609] suggest that generative models store entity knowledge learned during pre-training independently, with the reasoning process in Long CoT linking this knowledge across entities. Radha and Goktas [630] recently introduced the Probabilistic Mixture Model (PMM), which categorizes model outputs into reasoning, memorization, and guessing. They also propose an Information-Theoretic Consistency (ITC) analysis to quantify the relationship between model confidence and strategy selection. Additionally, Jin et al. [331] define \"Concept Depth\" as the lowest layers at which complex concepts are understood, demonstrating varying levels of knowledge integration in RLLMs. Ou et al. [572] examine RLLM knowledge internalization through knowledge loop evolution, arguing that new knowledge acquisition is shaped by its connection to existing knowledge, with the loop evolving from formation to optimization and from shallow to deep." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 627, + 228, + 639 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 627, + 228, + 639 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 228, + 639 + ], + "type": "text", + "content": "3.2 Long CoT Evaluations" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 647, + 171, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 647, + 171, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 171, + 658 + ], + "type": "text", + "content": "3.2.1 Metrics" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 667, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 505, + 723 + ], + "type": "text", + "content": "In benchmarking, various metrics assess model performance across reasoning tasks, each focusing on different aspects of reasoning ability. These metrics evaluate both RLLMs' effectiveness in achieving desired outcomes and their learning efficiency. As a result, metrics for RLLMs have gained increasing attention in recent research. 
For mathematical or code-related tasks, three key metrics are commonly used: Accuracy, Pass@k, and Cons@k based on regex extraction:" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 73, + 504, + 121 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 107, + 73, + 340, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 73, + 340, + 84 + ], + "spans": [ + { + "bbox": [ + 107, + 73, + 340, + 84 + ], + "type": "text", + "content": "Accuracy measures the proportion of correct outputs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 86, + 493, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 86, + 493, + 97 + ], + "spans": [ + { + "bbox": [ + 107, + 86, + 493, + 97 + ], + "type": "text", + "content": "- Pass@k evaluates the likelihood of generating at least one correct solution within " + }, + { + "bbox": [ + 107, + 86, + 493, + 97 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 107, + 86, + 493, + 97 + ], + "type": "text", + "content": " attempts." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 99, + 504, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 99, + 504, + 121 + ], + "spans": [ + { + "bbox": [ + 107, + 99, + 504, + 121 + ], + "type": "text", + "content": "- Cons@k assesses consistency by determining the model's ability to consistently produce correct or logically coherent solutions across multiple attempts." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "spans": [ + { + "bbox": [ + 104, + 125, + 505, + 159 + ], + "type": "text", + "content": "In scientific or commonsense question-answering tasks, evaluation often uses Exact Match (EM) and Accuracy based on regex extraction, where EM determines whether the model's output exactly matches the expected solution." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 163, + 479, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 479, + 175 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 479, + 175 + ], + "type": "text", + "content": "For feedback techniques like ORM or PRM, Rank and Best-of-N metrics are often used:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 180, + 505, + 226 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 106, + 180, + 504, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 180, + 504, + 201 + ], + "spans": [ + { + "bbox": [ + 106, + 180, + 504, + 201 + ], + "type": "text", + "content": "- Rank measures whether the reward model correctly prioritizes the best reasoning processes from the top " + }, + { + "bbox": [ + 106, + 180, + 504, + 201 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 106, + 180, + 504, + 201 + ], + "type": "text", + "content": " candidates." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 204, + 505, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 204, + 505, + 226 + ], + "spans": [ + { + "bbox": [ + 106, + 204, + 505, + 226 + ], + "type": "text", + "content": "- Best-of-N selects the highest-scoring solution from " + }, + { + "bbox": [ + 106, + 204, + 505, + 226 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 106, + 204, + 505, + 226 + ], + "type": "text", + "content": " generated reasoning trajectories, indirectly measuring the reward model's effectiveness based on final outcomes." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 239, + 223, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 239, + 223, + 251 + ], + "spans": [ + { + "bbox": [ + 105, + 239, + 223, + 251 + ], + "type": "text", + "content": "3.2.2 Decoding Strategies" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 258, + 504, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 504, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 504, + 315 + ], + "type": "text", + "content": "Decoding strategies are essential for controlling the inference process. Common approaches include Greedy Decoding, Beam Search, and Major@k. Both Greedy Decoding and Beam Search limit the sampling range to reduce randomness, guiding the model toward more consistent outputs. In contrast, Major@k identifies the most reliable solution by selecting the one with the highest consistency from a set of " + }, + { + "bbox": [ + 104, + 258, + 504, + 315 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 258, + 504, + 315 + ], + "type": "text", + "content": " candidate solutions." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 327, + 192, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 327, + 192, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 192, + 338 + ], + "type": "text", + "content": "3.2.3 Benchmarks" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 346, + 504, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 346, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 346, + 504, + 392 + ], + "type": "text", + "content": "In the realm of Benchmarks, the focus lies on assessing the reasoning capabilities of RLLMs across diverse domains. There are two primary categories: (1) Outcome Benchmarks, which focus on the holistic view of Long CoT reasoning, and (2) Process Benchmarks, which concentrate on the local view of the Long CoT process or individual capabilities." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 401, + 504, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 401, + 504, + 424 + ], + "spans": [ + { + "bbox": [ + 104, + 401, + 504, + 424 + ], + "type": "text", + "content": "Outcome Benchmarks In the realm of Outcome Benchmarks, the first focus lies on evaluating the logical reasoning capabilities:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 430, + 506, + 655 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 107, + 430, + 506, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 430, + 506, + 517 + ], + "spans": [ + { + "bbox": [ + 107, + 430, + 506, + 517 + ], + "type": "text", + "content": "- Complex Mathematics: A central focus in complex mathematics is evaluating benchmarks like GSM8K [141] and MATH [253], which assess basic mathematical problem-solving abilities [1125, 1112]. 
Recent additions, such as AIME 2024 [8], AIME 2025 [571], MATH-500 [449], AMC 2023 [9], USAMO [598], OlympiadBench [239], and OlympiadArena [298], expand the evaluation of LLM performance in mathematics. Moreover, Putnam-AXIOM [224] and FrontierMath [210] introduce more complex problems that challenge future reasoning systems. Additionally, ThinkBench [291] and MATH-Perturb [288] focus on robust evaluation for Long CoT [38, 987]." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 520, + 506, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 520, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 107, + 520, + 506, + 586 + ], + "type": "text", + "content": "- Complex Coding: Complex coding benchmarks are also vital, with competitions like Codeforces, SWEbench [327], CodeContests [427], and LiveCodeBench [309] evaluating LLM coding and problem-solving skills. Notable additions such as MHPP [148], ProBench [934], HumanEval Pro, MBPP Pro [993], and EquiBench [833] enhance the scope and complexity of coding challenges. Moreover, some studies have explored applying these benchmarks in real-world code development scenarios for automatic code generation and evaluation [243, 744]." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 589, + 506, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 589, + 506, + 655 + ], + "spans": [ + { + "bbox": [ + 107, + 589, + 506, + 655 + ], + "type": "text", + "content": "- Commonsense Puzzle: Commonsense puzzle benchmarks, including LiveBench [850], BIG-Bench Hard [705] and ZebraLogic [450], assess models' ability to reason about commonsense situations. The ARC [131] and DRE-Bench [947] is often viewed as a challenging commonsense-based AGI test. JustLogic [87] further contributes to the evaluation of deductive reasoning and commonsense problem-solving. Moreover, Li et al. 
[382] introduce QuestBench, a benchmark designed to evaluate the ability of RLLMs to generate insightful and meaningful questions." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 660, + 504, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 660, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 104, + 660, + 504, + 684 + ], + "type": "text", + "content": "The second focus area concerns Knowledge Benchmarks, essential for evaluating a model's capability in complex reasoning across various tasks for out of distribution evaluation [776]:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 689, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 689, + 506, + 723 + ], + "type": "text", + "content": "- Scientific Reasoning: Scientific Reasoning benchmarks, such as GPQA Diamond [637], MMLU-Pro [821], and SuperGPQA [165], assess multi-domain reasoning in fields like chemistry, biology, and physics [157]. 
These benchmarks test models' ability to not only accumulate knowledge" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 114, + 72, + 504, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 72, + 504, + 115 + ], + "spans": [ + { + "bbox": [ + 114, + 72, + 504, + 115 + ], + "type": "text", + "content": "but also integrate it for problem-solving. Humanity's Last Exam (HLE) [602] further challenges models by requiring deep interdisciplinary reasoning across scientific disciplines. Further, Chung et al. [140] propose TPBench to evaluate the effectiveness of RLLMs in solving theoretical physics problems." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 118, + 506, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 118, + 506, + 207 + ], + "spans": [ + { + "bbox": [ + 107, + 118, + 506, + 207 + ], + "type": "text", + "content": "- Medical Reasoning: In the realm of Medical Reasoning, the need for complex, domain-specific, and accurate reasoning is paramount [1094, 1024, 905, 589]. Benchmarks, such as MedQA [328], JAMA Clinical Challenge [76], LLMEval-Med [1049] and Medbullets [76], simulate diagnostic and treatment decision-making processes, reflecting real-world medical practice. 
These benchmarks evaluate a model's handling of medical knowledge and reasoning, from diagnosis to treatment planning. Additionally, MedXpertQA [1150] introduces a comprehensive evaluation framework combining text and multimodal data, specifically assessing AI's reasoning capabilities in healthcare." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 220, + 224, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 220, + 224, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 224, + 232 + ], + "type": "text", + "content": "3.2.4 Process Evaluations" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 240, + 506, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 240, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 240, + 506, + 319 + ], + "type": "text", + "content": "Deep Reasoning Benchmarks Recent progress in RLLMs underscores the need for specialized benchmarks to evaluate their deep reasoning abilities in Long CoT [375, 1133]. Notably, Lin et al. [450] introduces ZebraLogic, a framework for assessing logical reasoning, especially in complex non-monotonic scenarios. Similarly, BigGSM [90] and GSM-Ranges [670] focus on perturbing numerical values to test logical and arithmetic reasoning in edge cases beyond the models' training distribution. ROSCOE [212], ReCEval [606], DiVeRSe [425], HLV [71], and CoT-Kinetics [51] are designed to assess each step in the deep reasoning process during Long CoT tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 328, + 506, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 328, + 506, + 397 + ], + "spans": [ + { + "bbox": [ + 104, + 328, + 506, + 397 + ], + "type": "text", + "content": "Exploration Benchmarks Several studies assess RLLMs' exploration capabilities in Long CoT tasks. 
Specifically, Sys2Bench [583] evaluates the exploration and scaling abilities of RLLMs, emphasizing generalization across diverse tasks. BanditBench [566] extends this by testing model performance in interactive environments, offering insights into practical applications. Additionally, Heyman and Zylberberg [254] introduce a graph coloring problem to assess reasoning and spatial exploration in complex problem-solving scenarios." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 406, + 507, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 406, + 507, + 561 + ], + "spans": [ + { + "bbox": [ + 104, + 406, + 507, + 561 + ], + "type": "text", + "content": "Reflection Benchmarks Reflection benchmarks measure RLLMs' ability to identify, reflect upon, and correct errors in Long CoT reasoning. These benchmarks fall into two categories: feedback and refinement. (1) Feedback Benchmark: These benchmarks assess the ability of LLMs to detect errors and respond to feedback for improvement. For example, Lambert et al. [367] introduces RewardBench to evaluate RLLMs' reward capabilities. This framework is extended by Multimodal RewardBench[960], and CodeCriticBench [1025] to include multimodal and code contexts, respectively. Benchmarks such as ProcessBench [1102], PRMBench [679], MR-Ben [1021], and DeltaBench [250] focus on error detection and correction across various tasks at the step level. Additionally, RealL Mistake [337] and JudgeBench [709] address more real-world error evaluation. (2) Refinement Benchmark: These benchmarks focus on error correction in complex tasks. CriticBench [456] assesses critique-correction capabilities, while MLDebugging [287], and ErrorRadar [922] specializes in coding or multimodal reasoning error detection and refinement. FinerReason [72] introduces a commonsense puzzle for broader feedback and refinement evaluations. Medec [1] adapts error correction to healthcare, addressing medical issues." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 571, + 230, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 571, + 230, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 230, + 582 + ], + "type": "text", + "content": "3.2.5 Advanced Evaluation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 590, + 507, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 507, + 723 + ], + "type": "text", + "content": "Agentic & Embodied Reasoning Agentic and Embodied reasoning requires models to demonstrate an understanding of real-world interactions, tool use, and adaptive reasoning in response to change. To assess real-world understanding, Wang et al. [798] introduce a benchmark that evaluates agents' ability to reason about physical concepts. Zhang et al. [1064] extend this by assessing agents' interactions with real-world physics. Additionally, realistic tasks often demand complex planning and tool usage, necessitating benchmarks to evaluate agent reasoning. These benchmarks assess agents' abilities to navigate and complete tasks in digital environments. Building on this, Huang et al. [283] propose a framework for evaluating decision-making in multi-agent, competitive settings. Nath et al. [562] introduce ToolComp, a benchmark designed to evaluate multi-step tool-use reasoning. 
To analyze adaptive reasoning in the face of real-world change, OSWorld [887], CogAgent [260], Mobile-Agent-E [828], WebShop [954], WebArena [1126], WGSR-Bench [972], and WebGames [735] assess AI systems across domains such as operating systems, mobile GUIs, browser tasks, and interactive" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 97 + ], + "type": "text", + "content": "entertainment [1106, 780, 512, 552]. Hu et al. [272] present Text2World, which evaluates agents' ability to generate interactive environments from text to test agent adaptability [995]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 107, + 506, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 107, + 506, + 142 + ], + "spans": [ + { + "bbox": [ + 104, + 107, + 506, + 142 + ], + "type": "text", + "content": "Multimodal Reasoning Multimodal reasoning refers to a system's ability to integrate and reason across diverse input types, including text, images [316]. This capability is crucial for solving complex problems that require information from diverse formats." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 147, + 506, + 407 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 107, + 147, + 506, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 147, + 506, + 203 + ], + "spans": [ + { + "bbox": [ + 107, + 147, + 506, + 203 + ], + "type": "text", + "content": "- Complex Mathematics: Mathematical reasoning often integrates both textual and visual components, such as equations, graphs, or diagrams [921]. Specifically, challenges like MathVista [508], MathVision [782], MathVerse [1054], M3CoT-Math [91], CMMaTH [433], EnigmaEval [763], CoMT-Geometry [125], and PGPS9K [1050] aim to advance multimodal reasoning in mathematics, improving the evaluation of multimodal Long CoT logic." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 204, + 506, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 204, + 506, + 260 + ], + "spans": [ + { + "bbox": [ + 107, + 204, + 506, + 260 + ], + "type": "text", + "content": "- Complex Code: The second area of focus involves code-related reasoning, where systems interpret textual descriptions and code snippets. Benchmarks like HumanEval-V [1035], Code-Vision [767], Plot2Code [852], and ChartMimic [931] evaluate systems' capabilities to generate or interpret code from natural language and multimodal inputs for assessing systems that integrate natural language processing with programming tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 262, + 506, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 262, + 506, + 317 + ], + "spans": [ + { + "bbox": [ + 107, + 262, + 506, + 317 + ], + "type": "text", + "content": "- **Complex Science:** This area involves integrating scientific texts with related diagrams or experimental data. 
Benchmarks like ScienceQA [507], M3CoT-Science [91], BMMR [874], and ScienceBoard [698] evaluate how well models combine science information with Long CoT reasoning across various scientific domains [966]. Further, Guo et al. [229] propose MolPuzzle for the evaluation of molecular structure elucidation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 319, + 506, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 319, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 107, + 319, + 506, + 407 + ], + "type": "text", + "content": "- Commonsense Puzzle: This area focuses on commonsense reasoning, where systems combine reasoning cues and images to make deeper conclusions. Chen et al. [91] introduce M3CoT-Commensense, which incorporates commonsense Long CoT reasoning for complex multimodal interactions. Further, PuzzleVQA [128], MMReason [953] and LEGO-Puzzles [711] focus more on abstract and spatial puzzle reasoning, respectively. Additionally, Wang et al. [760] propose two benchmarks: Clue-Visual Question Answering (CVQA), which tests visual comprehension through three task types, and Clue of Password-Visual Question Answering (CPVQA), which features two task types focusing on the interpretation and application of visual data." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 420, + 507, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 420, + 507, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 420, + 507, + 510 + ], + "type": "text", + "content": "AI for Research Recent advancements in AI have significantly advanced scientific research [94, 1124, 817, 215], with platforms like SciWorld [798] improving the research process. Simultaneously, Pricope [608] and Chan et al. [67] introduce a machine-learning platform to evaluate the potential of RLLMs in automating experiments. Several studies also examine RLLMs' ability to generate innovative research ideas. 
For instance, Si et al. [672] conduct evaluations with over 100 NLP researchers to assess RLLMs' creativity, revealing notable limitations [404, 856, 726]. Additionally, Li et al. [434] introduce SolutionBench, a benchmark for assessing systems' ability to generate feasible solutions for complex engineering problems." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 525, + 282, + 539 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 525, + 282, + 539 + ], + "spans": [ + { + "bbox": [ + 105, + 525, + 282, + 539 + ], + "type": "text", + "content": "4 Deep Reasoning for Long CoT" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 552, + 506, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 506, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 506, + 640 + ], + "type": "text", + "content": "Deep reasoning capabilities primarily require profound depth and comprehensiveness in cognitive and reasoning processes. In the absence of such capabilities, RLLMs suffer significant performance declines [758, 823]. 
Current methods for enhancing deep reasoning can be categorized into two main approaches: (1) Deep Reasoning Format (" + }, + { + "bbox": [ + 104, + 552, + 506, + 640 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 104, + 552, + 506, + 640 + ], + "type": "text", + "content": " 4.1), which involves utilizing various reasoning execution formats to maximize the reasoning step length " + }, + { + "bbox": [ + 104, + 552, + 506, + 640 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 552, + 506, + 640 + ], + "type": "text", + "content": " within reasoning boundary " + }, + { + "bbox": [ + 104, + 552, + 506, + 640 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_l" + }, + { + "bbox": [ + 104, + 552, + 506, + 640 + ], + "type": "text", + "content": " in Equation (2), by selecting the most suitable reasoning format; and (2) Deep Reasoning Learning (" + }, + { + "bbox": [ + 104, + 552, + 506, + 640 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 104, + 552, + 506, + 640 + ], + "type": "text", + "content": " 4.2), which focuses on improving the model's internal capabilities to enhance its deep reasoning abilities, thereby extending the reasoning boundary " + }, + { + "bbox": [ + 104, + 552, + 506, + 640 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_l" + }, + { + "bbox": [ + 104, + 552, + 506, + 640 + ], + "type": "text", + "content": " in Equation (2) intrinsically." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 656, + 234, + 668 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 656, + 234, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 234, + 668 + ], + "type": "text", + "content": "4.1 Deep Reasoning Format" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": "As illustrated in Figure 5, deep reasoning formats can be categorized into three main types: natural language (" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": " 4.1.1), structured language (" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": " 4.1.2), and latent-space reasoning (" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": " 4.1.3), the latter of which is further subdivided into token-, vector-, and manager-driven latent reasoning. The reasoning performance across these formats is presented in Table 1." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 114, + 73, + 243, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 73, + 243, + 83 + ], + "spans": [ + { + "bbox": [ + 114, + 73, + 243, + 83 + ], + "type": "text", + "content": "(a) Natural Language Deep Reasoning" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 120, + 88, + 295, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 88, + 295, + 113 + ], + "spans": [ + { + "bbox": [ + 120, + 88, + 295, + 113 + ], + "type": "text", + "content": "To predict the output of the given input for Conway's Game of Life, we need to apply the rules of the game to each cell on the board. The rules are as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 114, + 295, + 153 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 121, + 114, + 295, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 114, + 295, + 129 + ], + "spans": [ + { + "bbox": [ + 121, + 114, + 295, + 129 + ], + "type": "text", + "content": "1. Any live cell with fewer than two live neighbors dies (underpopulation)..." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 130, + 190, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 130, + 190, + 137 + ], + "spans": [ + { + "bbox": [ + 121, + 130, + 190, + 137 + ], + "type": "text", + "content": "Given Input Board: ..." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 121, + 138, + 201, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 138, + 201, + 145 + ], + "spans": [ + { + "bbox": [ + 121, + 138, + 201, + 145 + ], + "type": "inline_equation", + "content": "\\spadesuit" + }, + { + "bbox": [ + 121, + 138, + 201, + 145 + ], + "type": "text", + "content": " Step-by-Step Analysis: ..." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 121, + 145, + 268, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 145, + 268, + 153 + ], + "spans": [ + { + "bbox": [ + 121, + 145, + 268, + 153 + ], + "type": "inline_equation", + "content": "\\spadesuit" + }, + { + "bbox": [ + 121, + 145, + 268, + 153 + ], + "type": "text", + "content": " Final Output: After applying the rules to each cell..." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 270, + 130, + 299, + 162 + ], + "blocks": [ + { + "bbox": [ + 270, + 130, + 299, + 162 + ], + "lines": [ + { + "bbox": [ + 270, + 130, + 299, + 162 + ], + "spans": [ + { + "bbox": [ + 270, + 130, + 299, + 162 + ], + "type": "image", + "image_path": "ff164d8152d0e42a100061acaca7da8e5deb82846df7779c8c7d61fa44616288.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 310, + 73, + 452, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 73, + 452, + 84 + ], + "spans": [ + { + "bbox": [ + 310, + 73, + 452, + 84 + ], + "type": "text", + "content": "(b) Structured Language Deep Reasoning" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 313, + 89, + 329, + 104 + ], + "blocks": [ + { + "bbox": [ + 313, + 89, + 329, + 104 + ], + "lines": [ + { + "bbox": [ + 313, + 89, + 329, + 104 + ], + "spans": [ + { + "bbox": [ + 313, + 89, + 329, + 104 + ], + "type": "image", + "image_path": "5627b8fe0330637f914d05c2ea3b75f4df43c678ec6ae3f0e9b7da8f94f4f43f.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 330, + 88, + 424, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 88, + 424, + 103 + ], + "spans": [ + { + "bbox": [ + 330, + 88, + 424, + 103 + ], + "type": "text", + "content": "import necessary packages from collections import Cou" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 104, + 410, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 104, + 410, + 111 + ], + "spans": [ + { + "bbox": [ + 313, + 104, + 410, + 111 + ], + "type": "text", + "content": "import necessary packages" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 112, + 422, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": 
[ + 313, + 112, + 422, + 118 + ], + "spans": [ + { + "bbox": [ + 313, + 112, + 422, + 118 + ], + "type": "text", + "content": "from collections import Counter" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 118, + 481, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 118, + 481, + 126 + ], + "spans": [ + { + "bbox": [ + 313, + 118, + 481, + 126 + ], + "type": "text", + "content": "all class and function definitions in the code" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 126, + 358, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 126, + 358, + 133 + ], + "spans": [ + { + "bbox": [ + 313, + 126, + 358, + 133 + ], + "type": "text", + "content": "file, if any" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 133, + 395, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 133, + 395, + 140 + ], + "spans": [ + { + "bbox": [ + 313, + 133, + 395, + 140 + ], + "type": "text", + "content": "class Solution(object):" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 326, + 140, + 449, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 140, + 449, + 153 + ], + "spans": [ + { + "bbox": [ + 326, + 140, + 449, + 153 + ], + "type": "text", + "content": "defgameOfLifeInfinite(self, live): ctr = Counter((I, J) for i, j i" + } + ] + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 450, + 133, + 494, + 162 + ], + "blocks": [ + { + "bbox": [ + 450, + 133, + 494, + 162 + ], + "lines": [ + { + "bbox": [ + 450, + 133, + 494, + 162 + ], + "spans": [ + { + "bbox": [ + 450, + 133, + 494, + 162 + ], + "type": "image", + "image_path": "998c396b19ab3cb07b28b1eb72b14b5078de0675ec62ef33294a59363f34d6e2.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 116, + 167, + 232, + 178 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 
116, + 167, + 232, + 178 + ], + "spans": [ + { + "bbox": [ + 116, + 167, + 232, + 178 + ], + "type": "text", + "content": "(c) Latent Space Deep Reasoning" + } + ] + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 117, + 178, + 140, + 237 + ], + "blocks": [ + { + "bbox": [ + 117, + 178, + 140, + 237 + ], + "lines": [ + { + "bbox": [ + 117, + 178, + 140, + 237 + ], + "spans": [ + { + "bbox": [ + 117, + 178, + 140, + 237 + ], + "type": "image", + "image_path": "823ca26e30f5429ff0ae86df5e048ed2430dd9bdc62c2d874445fe64c1774d87.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 252, + 506, + 285 + ], + "lines": [ + { + "bbox": [ + 104, + 252, + 506, + 285 + ], + "spans": [ + { + "bbox": [ + 104, + 252, + 506, + 285 + ], + "type": "text", + "content": "Figure 5: Three main categories of deep reasoning formats: natural language, structured language, and latent-space reasoning (subdivided into token-, vector-, and manager-driven latent reasoning), with examples drawn from Li et al. [401]." 
+ } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 140, + 178, + 259, + 243 + ], + "blocks": [ + { + "bbox": [ + 140, + 178, + 259, + 243 + ], + "lines": [ + { + "bbox": [ + 140, + 178, + 259, + 243 + ], + "spans": [ + { + "bbox": [ + 140, + 178, + 259, + 243 + ], + "type": "image", + "image_path": "aa202666c0347e30f452afb50132bf46686ba98433600b7ee8d0e4c2f30ad8f5.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 259, + 165, + 381, + 229 + ], + "blocks": [ + { + "bbox": [ + 259, + 165, + 381, + 229 + ], + "lines": [ + { + "bbox": [ + 259, + 165, + 381, + 229 + ], + "spans": [ + { + "bbox": [ + 259, + 165, + 381, + 229 + ], + "type": "image", + "image_path": "09f798353b1e4615f84c4d824a90ec1e55d3d23579c20ec2917c35a81ade4452.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 275, + 230, + 367, + 244 + ], + "lines": [ + { + "bbox": [ + 275, + 230, + 367, + 244 + ], + "spans": [ + { + "bbox": [ + 275, + 230, + 367, + 244 + ], + "type": "text", + "content": "Reasoning Vector Driven Latent Space Deep Reasoning" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 381, + 165, + 497, + 230 + ], + "blocks": [ + { + "bbox": [ + 381, + 165, + 497, + 230 + ], + "lines": [ + { + "bbox": [ + 381, + 165, + 497, + 230 + ], + "spans": [ + { + "bbox": [ + 381, + 165, + 497, + 230 + ], + "type": "image", + "image_path": "3effed9f4c545b03b2ac2c365b4d87fe9724f58c4dddd4294def63e3d2f5672e.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 397, + 230, + 480, + 244 + ], + "lines": [ + { + "bbox": [ + 397, + 230, + 480, + 244 + ], + "spans": [ + { + "bbox": [ + 397, + 230, + 480, + 244 + ], + "type": "text", + "content": "Reasoning Manager Driven 
Latent Space Deep Reasoning" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 308, + 287, + 320 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 308, + 287, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 308, + 287, + 320 + ], + "type": "text", + "content": "4.1.1 Natural Language Deep Reasoning" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 104, + 327, + 506, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 327, + 506, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 506, + 427 + ], + "type": "text", + "content": "Traditionally, researchers have sought to adapt natural language for intuitive and free-flowing deep reasoning [836, 1118, 303, 617, 1070, 765, 205]. Early work by Wei et al. [836] demonstrated that the use of natural language Long CoT significantly enhances the reasoning capabilities of RLLMs. Further, the Natural Program framework [460] allows RLLMs to engage in deeper natural language reasoning by ensuring a more structured and rigorous logical analysis. More recently, CodeI/O [401] has introduced a technique that reorganizes code-based reasoning patterns into natural language formats, further boosting the reasoning potential of RLLMs [36]. Similarly, Li et al. [387] propose CoRT, which integrates code into reasoning to facilitate a mixture of formats, resulting in improved cognitive performance." 
+ } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 440, + 301, + 452 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 440, + 301, + 452 + ], + "spans": [ + { + "bbox": [ + 105, + 440, + 301, + 452 + ], + "type": "text", + "content": "4.1.2 Structured Language Deep Reasoning" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 104, + 459, + 506, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 459, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 459, + 506, + 581 + ], + "type": "text", + "content": "Structured language deep reasoning encompasses various approaches designed to program [100, 464, 687, 591, 198, 845, 830, 1044] or symbolic language [605, 158, 451, 372, 933, 604, 37, 40, 797, 380] format for enhanced deep reasoning. In this context, most studies focus on utilizing code to better enhance the mathematical reasoning capabilities [389, 107, 978, 85]. Xu et al. [897] propose a neural-symbol self-training framework guided by the environment, addressing both the scarcity of symbolic data and the limitations of symbolic processing in LLMs. Additionally, Liao et al. [443] present SKIntern, which refines symbolic RLLMs through curriculum learning and linear attenuation, enabling the internalization of symbolic knowledge with fewer examples, reducing computational costs, and accelerating inference. Furthermore, Ranaldi et al. [634] introduce QuaSAR, a CoT variant that directs LLMs to operate at higher abstraction levels through quasi-symbolic reasoning, thus improving natural language reasoning and providing more precise structural representations." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 594, + 265, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 594, + 265, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 265, + 605 + ], + "type": "text", + "content": "4.1.3 Latent Space Deep Reasoning" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "content": "Latent space deep reasoning encompasses techniques designed to enhance the reasoning abilities of LLMs by leveraging operations within continuous latent spaces [684, 151, 640, 324]. These approaches can be categorized into three main paradigms: (1) Reasoning Token-Driven Latent Space Deep Reasoning: Early work [810, 1013] introduce the concept of \"planning tokens\" or \"thought tokens\" to guide reasoning within latent spaces [949, 1008]. Further, Coconut [236] expands on this through the maintenance of multiple alternative reasoning paths, increasing both complexity and efficiency [1069, 706]. At the extreme, Heima [662] condenses the entire Long CoT process into a single token, yielding substantial computational savings. (2) Reasoning Vector Driven Latent Space Deep Reasoning: Building on the previous paradigm, LTM [356] conceptualizes the layers of LLMs as \"thought blocks\" and introduces the concept of \"thought vectors\" for each layer. 
This" + } + ] + } + ], + "index": 35 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "spans": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 111, + 70, + 499, + 443 + ], + "blocks": [ + { + "bbox": [ + 111, + 70, + 499, + 443 + ], + "lines": [ + { + "bbox": [ + 111, + 70, + 499, + 443 + ], + "spans": [ + { + "bbox": [ + 111, + 70, + 499, + 443 + ], + "type": "table", + "html": "
ModelBase ModelGSM8kMATHGPQAOlympiadBenchLiveCodeBench
Latent Space Deep Reasoning
No-CoT [151]Mistral-7B [318]38.0----
SQ-VAE [810]Llama-2-7B [743]40.07.0---
RecurrentBlock-3.5B [204]-42.1----
ICoT-SI [151]Mistral-7B [318]51.0----
Natural Language Deep Reasoning
Self-Rewarding [114]Llama-2-7B [743]40.010.7---
Llama-3.1-8B [168]-56.720.3---
MetaMath [983]Llama-2-7B [743]66.5----
OVM [979]Llama-2-7B [743]73.7----
NuminaMath-7B-CoT [397]-75.455.2-19.9-
Qwen2-7B [925]-79.944.2-21.3-
Qwen2-Math-7B [927]-80.450.4-38.2-
Internlm2-math-plus-7B [974]-84.054.4-18.8-
OMI2 [401]Qwen2.5-Coder-7B [301]84.172.336.2-27.2
Llama-3.1-70B [168]-85.541.4---
CODEI/O++ [401]Qwen2.5-Coder-7B [301]85.772.140.6-29.1
CODEI/O [401]Qwen2.5-Coder-7B [301]86.471.943.3-28.5
WI [401]Qwen2.5-Coder-7B [301]87.071.439.1-26.0
WI (Full) [401]Qwen2.5-Coder-7B [301]87.071.142.9-27.6
OMI2 (Full) [401]Qwen2.5-Coder-7B [301]88.573.240.9-28.4
DeepSeekMath-7B-RL [658]-88.251.7-19.0-
Llama-3.1-405B [168]-89.053.8---
CoMAT [371]GPT-4 [3]93.7-40.4--
CoT [634]GPT-4 [3]94.5-41.850.2-
FCoT [523]GPT-4 [3]95.0----
Qwen2.5-Math-7B-Instruct [927]-95.283.6-41.6-
MathPrompter [303]GPT-4 [3]95.6----
Qwen2.5-Math-72B-Instruct [927]-95.985.9-49.0-
DeepSeek-R1-Distill-Qwen-7B [227]--92.8-49.137.6
DeepSeek-R1-Distill-Qwen-32B [227]--94.3-62.157.2
Structured Language Deep Reasoning
STaR [1012]Llama-2-7B [743]58.216.0---
ENVISIONS [897]Llama-2-7B [743]59.019.0---
MAmmoTH [1006]Code-Llama-7B [639]59.4----
MathCoder-CL [783]Code-Llama-7B [639]67.830.2---
ToRA-Code [217]Llama-2-7B [743]72.6----
Brain [107]Code-Llama-7B [639]74.0----
DeepSeek-Coder-7B [226]-77.444.4---
SIaM [978]Qwen-2-Math-Base81.550---
OC-SFT-1 [401]Qwen2.5-Coder-7B [301]86.770.937.7-27.5
PyEdu [401]Qwen2.5-Coder-7B [301]85.871.440.9-25.8
Qwen2.5-Math-7B-Instruct [927]-94.685.2-55.6-
Qwen2.5-Math-72B-Instruct [927]-95.888.1-60.6-
QuaSAR [634]GPT-4 [3]96.5-55.444.6-
MathDivide [687]GPT-4 [3]96.8---
", + "image_path": "ebcdb7892865c413666c63573d7f974aac12588169f830423b0bd269bf85e3b2.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 448, + 506, + 471 + ], + "lines": [ + { + "bbox": [ + 104, + 448, + 506, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 448, + 506, + 471 + ], + "type": "text", + "content": "Table 1: Performance of various deep reasoning formats, sorted primarily by GSM8K scores. “-” indicates that the paper did not report this score." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 491, + 506, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 491, + 506, + 646 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 506, + 646 + ], + "type": "text", + "content": "approach allows for the scaling of inference-time computations by implicitly performing reasoning within the latent space through recurrent depth. (3) Reasoning Manager Driven Latent Space Deep Reasoning: Inspired by these, Schone et al. [647], Geiping et al. [204], and Saunshi et al. [646] propose a mechanism similar to a continuous reasoning manager, which iteratively governs a trained \"recurrent block\" as a recurrent \"thought block\" [511]. This method integrates deeper model layers during reasoning, enhancing performance without needing specialized training data, and even outperforming larger RLLMs. Additionally, ITT [109] leverages the original transformer layer as a recurrent \"thought block\", selecting key tokens via adaptive token routing and controlling reasoning depth with residual thinking connections, enabling more efficient processing of critical tokens. Further, System-1.5 Reasoning [808] defines two dynamic shortcuts. The Model Depth Shortcut (DS) lets non-critical tokens exit early via lightweight adapter branches while routing critical tokens through deeper Transformer layers, thus supporting adaptive, vertical reasoning. 
The Step Shortcut (SS) reuses hidden states across decoding steps to bypass trivial iterations and enable horizontal reasoning in latent space." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 658, + 241, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 658, + 241, + 670 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 241, + 670 + ], + "type": "text", + "content": "4.2 Deep Reasoning Learning" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": "Insufficient deep reasoning in RLLMs can significantly degrade performance [758, 823]. As a result, research has focused on improving reasoning through training. Supervised fine-tuning (SFT) [1058] stabilizes model outputs by serving as a memory process [883], while reinforcement learning (RL) enables generalization and self-learning [227, 137, 276, 898]. 
Recent studies for deep reasoning" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 52 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 52 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 52 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 52, + 187, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 52, + 187, + 55 + ], + "spans": [ + { + "bbox": [ + 141, + 52, + 187, + 55 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 115, + 89, + 296, + 149 + ], + "blocks": [ + { + "bbox": [ + 136, + 73, + 272, + 87 + ], + "lines": [ + { + "bbox": [ + 136, + 73, + 272, + 87 + ], + "spans": [ + { + "bbox": [ + 136, + 73, + 272, + 87 + ], + "type": "text", + "content": "(a) Deep Reasoning Imitation" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 115, + 89, + 296, + 149 + ], + 
"lines": [ + { + "bbox": [ + 115, + 89, + 296, + 149 + ], + "spans": [ + { + "bbox": [ + 115, + 89, + 296, + 149 + ], + "type": "image", + "image_path": "787c5674fba7b0ce5e4ca3ac3eefd20babe3c384dc807cab022b3df606b88f7a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 304, + 89, + 496, + 148 + ], + "blocks": [ + { + "bbox": [ + 320, + 74, + 477, + 87 + ], + "lines": [ + { + "bbox": [ + 320, + 74, + 477, + 87 + ], + "spans": [ + { + "bbox": [ + 320, + 74, + 477, + 87 + ], + "type": "text", + "content": "(b) Deep Reasoning Self-Learning" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 89, + 496, + 148 + ], + "lines": [ + { + "bbox": [ + 304, + 89, + 496, + 148 + ], + "spans": [ + { + "bbox": [ + 304, + 89, + 496, + 148 + ], + "type": "image", + "image_path": "04fef9422d7990eb4d902d3c902905109bd7fe0911512bee51a344a37488531e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 159, + 504, + 194 + ], + "lines": [ + { + "bbox": [ + 104, + 159, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 504, + 194 + ], + "type": "text", + "content": "Figure 6: The different learning strategies of deep reasoning learning, including deep reasoning imitation of the data from advanced deep reasoning systems, like advanced RLLMs, MCTS, etc.; deep reasoning self-learning from preference-based RL by implicit reward." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 216, + 504, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 504, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 504, + 281 + ], + "type": "text", + "content": "learning have explored using SFT to imitate advanced reasoning in RLLMs and applying RL to enhance self-improvement in reasoning. 
As illustrated in Figure 6, this section outlines two key approaches to improve deep reasoning: (1) Deep Reasoning Imitation (§ 4.2.1), which involves learning reasoning from human-annotated or distilled data through SFT, and (2) Deep Reasoning Self-Learning (§ 4.2.2), where models improve reasoning through preference-based RL with implicit rewards. The performance of these methods is shown in Table 2." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 294, + 249, + 306 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 294, + 249, + 306 + ], + "spans": [ + { + "bbox": [ + 105, + 294, + 249, + 306 + ], + "type": "text", + "content": "4.2.1 Deep Reasoning Imitation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 312, + 506, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 312, + 506, + 554 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 506, + 554 + ], + "type": "text", + "content": "Deep reasoning in RLLMs can be effectively achieved by mimicking advanced reasoning systems, such as human reasoning [558, 61, 115, 403], advanced RLLMs [227, 58, 957, 370, 102], and scaling-augmented RLLMs [410, 1003, 596, 1136, 41]. This approach enables the model to learn complex reasoning patterns and generalize across tasks [937, 416]. Specifically, (1) Imitation from Human: Earlier, Cobbe et al. [141] first propose the deep reasoning imitation paradigm using human examples. ALT [558] improves RLLM reasoning by generating larger datasets of human-annotated logical templates, which fosters deeper reasoning [241]. To enhance diversity, EIT [61] promotes simpler human-generated plans, while LLMs contribute more nuanced reasoning, facilitating collaboration between human input and AI. 
(2) Imitation from Advanced RLLMs: A body of work utilizes zero-shot prompting to guide large teacher RLLMs in generating reasoning rationale, which is then used to fine-tune smaller RLLMs, marking the beginning of deep reasoning imitation [256, 352, 938, 521]. Additionally, AceMath [500] applies few-shot prompting to distill Long CoT samples from advanced LLMs, followed by multi-stage quality-guided SFT to enhance performance. Chen et al. [107] separate the data synthesis process into planning and reasoning stages, thereby improving reasoning quality. DART-Math [738] effectively distills complex queries requiring deeper reasoning during synthesis, advancing deep reasoning capabilities. Further, Ahmad et al. [7] propose OpenCodeReasoning, expanding this paradigm to the code scenarios. (3) Imitation from Scaling-augmented RLLMs: Earlier, Bansal et al. [34] enhance data quality by scaling the sampling size and length, boosting imitation performance [481, 1005]. Yang et al. [927] and Zhao et al. [1090] further improve data quality by scaling sampling and selecting samples through sample feature or an additional reward model. Additionally, Li et al. [410] identify optimal deep reasoning paths through MCTS, advancing imitation effectiveness." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 558, + 506, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 558, + 506, + 669 + ], + "spans": [ + { + "bbox": [ + 104, + 558, + 506, + 669 + ], + "type": "text", + "content": "Recent studies [299, 550] show that distilling knowledge from advanced RLLM APIs like O1 [307] and R1 [227] significantly enhances the performance of smaller LLMs [424, 223]. This method, employing supervised fine-tuning, boosts model performance on complex mathematical reasoning tasks, sometimes surpassing the teacher models' performance. 
Building on these findings, LIMO [967], S1 [560], and RedStar [902] argue that a large number of imitation samples is unnecessary. They demonstrate that even a minimal set of samples can activate deep reasoning capabilities in foundational LLMs. For practical applications, Turtel et al. [747] showcase how these techniques can predict future events beyond a model's knowledge cutoff. Sun et al. [701], Yang et al. [928] and Zhao et al. [1093] further enhance deep reasoning imitation by selecting high-quality samples from large datasets, thereby improving the quality of the imitation data." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 681, + 268, + 693 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 681, + 268, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 268, + 693 + ], + "type": "text", + "content": "4.2.2 Deep Reasoning Self-Learning" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "While simple imitation can yield strong performance, current models still rely heavily on human annotations or outputs from more advanced models for both imitation and distillation [502]. 
To" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 188, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 188, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 188, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 109, + 72, + 501, + 381 + ], + "blocks": [ + { + "bbox": [ + 109, + 72, + 501, + 381 + ], + "lines": [ + { + "bbox": [ + 109, + 72, + 501, + 381 + ], + "spans": [ + { + "bbox": [ + 109, + 72, + 501, + 381 + ], + "type": "table", + "html": "
ModelData SizeBase ModelGSM8KMATHMATH-500AIME2024GPQAOlympiadBench
Deep Reasoning Imitation
SFT [938]200KLlama-3.1-8B [168]---54.13.5-
Retro-Enh [115]14MLlama-3-8B [168]45.121.7----
Query-Exp [115]24MLlama-3-8B [168]51.323.1----
Res-Div [115]14MLlama-3-8B [168]53.023.2----
MetaMath [738]0.40MMistral-7B [318]76.529.8---5.9
ALT-FLDx2 [558]100KLlama-3.1-70B [168]83.324.4----
EIT [61]15KLlama-2-70B [743]84.132.5----
MathScale [738]2.0MMistral-7B [318]74.835.2----
Tutor-Amp [115]11MLlama-3-8B [168]64.435.9----
MMIQC [738]2.3MMistral-7B [318]75.437.4---9.4
VRT [738]0.59MMistral-7B [318]82.338.7---8.7
KPMath-Plus [738]1.6MMistral-7B [318]82.146.8----
Llama-2-70B-Xwin-Math-V1.1 [385]1.4MLlama-2-70B [743]90.252.5---16.3
DART-Math-Mistral-7B [738]591KMistral-7B [318]81.145.5---14.7
DART-Math-Llama-3-70B [738]591KLlama-3-70B [168]89.656.1---20.0
Rejection Sampling [410]197KQwen2.5-7B [926]87.170.0-10.0-27.1
Evol-Instruct-7B [514]905KQwen2.5-Math-7B [927]88.5-77.416.7--
FastMCTS [410]288KQwen2.5-7B [926]88.974.0-20.0-27.5
KPDDS-7B [295]800KQwen2.5-Math-7B [927]89.9-76.010.0--
DeepSeek-R1-Distill-Qwen-7B [227]800KQwen2.5-7B-Instruct [926]91.7-91.643.3--
Openmathinstruct-7B [740]14MQwen2.5-Math-7B [927]92.0-79.610.0--
NuminaMath [967]100KQwen2.5-Math-7B [927]92.9-81.820.0--
PromptCoT-DS-7B [1090]115KDeepSeek-R1-Distill-Qwen-7B [227]92.6-93.060.0--
PromptCoT-Qwen-7B [1090]905KQwen2.5-Math-7B [927]93.3-84.026.7--
AceMath-7B-Instruct [500]1.2MQwen2-Math-7B-Instruct [927]93.783.1---42.2
AceMath-72B-Instruct [500]1.2MQwen2.5-Math-72B-Instruct [927]96.486.1---48.4
NuminaMath [967]100KQwen2.5-32B-Instruct [926]--59.26.525.836.7
OpenThoughts [967]114KQwen2.5-32B-Instruct [926]--80.650.242.956.3
Sky-T1-32B-Preview [724]17KQwen2.5-32B-Instruct [926]--82.443.356.8-
Journey Learning [299]5KQwen2.5-Math-72B [927]--87.243.3--
STILL-2 [550]3.9KQwen2.5-32B-Instruct [926]--90.246.755.1-
Bespoke-32B [362]17KQwen2.5-32B-Instruct [926]--93.063.358.1-
s1 [560]1KQwen2.5-32B-Instruct [926]--93.056.759.6-
DeepSeek-R1-Distill-Qwen-32B [227]800KQwen2.5-32B-Instruct [926]--94.372.662.1-
LIMO [967]817Qwen2.5-32B-Instruct [926]--94.815.866.766.8
Deep Reasoning Self-Learning
DPO [302]40KDeepSeek-Math-7B-Base [658]74.834.9----
RefT [302]40KDeepSeek-Math-7B-Base [658]71.436.0----
Self-Explore [302]40KDeepSeek-Math-7B-Base [658]78.637.7----
SimPO [723]10KQwen2.5-Math-7B-Instruct [927]88.840.056.6---
DPO [446]11KDeepSeek-Math-7B-Instruct [658]-48.7----
TPO [446]11KDeepSeek-Math-7B-Instruct [658]-51.3----
DPO [446]11KQwen2-7B-Instruct [925]-54.3----
TPO [446]11KQwen2-7B-Instruct [925]-55.5----
MCTS [74]15KDeepSeek-Math-7B-Base [658]83.264.0----
SBS [74]15KDeepSeek-Math-7B-Base [658]84.166.3----
FastMCTS+Branch-DPO [410]152KFastMCTS-7B [410]89.975.4-20.0-29.6
", + "image_path": "0a51aaca5f29b7a1155025d64a0e6be21201b8129c276ddc53c0a7fa47545014.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 387, + 504, + 410 + ], + "lines": [ + { + "bbox": [ + 104, + 387, + 504, + 410 + ], + "spans": [ + { + "bbox": [ + 104, + 387, + 504, + 410 + ], + "type": "text", + "content": "Table 2: Performance of various deep reasoning learning methods, sorted primarily by Math or Math-500 scores. “-” indicates that the paper did not report this score." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "content": "address this limitation, recent research has focused on enabling more advanced reasoning through techniques like self-play and self-learning [948, 1077, 409, 624]. Specifically, self-learning methods can be classified into two paradigms, differentiated by their sampling strategies:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 514, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 506, + 723 + ], + "type": "text", + "content": "(1) Self-Learning from Direct Sampling: The earliest method, STaR [1012], utilizes In-Context Learning (ICL) to sample deep reasoning results [657] and uses the correctness of the final answer as an implicit reward for self-learning [258, 581, 582, 1059, 826, 462]. Further, ReST [225] extends this by introducing a Grow-Improve paradigm, where self-generated reasoning is first annotated with rewards and then enhanced via offline RL algorithms. However, these approaches can be fragile, especially when the reward process lacks robustness. 
Inspired by the Expectation-Maximization (EM) algorithm, Singh et al. [674] propose a method that generates rewards and iteratively optimizes LLMs to achieve the best performance on a validation set, significantly improving robustness. To further strengthen the reward process, a series of work introduces a method to adapt incorrect solutions, training a verifier [155, 262] or utilize entropy [809, 1040] to select or refine the reward process and improve self-learning quality. (2) Self-Learning from Tree Search: Early deep learning methods, such as EXIT [18], combined MCTS with deep neural networks for reinforcement learning, iteratively self-training the network to guide the tree search and enhance reasoning. Building on this, CPO [1065] and TPO [446] align each step of Long CoT reasoning with the corresponding tree search path, using Tree of Thoughts (ToT) [955] preference information to support deeper reasoning [951, 302]. Li [422] propose Policy-Guided Tree Search (PGTS), integrating RL with structured tree exploration for more efficient navigation of reasoning paths. Further developments, such as AlphaMath [74], AlphaLLM-CPL [814], and TongGeometry [1029], refine MCTS behavior through stepwise trajectory pair extraction and curriculum preference learning, boosting LLM reasoning abilities [611, 412, 872]." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 125, + 72, + 282, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 72, + 282, + 85 + ], + "spans": [ + { + "bbox": [ + 125, + 72, + 282, + 85 + ], + "type": "text", + "content": "Takeaways: Imitation & Self-Learning" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 123, + 91, + 489, + 163 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 123, + 91, + 488, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 91, + 488, + 114 + ], + "spans": [ + { + "bbox": [ + 123, + 91, + 488, + 114 + ], + "type": "text", + "content": "- Imitating deep reasoning from advanced RLLMs, and scaling-augmented methods like MCTS can 
help models learn complex reasoning patterns with fewer samples." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 123, + 116, + 488, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 116, + 488, + 138 + ], + "spans": [ + { + "bbox": [ + 123, + 116, + 488, + 138 + ], + "type": "text", + "content": "- Self-learning techniques, including reinforcement learning and tree search, allow RLLMs to enhance their reasoning abilities over time." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 123, + 140, + 489, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 140, + 489, + 163 + ], + "spans": [ + { + "bbox": [ + 123, + 140, + 489, + 163 + ], + "type": "text", + "content": "- The combination of imitation from advanced RLLMs and self-learning techniques strengthens RLLM reasoning, leading to strong performance on complex tasks." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 186, + 294, + 200 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 186, + 294, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 294, + 200 + ], + "type": "text", + "content": "5 Feasible Reflection for Long CoT" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 210, + 504, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 210, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 210, + 504, + 266 + ], + "type": "text", + "content": "Feasible Reflection is a pivotal component of Long CoT reasoning, enabling LLMs to handle complex tasks through iterative feedback and refinement [406, 192]. 
Specifically, it comprises two primary stages: (1) Feedback (" + }, + { + "bbox": [ + 104, + 210, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 104, + 210, + 504, + 266 + ], + "type": "text", + "content": " 5.1), which generates feedback signals " + }, + { + "bbox": [ + 104, + 210, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_i" + }, + { + "bbox": [ + 104, + 210, + 504, + 266 + ], + "type": "text", + "content": " to correct node " + }, + { + "bbox": [ + 104, + 210, + 504, + 266 + ], + "type": "inline_equation", + "content": "n_j" + }, + { + "bbox": [ + 104, + 210, + 504, + 266 + ], + "type": "text", + "content": " in Equation (5); and (2) Refinement (" + }, + { + "bbox": [ + 104, + 210, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 104, + 210, + 504, + 266 + ], + "type": "text", + "content": " 5.2), which adjusts the subsequent node " + }, + { + "bbox": [ + 104, + 210, + 504, + 266 + ], + "type": "inline_equation", + "content": "n_{i+1}" + }, + { + "bbox": [ + 104, + 210, + 504, + 266 + ], + "type": "text", + "content": " according to the feedback in Equation (6)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 278, + 171, + 289 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 278, + 171, + 289 + ], + "spans": [ + { + "bbox": [ + 105, + 278, + 171, + 289 + ], + "type": "text", + "content": "5.1 Feedback" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 299, + 504, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 299, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 299, + 504, + 365 + ], + "type": "text", + "content": "Feedback refers to the process of providing evaluations of both overall outputs and the processes that lead to them, with the goal of assessing their accuracy and quality [394, 396, 838, 220, 862]. 
This process, also referred to as critique or verification, can be executed using either natural language or structured data formats, which serve as the foundation for tree-search methods [113]. Specifically, as shown in Figure 7, feedback can be categorized into three distinct types: (1) Overall Feedback (" + }, + { + "bbox": [ + 104, + 299, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 104, + 299, + 504, + 365 + ], + "type": "text", + "content": " 5.1.1); (2) Process Feedback (" + }, + { + "bbox": [ + 104, + 299, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 104, + 299, + 504, + 365 + ], + "type": "text", + "content": " 5.1.2); (3) Hybrid Feedback (" + }, + { + "bbox": [ + 104, + 299, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 104, + 299, + 504, + 365 + ], + "type": "text", + "content": " 5.1.3)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 376, + 212, + 387 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 376, + 212, + 387 + ], + "spans": [ + { + "bbox": [ + 105, + 376, + 212, + 387 + ], + "type": "text", + "content": "5.1.1 Overall Feedback" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 395, + 504, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 395, + 504, + 450 + ], + "spans": [ + { + "bbox": [ + 104, + 395, + 504, + 450 + ], + "type": "text", + "content": "The overall feedback focuses on providing a global view of the entire process and results, rather than assessing each step individually. This feedback significantly enhances reasoning skills and reward modeling in reinforcement learning for RLLMs. Specifically, as shown in Figure 7 (a), the overall feedback can be categorized into three main sources: Outcome Reward Model, Rule Extraction, and RLLMs Feedback. The performance across these categories is summarized in Table 3." 
+ } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 155, + 467, + 254, + 557 + ], + "blocks": [ + { + "bbox": [ + 155, + 467, + 254, + 557 + ], + "lines": [ + { + "bbox": [ + 155, + 467, + 254, + 557 + ], + "spans": [ + { + "bbox": [ + 155, + 467, + 254, + 557 + ], + "type": "image", + "image_path": "924dba4b1d5c6d25f0eff62713bafcbf9c36e9cd21483aae275897e288afdd77.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 259, + 470, + 354, + 557 + ], + "blocks": [ + { + "bbox": [ + 259, + 470, + 354, + 557 + ], + "lines": [ + { + "bbox": [ + 259, + 470, + 354, + 557 + ], + "spans": [ + { + "bbox": [ + 259, + 470, + 354, + 557 + ], + "type": "image", + "image_path": "367cca6990189dfda7e049a2d562809a0e9869ca5351f1d2d1d1e74c0f9bcafd.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 356, + 470, + 452, + 557 + ], + "blocks": [ + { + "bbox": [ + 356, + 470, + 452, + 557 + ], + "lines": [ + { + "bbox": [ + 356, + 470, + 452, + 557 + ], + "spans": [ + { + "bbox": [ + 356, + 470, + 452, + 557 + ], + "type": "image", + "image_path": "dd4b7e43b794582020a033da732daf0b1be53e45111b8e9717414d483b50896e.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 156, + 559, + 264, + 657 + ], + "blocks": [ + { + "bbox": [ + 156, + 559, + 264, + 657 + ], + "lines": [ + { + "bbox": [ + 156, + 559, + 264, + 657 + ], + "spans": [ + { + "bbox": [ + 156, + 559, + 264, + 657 + ], + "type": "image", + "image_path": "6914fc78c8aeece2af825dabacd242f08c842b612001c13322264246623afb04.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 663, + 504, + 718 + ], + "lines": [ + { + "bbox": [ + 104, + 663, + 504, + 718 + ], + "spans": [ + { + "bbox": [ + 104, + 663, + 504, + 718 + ], 
+ "type": "text", + "content": "Figure 7: The feedback capabilities framework for feasible reflection consists of Overall Feedback and Process Feedback. Overall Feedback includes the Outcome Reward Model (ORM) in a value format, rule extraction for correctness judgment, and overall RLLMs based on RLLMs. Process Feedback includes the Process Reward Model (PRM) in a value format and step-level RLLMs, also based on RLLMs." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 273, + 560, + 449, + 657 + ], + "blocks": [ + { + "bbox": [ + 273, + 560, + 449, + 657 + ], + "lines": [ + { + "bbox": [ + 273, + 560, + 449, + 657 + ], + "spans": [ + { + "bbox": [ + 273, + 560, + 449, + 657 + ], + "type": "image", + "image_path": "75c5bea65e3eccbc79affd34b429b7f444436c52e5988975f4dec0ecb68328a3.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 188, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 188, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 188, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 52 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 52 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 52 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 52, + 184, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 52, + 184, + 55 + ], + "spans": [ + { + "bbox": [ + 
141, + 52, + 184, + 55 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 121, + 69, + 489, + 312 + ], + "blocks": [ + { + "bbox": [ + 121, + 69, + 489, + 312 + ], + "lines": [ + { + "bbox": [ + 121, + 69, + 489, + 312 + ], + "spans": [ + { + "bbox": [ + 121, + 69, + 489, + 312 + ], + "type": "table", + "html": "
ModelBase ModelChatChat_HardSafetyReasoningOverall
RLLMs
GPT-4o-mini [3]-95.060.780.883.780.1
Llama3.1-70B-Instruct [168]-97.270.286.082.884.0
Llama3.1-405B-Instruct [168]-97.274.687.177.684.1
GPT-4 [3]-95.374.386.987.686.0
GPT-4o [3]-96.176.186.688.186.7
Gemini-1.5-pro [719]-92.380.687.992.088.2
Self-taught Evaluator [803]Llama-3.1-70B-Instruct [168]96.684.281.091.588.3
SFR-LLMA-3.1-8B-Judge [791]Llama-3.1-70B-Instruct [168]95.577.786.295.188.7
SFR-NeMo-12B-Judge [791]Mistral-NeMo-Instruct-12B [725]97.282.286.595.190.3
SFR-LLMA-3.1-70B-Judge [791]Llama-3.1-70B-Instruct [168]96.984.891.697.692.7
Skywork-Critic-Llama-3.1-70B [791]Llama-3.1-70B-Instruct [168]96.687.993.195.593.3
LMUnit [641]Llama-3.1-70B-Instruct [168]----93.4
EvalPlanner [643]Llama-3.1-70B-Instruct [168]97.589.493.095.593.9
Outcome Reward Models
tulu-v2.5-13b-uf-rm [306]TULU-2-13B [305]39.442.355.547.446.1
Prometheus-2-7B [353]Mistral-7B-Instruct-v0.2 [318]85.549.177.176.572.0
Prometheus-8x7b-v2 [353]Mixtral-8x7B-Instruct [319]93.047.180.577.474.5
Critic-RM-Rank [991]Llama-3.1-70B-Instruct [168]97.058.084.092.082.8
RM [689]Llama-3.1-70B-Instruct [168]98.374.583.888.086.4
SynRM [968]Llama-3.1-70B-Instruct [168]97.576.886.388.587.3
CLoud [17]Llama-3-70B-Instruct [168]98.075.687.689.087.6
FLAME-RM-24B [753]PaLM-2-24B [16]92.275.789.693.887.8
SteerLM-RM 70B [829]Llama-2-70B-chat [743]91.380.390.692.888.8
Llama-3-OffsetBias-RM-8B [585]Llama-3-8B-Instruct [168]97.281.886.891.989.4
InternLM-20B-Reward [62]InternLM2-8B-Instruct [62]98.976.589.995.890.2
ArmoRM-Llama3-8B-v0.1 [771]Llama-3-8B-Instruct [168]96.976.892.297.390.8
Nemotron-4-340B-Reward [829]Nemotron-4-340B [4]95.887.192.293.692.2
Skywork-Reward-Llama-3.1-8B [466]Llama-3.1-70B-Instruct [168]95.887.390.696.292.5
Skywork-Reward-Gemma-2-27B [466]Gemma-2-27B-it [720]95.891.492.096.193.8
", + "image_path": "67120b715e01e4e0a5691bdc5abb971e8ebe54977751e351a5a6b2de0ae0cf33.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 317, + 504, + 340 + ], + "lines": [ + { + "bbox": [ + 104, + 317, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 104, + 317, + 504, + 340 + ], + "type": "text", + "content": "Table 3: Performance of various overall feedback methods, sorted primarily by Overall scores in RewardBench [367]. “-” indicates that the paper did not report this score." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 369, + 506, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 369, + 506, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 506, + 479 + ], + "type": "text", + "content": "Overall Feedback from Outcome Reward Model Since many tasks cannot be directly evaluated using accuracy or other standard metrics, research has increasingly focused on Outcome Reward Models (ORM), which provide value-based rewards for more general and quantifiable feedback [1127, 986, 467]. In 2021, OpenAI [141] has proposed a \"Gen-Verifier\" paradigm, which uses a specialized ORM to evaluate the accuracy of generated rationales, showing significant progress in feedback capabilities [658]. Ji et al. [315] introduce a trained knowledge scorer to analyze hallucinations in the reasoning process, providing feedback to RLLMs and improving the accuracy of their outputs over time. Moreover, Generative Reward Models [1048] use next-token prediction for overall feedback, which seamlessly integrates with instruction adjustments, leveraging inference-time calculations to improve ORM feedback." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 484, + 507, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 484, + 507, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 484, + 507, + 552 + ], + "type": "text", + "content": "However, specifically trained ORMs are often costly and not sufficiently robust. Building on this, Self-Rewarding Language Models (SRLMs) [1129] incorporate a self-consistency framework, optimizing feedback to improve model alignment and consistency [1047]. Yu et al. [991] introduce Critic-RM, combining RLLM-generated natural language criticism with corresponding feedback. This method filters high-quality feedback while jointly fine-tuning reward prediction and criticism generation, optimizing ORM performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 568, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 568, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 568, + 506, + 723 + ], + "type": "text", + "content": "Overall Feedback from Rule Extraction Although ORM has achieved significant improvements, its accuracy still falls short of " + }, + { + "bbox": [ + 104, + 568, + 506, + 723 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 104, + 568, + 506, + 723 + ], + "type": "text", + "content": ", preventing it from outperforming rule-based answer correction feedback [955, 234, 1079]. Previous studies, such as STaR [1012], ReST [225], and ReFT [745], have demonstrated that feedback based on final answer rewards is more effective than both PRM and ORM in mathematical scenarios [197]. Furthermore, Guo et al. [227] and Xie et al. [886] introduce a multi-stage RL framework that incorporates rule-based rewards, significantly enhancing both output accuracy and length while mitigating reward hacking through simple yet robust rules [30], such as format validation and result verification. 
In coding scenarios where direct rule-based feedback is difficult, OpenCodeInterpreter [1108], AceCoder [1014], O1-Coder [1076], and VerMCTS [56] address this challenge by implementing an automated test-case synthesis pipeline, deriving rewards based on program performance [564, 216, 1115]. Additionally, Ma et al. [536] propose an automated approach to training a test case generator, which alleviates the scarcity of test cases and demonstrates that increasing the number of test cases correlates with improved reward quality. Moreover, Ma et al. [535] decompose problem-solving into structured coding subtasks: file localization, function" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": 
[ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "content": "localization, line localization, and code editing generation, and applies multi-viewed rule-based rewards." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 196 + ], + "type": "text", + "content": "Overall Feedback from RLLMs Research on feedback from RLLMs centers on detecting errors and biases through natural language feedback, also known as LLM-as-Judge, self-reflection or self-critique [274, 336, 29, 638, 549, 802, 1002, 895, 529]. This method has led to significant improvements across various tasks, particularly in self-correction [848, 1109, 206, 184, 1075]. Huang et al. [286] contend that traditional LLMs struggle to generate effective feedback without external signals, requiring the development of RLLMs with enhanced feedback capabilities [645, 398]. As a result, many studies leverage RLLMs' error-identification strengths, often stemming from their pretraining phase, to improve feedback generation and correction [965, 39, 40, 282]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 198, + 506, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 198, + 506, + 376 + ], + "spans": [ + { + "bbox": [ + 104, + 198, + 506, + 376 + ], + "type": "text", + "content": "Earlier, McAleese et al. [544] found that training RLLMs to learn self-critique and deep reasoning can further boost performance. Zhang et al. [1062] propose a self-contrast mechanism that compares multiple perspectives, identifies differences, and summarizes insights to resolve inconsistencies. However, these methods often offer task-independent feedback. To address this, Hao et al. 
[235] introduce AutoRace, which tailors evaluation criteria for specific tasks. The Reversal of Thought (RoT) framework [999] introduces a novel paradigm combining reverse reasoning with self-reflection, helping models identify the limits of their knowledge and enhance reasoning efficiency. Furthermore, ACR [1116] implements a scoring system for coding tasks, using LLM-as-a-Judge for quality assessment and LLM-as-a-Critic for critiquing low-quality code, improving consistency across benchmarks. Zheng et al. [1107] integrate code execution error data and feedback from RLLMs to improve code generation performance. Liu et al. [484] present AGSER, a method using attention-guided self-reflection to address hallucinations by splitting input queries into attentive and nonattentive components. Finally, Saha et al. [643] introduce EvalPlanner, which separates feedback into planning and reasoning components for more streamlined expression using existing RLLMs. More comprehensively, Hu et al. [274] outline the complete pipeline, key insights, and practical lessons for training RLLMs to function as judges." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 384, + 214, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 384, + 214, + 396 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 214, + 396 + ], + "type": "text", + "content": "5.1.2 Process Feedback" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 404, + 506, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 404, + 506, + 461 + ], + "spans": [ + { + "bbox": [ + 104, + 404, + 506, + 461 + ], + "type": "text", + "content": "Techniques combine process feedback with MCTS or RL rewards to provide automated, step-by-step guidance, reducing the need for labor-intensive annotations while enhancing reasoning capabilities [749, 344]. 
These techniques can be categorized into two main types based on the source of feedback: process reward models (PRMs) and prompted LLMs. The performance comparison are mainly shown in Table 4." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 470, + 507, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 470, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 470, + 507, + 723 + ], + "type": "text", + "content": "Process Feedback from Process Rewarded Model Recent studies highlight the significance of feedback in developing effective PRMs for complex reasoning tasks, particularly in a step-level view [134, 423, 528]. (1) Process Annotated PRM Training: Earlier, Lightman et al. [449] demonstrate that training process feedback with human-annotated data (PRM800K) surpasses outcome supervision in creating reliable reward models. However, this approach requires significant human effort. To address this, Wang et al. [792] introduce Math-Shepherd, a dataset that generates step-by-step supervision using a Tree Search-inspired method [73, 1001]. Following this, methods like QwQ [731], Skywork-o1 [570], AceMath [500], and PRIME [143] adopt similar techniques to enhance PRM performance. Additionally, Zhang et al. [1036] propose entropy regularization to improve model convergence. Rather than focusing solely on the first error step, Full-Step-DPO [903] assigns rewards for the entire reasoning chain, including error steps. VersaPRM [1015] extends PRMs across multiple domains, broadening their applicability. Similarly, Gu et al. [219] and Zhang et al. [1074] suggest training models with student preferences aligned to teacher preferences, ensuring effective preference distillation. Further, Wang et al. [807] propose VisualPRM400K and expand this paradigm to multimodal scenarios. 
(2) Outcome Annotated PRM Training: Alternative approaches, such as ReST-MCTS* [1032], OVM [979], Implicit PRM [1000], AutoPSV [506], and DVO [1038], leverage outcome supervision or implicit feedback to train PRMs, reducing the need for extensive human-annotated data [891, 643]. UAS [981] incorporates uncertainty-aware value models [275] into feedback predictions [495, 167, 945, 1089]. Additionally, Aurora [710] utilizes ensemble prompting strategies and reference answers for reverse verification, training stronger PRMs that better align with the Long CoT data distribution. Furthermore, PAV [651] suggests that rewards should reflect reasoning progress, as measured by changes in the likelihood of producing a correct future response before and after each step. Yang et al. [932], Lee et al. [376], Yoon et al. [975] extend these paradigms" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 312, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 751 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 68, + 504, + 278 + ], + "blocks": [ + { + "bbox": [ + 106, + 68, + 504, + 278 + ], + "lines": [ + { + "bbox": [ + 106, + 68, + 504, + 278 + ], + "spans": [ + { + "bbox": [ + 106, + 68, + 504, + 278 + ], + "type": "table", + "html": "
ProcessBenchPRMBench
GSM8KMATHOlympiadBenchOmniMATHSimplicitySoundnessSensitivity
Process Reward Models
Qwen2.5-Math-7B-PRM [1102]Qwen2.5-Math-7B [927]39.452.239.433.1---
Math-Shepherd-PRM-7B [792]Mistral-7B [318]47.929.524.823.847.145.760.7
RLHFlow-PRM-Mistral-8B [156]Mistral-7B [318]50.433.413.815.846.757.568.5
RLHFlow-PRM-DeepSeek-8B [156]DeepSeek-7B [52]38.833.816.916.947.657.568.1
Skywork-PRM-1.5B [466]Qwen2.5-Math-1.5B-Instruct [926]59.048.019.319.233.628.648.8
Skywork-PRM-7B [466]Qwen2.5-Math-7B-Instruct [926]70.853.622.921.038.432.754.3
Qwen2-1.5B-PRM800k [700]Qwen2-Math-1.5B-Instruct [927]34.055.334.241.0---
Qwen2-1.5B-Math-Shepherd [700]Qwen2-Math-1.5B-Instruct [927]48.934.19.813.7---
Qwen2-1.5B-Epic50k [700]Qwen2-Math-1.5B-Instruct [927]55.636.120.230.0---
Qwen2.5-Math-7B-PRM800KQwen2.5-Math-7B-Instruct [927]68.262.650.744.3---
Qwen2.5-Math-PRM-7B [1102]Qwen2.5-Math-7B-Instruct [927]82.477.667.566.3---
Universal-PRM-7B [710]Qwen2.5-Math-7B-Instruct [927]85.877.767.666.4---
Critic Model
Llama-3.1-8B-Instruct [168]-27.526.718.519.2---
GPT-4o [3]-61.953.948.344.659.770.975.8
QwQ-32B-Preview [731]Qwen2.5-32B-Instruct [926]62.352.746.243.9---
DeepSeek-R1-Distill-Qwen-14B [227]Qwen2.5-14B-Instruct [926]67.338.829.932.1---
Dyve-14B [1111]DeepSeek-R1-Distill-Qwen-14B [227]68.558.349.047.2---
Qwen2.5-72B-Instruct [926]-76.261.854.652.2---
SCRIT [713]Qwen2.5-72B-Instruct [926]80.260.032.527.8---
ol-mini [307]-93.288.987.282.464.672.175.5
LLemma-PRM800k-7B [679]LLemma-7B [26]----51.450.966.0
LLemma-MetaMath-7B [679]LLemma-7B [26]----50.349.066.0
LLemma-oprn-7B [679]LLemma-7B [26]----49.049.864.1
MATHMinos-Mistral-7B [195]Mistral-7B [318]----51.454.466.5
ReasonEval-7B [877]LLemma-7B [26]----55.563.971.0
ReasonEval-34B [877]LLemma-34B [26]----51.563.073.1
Gemini-2.0-flash-exp [679]-----62.767.375.4
Gemini-2.0-thinking-exp-1219 [679]-----66.271.875.3
", + "image_path": "f0a50a247a0dd2634591eb3435973b764f754ecd980e41927ea8e8c53cf3b966.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 285, + 506, + 308 + ], + "lines": [ + { + "bbox": [ + 105, + 285, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 506, + 308 + ], + "type": "text", + "content": "Table 4: Performance of various process feedback methods on ProcessBench [1102] and PRM-Bench [679]. “-” indicates that the paper did not report this score." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 329, + 506, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 329, + 506, + 385 + ], + "spans": [ + { + "bbox": [ + 104, + 329, + 506, + 385 + ], + "type": "text", + "content": "to the token level. Moreover, Chen et al. [110] expand these into interactive agent scenarios, allowing for automatically learning reward models from the environment without additional manual annotation. Wang et al. [832] equip a dual-layer MLP module to evaluate the reward at each step, successfully integrating the policy model and PRM into a unified interface without additional process annotations, reducing over " + }, + { + "bbox": [ + 104, + 329, + 506, + 385 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 104, + 329, + 506, + 385 + ], + "type": "text", + "content": " of PRM parameters for efficient reasoning." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 396, + 506, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 396, + 506, + 670 + ], + "spans": [ + { + "bbox": [ + 104, + 396, + 506, + 670 + ], + "type": "text", + "content": "Process Feedback from RLLMs As PRM training remains heavily dependent on manually annotated data, recent research has explored methods for enabling models to generate their natural language feedback to optimize performance [910]. 
These approaches fall into two primary categories: (1) Model-Driven Feedback Reasoning: Earlier work such as React [956] and Reflexion [669] enhances RLLMs with natural language feedback at each action and reasoning step [196, 135, 89], improving decision-making in diverse tasks. Similarly, Step-DPO [365] uses RLLM to self-verify step-level positive and negative pairs for training through the DPO paradigm, achieving strong performance. Additionally, Sun et al. [702] propose a dynamic error classification framework that adapts based on model outputs, improving performance in mathematical reasoning tasks by addressing specific error patterns in math word problems. Furthermore, Xie et al. [889] and He et al. [245] iteratively apply MCTS to collect preference data, utilizing its forward-looking capabilities to decompose instance-level rewards into more precise step-level signals, thereby enhancing feedback accuracy. However, step-wise feedback often suffers from reliability issues, which can be mitigated by uncertainty quantification [973, 969], improving the reliability of step-wise verification in reward models for mathematical reasoning tasks. Moreover, Fu et al. [187] define the CoT Average Causal Effect (CACE) to capture causal relationships between steps, resulting in a causalized Long CoT where all steps are both correct and comprehensible. (2) Environment-Driven Feedback Reasoning: Given the increasing complexity of large models, there is growing interest in combining prompt-based LLMs with external environments to generate more interpretable and controllable feedback [885, 271]. For example, ORPS [996] and Drori et al. [162] minimize dependence on human annotations by using execution feedback, enabling models to autonomously refine their solutions. Additionally, Shrestha et al. 
[670] contribute by translating model outputs into Python code, helping to identify logical errors, gain insights into flawed reasoning processes, and guide improvements in mathematical reasoning. Xu et al. [897] integrate reasoning models with an interactive environment, enabling learning in more dynamic scenarios and creating a more generalizable self-learning framework." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 681, + 217, + 693 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 681, + 217, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 217, + 693 + ], + "type": "text", + "content": "5.1.3 Hybrid Feedbacks" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 700, + 504, + 723 + ], + "type": "text", + "content": "Given the respective advantages and limitations of Overall Feedback and Process Feedback, recent studies have sought to combine both for optimal feedback. Specifically, Zhang et al. 
[1078] propose" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 143, + 53, + 183, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 53, + 183, + 57 + ], + "spans": [ + { + "bbox": [ + 143, + 53, + 183, + 57 + ], + "type": "text", + "content": "REASONING SONG" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 128 + ], + "type": "text", + "content": "a consensus filtering mechanism that integrates Monte Carlo estimation with an LLM-as-judge to enhance both overall and stepwise feedback, thus improving reasoning accuracy. In a similar vein, Lin et al. 
[454] introduce Step-KTO, a framework combining stepwise process-level and outcome-level binary feedback, using PRM and ORM to guide language models toward coherent reasoning, with a focus on error correction through reflection mechanisms." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 126, + 135, + 216, + 147 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 135, + 216, + 147 + ], + "spans": [ + { + "bbox": [ + 126, + 135, + 216, + 147 + ], + "type": "text", + "content": "Takeaways: Feedback" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 122, + 153, + 488, + 237 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 122, + 153, + 488, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 153, + 488, + 176 + ], + "spans": [ + { + "bbox": [ + 122, + 153, + 488, + 176 + ], + "type": "text", + "content": "- Evolving Feedback Models: Feedback mechanisms, including overall, process, and hybrid feedback, are crucial for improving the reasoning capabilities of RLLMs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 123, + 178, + 488, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 178, + 488, + 201 + ], + "spans": [ + { + "bbox": [ + 123, + 178, + 488, + 201 + ], + "type": "text", + "content": "- Innovative Approaches in Process Feedback: Process feedback using techniques like PRMs with MCTS enhances Long CoT, though challenges like reward hacking remain." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 123, + 203, + 488, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 203, + 488, + 237 + ], + "spans": [ + { + "bbox": [ + 123, + 203, + 488, + 237 + ], + "type": "text", + "content": "- Self-Reflection and Model-Driven Feedback: Self-reflection and model-driven feedback improve RLLM performance by enabling error detection, task-specific insights, and more autonomous learning." 
+ } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 257, + 179, + 267 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 257, + 179, + 267 + ], + "spans": [ + { + "bbox": [ + 105, + 257, + 179, + 267 + ], + "type": "text", + "content": "5.2 Refinement" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 274, + 504, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 504, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 504, + 319 + ], + "type": "text", + "content": "Refinement refers to the process of addressing errors in reasoning based on prior feedback. As shown in Figure 8, refinement methods can be grouped into three primary categories: prompt-based refinement generation (§ 5.2.1), SFT-based refinement imitation (§ 5.2.2), and RL-based refinement learning (§ 5.2.3)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 330, + 301, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 330, + 301, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 330, + 301, + 342 + ], + "type": "text", + "content": "5.2.1 Prompt-based Refinement Generation" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 349, + 506, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 349, + 506, + 503 + ], + "spans": [ + { + "bbox": [ + 104, + 349, + 506, + 503 + ], + "type": "text", + "content": "Research on prompt-based refine generation focuses on enhancing the performance of LLMs through iterative self-refinement mechanisms [578, 1091, 98, 469, 1028, 754, 818, 546]. A prominent approach involves prompting RLLMs to generate initial outputs, followed by self-feedback that iteratively refines and improves performance across tasks such as dialogue generation and mathematical reasoning [645, 539, 1101, 669, 549, 345, 750, 482], which even much reduce the hallucinations [289, 315]. 
Noteworthy methods, like Self-Backtracking [944], Refiner [590], and BackMath [1055], allow LLMs to adjust their reasoning autonomously, reducing unnecessary complexity in decision-making [868]. Further, Havrilla et al. [238] extend the paradigm by integrating overall-level and step-level refinements, improving refinement performance. Yang et al. [950] propose a method to decompose the self-correction capability of LLMs into \"confidence\" and \"critique\" capacities, designing probabilistic metrics to evaluate them and exploring the role of reflection mechanisms in model behavior. Additionally, MCTSr [1033], LLM2 [930], ReST-MCTS* [1032] and ReARTeR [703] emphasize dynamic reflection through iterative error correction and confidence adjustments, allowing models to autonomously refine reasoning strategies [186]. He et al. [240]" + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 112, + 538, + 287, + 615 + ], + "blocks": [ + { + "bbox": [ + 112, + 523, + 292, + 536 + ], + "lines": [ + { + "bbox": [ + 112, + 523, + 292, + 536 + ], + "spans": [ + { + "bbox": [ + 112, + 523, + 292, + 536 + ], + "type": "text", + "content": "(a) Prompt-based Refinement Generation" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 112, + 538, + 287, + 615 + ], + "lines": [ + { + "bbox": [ + 112, + 538, + 287, + 615 + ], + "spans": [ + { + "bbox": [ + 112, + 538, + 287, + 615 + ], + "type": "image", + "image_path": "b3686b17aa6dae7dfb30b34c5e285af765d180305957e5c15bbbeed64d436326.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 304, + 540, + 494, + 615 + ], + "blocks": [ + { + "bbox": [ + 321, + 527, + 480, + 539 + ], + "lines": [ + { + "bbox": [ + 321, + 527, + 480, + 539 + ], + "spans": [ + { + "bbox": [ + 321, + 527, + 480, + 539 + ], + "type": "text", + "content": "(b) SFT-based Refinement Imitation" + } + ] + } + ], + "index": 
16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 540, + 494, + 615 + ], + "lines": [ + { + "bbox": [ + 304, + 540, + 494, + 615 + ], + "spans": [ + { + "bbox": [ + 304, + 540, + 494, + 615 + ], + "type": "image", + "image_path": "6b9af6579bd26e04c798016e01125ccc0cc0c837723baed594fe92c9e6c31804.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 115, + 638, + 182, + 683 + ], + "blocks": [ + { + "bbox": [ + 115, + 638, + 182, + 683 + ], + "lines": [ + { + "bbox": [ + 115, + 638, + 182, + 683 + ], + "spans": [ + { + "bbox": [ + 115, + 638, + 182, + 683 + ], + "type": "image", + "image_path": "75779ea3409037b107f99cc61b0546a161e6d6863edc845e12464cd3a1541651.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 696, + 504, + 719 + ], + "lines": [ + { + "bbox": [ + 104, + 696, + 504, + 719 + ], + "spans": [ + { + "bbox": [ + 104, + 696, + 504, + 719 + ], + "type": "text", + "content": "Figure 8: The three main categories of refinement methods, including Prompt-based Refinement Generation, SFT-based Refinement Imitation, and RL-based Refinement Learning." 
+ } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 184, + 640, + 215, + 673 + ], + "blocks": [ + { + "bbox": [ + 184, + 640, + 215, + 673 + ], + "lines": [ + { + "bbox": [ + 184, + 640, + 215, + 673 + ], + "spans": [ + { + "bbox": [ + 184, + 640, + 215, + 673 + ], + "type": "image", + "image_path": "51fec61d82ab2a769606104af5832df56e4604f317836d062424f65c9e9866bf.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 272, + 638, + 306, + 679 + ], + "blocks": [ + { + "bbox": [ + 216, + 647, + 274, + 668 + ], + "lines": [ + { + "bbox": [ + 216, + 647, + 274, + 668 + ], + "spans": [ + { + "bbox": [ + 216, + 647, + 274, + 668 + ], + "type": "text", + "content": "Reinforcement Learning" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 272, + 638, + 306, + 679 + ], + "lines": [ + { + "bbox": [ + 272, + 638, + 306, + 679 + ], + "spans": [ + { + "bbox": [ + 272, + 638, + 306, + 679 + ], + "type": "image", + "image_path": "23630b42c465d84d800277ffb7ad33291ea526c1dea42266eee59f4ed6d6ce9b.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 310, + 639, + 347, + 673 + ], + "blocks": [ + { + "bbox": [ + 310, + 639, + 347, + 673 + ], + "lines": [ + { + "bbox": [ + 310, + 639, + 347, + 673 + ], + "spans": [ + { + "bbox": [ + 310, + 639, + 347, + 673 + ], + "type": "image", + "image_path": "80703458fe6b97a41337e32d746ae10f1ad5d7ce4cd1e803f369ce673d59c38c.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 354, + 637, + 418, + 681 + ], + "blocks": [ + { + "bbox": [ + 239, + 624, + 386, + 636 + ], + "lines": [ + { + "bbox": [ + 239, + 624, + 386, + 636 + ], + "spans": [ + { + "bbox": [ + 239, + 624, + 
386, + 636 + ], + "type": "text", + "content": "(c) RL-based Refinement Learning" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 354, + 637, + 418, + 681 + ], + "lines": [ + { + "bbox": [ + 354, + 637, + 418, + 681 + ], + "spans": [ + { + "bbox": [ + 354, + 637, + 418, + 681 + ], + "type": "image", + "image_path": "748e7abf84b0255c1331edd540782869194e76185b531fae8e9affbfdea58ee8.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 422, + 637, + 482, + 681 + ], + "lines": [ + { + "bbox": [ + 422, + 637, + 482, + 681 + ], + "spans": [ + { + "bbox": [ + 422, + 637, + 482, + 681 + ], + "type": "text", + "content": "Aha! I think " + }, + { + "bbox": [ + 422, + 637, + 482, + 681 + ], + "type": "inline_equation", + "content": "1 + 1 = 3" + }, + { + "bbox": [ + 422, + 637, + 482, + 681 + ], + "type": "text", + "content": " should be corrected " + }, + { + "bbox": [ + 422, + 637, + 482, + 681 + ], + "type": "inline_equation", + "content": "1 + 1 = 2!" 
+ } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 143, + 49, + 184, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 49, + 184, + 56 + ], + "spans": [ + { + "bbox": [ + 143, + 49, + 184, + 56 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 149 + ], + "type": "text", + "content": "extend this paradigm to multi-agent scenarios, improving both reasoning and agent system performance [936, 1128]. Moreover, Yuksekgonul et al. 
[1009] and Peng et al. [593] further expand the paradigm by enabling automatic prompt optimization driven by LLMs. This approach facilitates more generalized and automated refinement of input prompts across a range of tasks, as opposed to focusing solely on refining output results. However, without oracle feedback, RLLM's self-refinement process fails, causing instability in both intermediate and final answers, leading to biases in simple factual queries and introducing cognitive biases in complex tasks [1051, 908]."
+            }
+          ]
+        }
+      ],
+      "index": 4
+    },
+    {
+      "bbox": [
+        105,
+        164,
+        277,
+        175
+      ],
+      "type": "title",
+      "angle": 0,
+      "lines": [
+        {
+          "bbox": [
+            105,
+            164,
+            277,
+            175
+          ],
+          "spans": [
+            {
+              "bbox": [
+                105,
+                164,
+                277,
+                175
+              ],
+              "type": "text",
+              "content": "5.2.2 SFT-based Refinement Imitation"
+            }
+          ]
+        }
+      ],
+      "index": 5
+    },
+    {
+      "bbox": [
+        104,
+        184,
+        506,
+        371
+      ],
+      "type": "text",
+      "angle": 0,
+      "lines": [
+        {
+          "bbox": [
+            104,
+            184,
+            506,
+            371
+          ],
+          "spans": [
+            {
+              "bbox": [
+                104,
+                184,
+                506,
+                371
+              ],
+              "type": "text",
+              "content": "Recent advancements in reflection-based reasoning for LLMs have led to frameworks that enhance model reasoning through self-refinement and error correction. A key approach is directly supervised fine-tuning, which allows models to learn error correction processes from advanced LLMs, thereby improving their reflective capabilities [14, 104, 406, 822, 99, 873]. Notable frameworks, such as rStar [615], improve smaller language models through self-play mutual reasoning, while Recursive Introspection [627] and RealCritic [714] use iterative feedback mechanisms to identify and correct errors to better self-improve [393]. Yan et al. [924] propose constructing step-wise self-correction data and implementing a training strategy that uses the above-constructed data to equip LLMs with spontaneous step-level self-correction capacities. Building upon these, Gao et al. [196] and Zhang et al. 
[1027] propose Math-Minos, which employs step-by-step natural language feedback as rationale tags, offering both correctness and detailed explanations for each step to train feedback mechanisms that justify and refine the reasoning process. Journey Learning [623] employs MCTS to parse node backtracking as natural language refinement, enhancing supervised fine-tuning and, thereby, improving reasoning performance. Additionally, approaches like ProgCo [682] emphasize iterative feedback and program-driven refinement to enhance critique and self-correction. Expanding these ideas to multimodal settings, frameworks, such as R3V [120] and MM-Verify [697], focus on integrating visual and textual reasoning [519, 813]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 385, + 272, + 397 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 385, + 272, + 397 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 272, + 397 + ], + "type": "text", + "content": "5.2.3 RL-based Refinement Learning" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 406, + 506, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 406, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 406, + 506, + 581 + ], + "type": "text", + "content": "In recent research, several approaches have been proposed to enhance the performance of refinement through reinforcement learning [673, 1056]. Earlier, Kumar et al. [358] observed that SFT of RLLMs often fails to promote self-refinement behaviors. This limitation stems from a distributional mismatch between data collection strategies and model responses, as well as the risk of behavioral collapse. To address this, SCoRe [358] enhances self-refinement by training the model on its own self-generated correction trajectories and employing regularization to guide the learning process. 
This method prioritizes fostering self-refinement during testing, rather than merely maximizing reward for specific prompts [1018]. Further, Guo et al. [227] demonstrate that applying outcome-level rewarded RL can trigger an \"Aha moment,\" activating the model's natural feedback and refinement behaviors without the need for human guidance. Moreover, Guo et al. [227], Zeng et al. [1017] and Ma et al. [529] explore initializing LLMs with iterative self-verification and self-correction behaviors, which are strengthened through supervised fine-tuning and further enhanced by outcome-level RL. Ma et al. [529] and Yang et al. [935] extend these capabilities with process-level RL, minimizing resource usage while enabling adaptive reasoning refinements during inference. More recently, Lee et al. [374] introduce an intrinsic verifier module to decide when refinements should be applied, using RL to further encourage self-refinement when errors are detected." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 125, + 592, + 223, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 592, + 223, + 604 + ], + "spans": [ + { + "bbox": [ + 125, + 592, + 223, + 604 + ], + "type": "text", + "content": "Takeaways: Refinement" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 123, + 609, + 489, + 715 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 123, + 609, + 488, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 609, + 488, + 643 + ], + "spans": [ + { + "bbox": [ + 123, + 609, + 488, + 643 + ], + "type": "text", + "content": "- Prompt-Based Refinement for Iterative Improvement: Iterative self-refinement through feedback loops helps LLMs improve reasoning and reduce errors like hallucinations but requires stable feedback to maintain accuracy." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 123, + 646, + 488, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 646, + 488, + 679 + ], + "spans": [ + { + "bbox": [ + 123, + 646, + 488, + 679 + ], + "type": "text", + "content": "- Supervised Fine-Tuning (SFT) for Error Correction: Supervised fine-tuning enhances LLMs by using iterative feedback and self-correction strategies to improve reasoning accuracy, especially for smaller models." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 123, + 681, + 489, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 681, + 489, + 715 + ], + "spans": [ + { + "bbox": [ + 123, + 681, + 489, + 715 + ], + "type": "text", + "content": "- Reinforcement Learning (RL) for Refinement: Reinforcement learning enhances self-refinement in LLMs by using self-generated corrections and adaptive strategies, reducing human intervention and resource consumption." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 
141, + 51, + 187, + 55 + ], + "spans": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 72, + 293, + 159 + ], + "blocks": [ + { + "bbox": [ + 111, + 72, + 293, + 159 + ], + "lines": [ + { + "bbox": [ + 111, + 72, + 293, + 159 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 293, + 159 + ], + "type": "image", + "image_path": "ae0384cb2d35989e0913fcc05ec7fe401f4d3acdd492815afce7dcdd64d2789c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 168, + 506, + 212 + ], + "lines": [ + { + "bbox": [ + 104, + 168, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 104, + 168, + 506, + 212 + ], + "type": "text", + "content": "Figure 9: Schematic representations of two common inference-time scaling strategies: (a) sequential scaling, which extends the length of Long CoT but is constrained by the reasoning boundaries of RLLMs; and (b) parallel scaling, which increases the sample size and aggregates multiple outcomes, yet does not surpass the performance of Pass@k." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 296, + 72, + 498, + 159 + ], + "blocks": [ + { + "bbox": [ + 296, + 72, + 498, + 159 + ], + "lines": [ + { + "bbox": [ + 296, + 72, + 498, + 159 + ], + "spans": [ + { + "bbox": [ + 296, + 72, + 498, + 159 + ], + "type": "image", + "image_path": "651c3a02f7c05e2fa7e8a9730a03db50638cef9382a4885f455c35d277bec9cc.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 237, + 313, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 237, + 313, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 237, + 313, + 251 + ], + "type": "text", + "content": "6 Extensive Exploration for Long CoT" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 265, + 504, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 265, + 504, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 265, + 504, + 310 + ], + "type": "text", + "content": "Exploration is a key capability in Long CoT reasoning, allowing models to navigate complex problem spaces through strategic branching and iterative refinement [1019, 381, 784, 751]. Recent studies emphasize exploration mechanisms, such as hypothesis branching and error backtracking via reflection, as essential for overcoming the constraints of linear reasoning paths [227]." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 314, + 506, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 314, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 506, + 392 + ], + "type": "text", + "content": "Current research focuses on three key areas: (1) Exploration Scaling (§ 6.1), which explores the breadth and depth of exploration and its impact on downstream applications, particularly in improving the size of the exploration path " + }, + { + "bbox": [ + 104, + 314, + 506, + 392 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 314, + 506, + 392 + ], + "type": "text", + "content": " in Equation (3); (2) Internal Exploration (§ 6.2), which focuses on training models to develop internal exploration capabilities, enabling more efficient and effective generation of " + }, + { + "bbox": [ + 104, + 314, + 506, + 392 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 314, + 506, + 392 + ], + "type": "text", + "content": " exploration paths " + }, + { + "bbox": [ + 104, + 314, + 506, + 392 + ], + "type": "inline_equation", + "content": "\\{n_{i+j}\\}_{j=1}^{m}" + }, + { + "bbox": [ + 104, + 314, + 506, + 392 + ], + "type": "text", + "content": " in Equation (3); and (3) External Exploration (§ 6.3), which examines how models can leverage external systems to enhance their exploratory abilities, facilitating the selection of the most effective path " + }, + { + "bbox": [ + 104, + 314, + 506, + 392 + ], + "type": "inline_equation", + "content": "n_{i+j}" + }, + { + "bbox": [ + 104, + 314, + 506, + 392 + ], + "type": "text", + "content": " from the " + }, + { + "bbox": [ + 104, + 314, + 506, + 392 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 314, + 506, + 392 + ], + "type": "text", + "content": " exploration paths in Equation (3)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 409, + 216, + 422 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 409, + 216, + 422 + ], + "spans": [ + { + "bbox": [ + 105, + 409, + 216, + 422 + ], + "type": "text", + "content": "6.1 Exploration Scaling" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 431, + 506, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 431, + 506, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 431, + 506, + 498 + ], + "type": "text", + "content": "Recent advances in inference-time scaling algorithms [333, 843, 57, 1053, 112] have attracted significant interest, particularly in scaling reasoning length to improve performance [524, 568, 405, 779]. Following Chen et al. [93], as shown in Figure 9, exploration scaling can be understood through two paradigms: (1) sequential scaling, akin to a series of resistors, which connects multiple reasoning processes using reflection; and parallel scaling, similar to parallel resistors, where a unified verification/feedback mechanism selects the most effective reasoning processes." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 515, + 217, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 515, + 217, + 528 + ], + "spans": [ + { + "bbox": [ + 104, + 515, + 217, + 528 + ], + "type": "text", + "content": "6.1.1 Sequential Scaling" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 536, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 506, + 723 + ], + "type": "text", + "content": "Sequential scaling refers to extending the reasoning output within a single model generation, significantly boosting model performance [383, 1052, 348]. Early works by Fu et al. [189] and Jaech et al. 
[307] show that increasing the length of the reasoning path can greatly improve performance. Tian et al. [736] enhances model reasoning iteratively by using prior answers as prompts for each successive round, thus enabling sequential scaling of the reasoning process. Building on this, later studies [314, 391] further explore enhancing logical depth through tree-based searches within a fixed compute budget, resulting in notable performance gains [11, 614]. Building upon this, Muennighoff et al. [560] introduce a inference-time scaling method that improves reasoning by fine-tuning and budget forcing, yielding substantial gains with additional computing at inference time. To address the constraints of attention spans, some studies focus on expanding reasoning length in latent spaces. Geiping et al. [204] and Chen et al. [109] enhance inference-time reasoning performance by implicitly scaling computation in latent space through recurrent depth. Setlur et al. [653] identified three core aspects of sequential scaling: (1) linking skills to asymmetric capabilities in base LLMs, such as connecting easy verification with difficult exploration; (2) enhancing exploration in reinforcement learning by utilizing the \"negative\" gradient of error trajectories, which extends search paths and links additional asymmetries; and (3) creating dynamic exploration by aligning task difficulty with training token budgets through tailored curricula." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 188, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 188, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 188, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "spans": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 205, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 205, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 205, + 84 + ], + "type": "text", + "content": "6.1.2 Parallel Scaling" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 91, + 506, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 91, + 506, + 147 + ], + "spans": [ + { + "bbox": [ + 104, + 
91, + 506, + 147 + ], + "type": "text", + "content": "Parallel scaling refers to the process of increasing the number of reasoning iterations during model generation and then verify these results to get the final output, which significantly enhances model performance [2, 864, 57, 485, 59, 1139]. Initially, Wang et al. [816] introduce the concept of self-consistency, demonstrating that multiple sampling processes followed by majority voting for effective exploration." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 156, + 506, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 156, + 506, + 410 + ], + "spans": [ + { + "bbox": [ + 104, + 156, + 506, + 410 + ], + "type": "text", + "content": "Verification Optimization The primary focus of recent research is optimizing verification, which can be categorized into two types: (1) Overall Verification: Recent works [1120, 831] divide the scaling process into two stages: \"reasoning\" and \"self-verification.\" By replacing majority voting in self-consistency with self-verification, these approaches show significant improvements [1083, 81, 1149, 364, 426]. In code scenarios, WoT [1071], CISC [716] and S* [392] scale the Long CoT in parallel, using output confidence or code execution results for verification, effectively assessing reasoning quality [635, 203, 278, 1134]. Further, Nye et al. [569] and Weir et al. [842], Stoisser et al. [690] train RLLMs to simulate code execution, removing the need for test cases in code-related parallel scaling. Chain-of-Verification [93] introduces meta-verification, sampling multiple verification instances to identify the correct one. Kim et al. [351], Chen et al. [111], and Vacareanu et al. [750] validate this approach empirically by evaluating answer correctness based on reasoning path properties. Moreover, Li et al. [421] tune a specific RLLM to verify and aggregate answers, showing improved performance. 
This suggests that PRM cannot replace a specially trained RLLM for verification due to training goal biases [1078]. Finally, Kang et al. [341] leverage self-uncertainty to select the best results. (2) Step Verification: Building on this, numerous researchers have explored step-level or finer-grained verification [84, 460]. Notably, DIVERSE [425], SSC-CoT [1098], and Fine-grained Self-Consistency [93] combine diverse reasoning paths with step-level verification. In addition, a series of works [676, 864, 517, 770, 853, 486] try to investigate how optimal scaling strategies based on MCTS can enhance smaller language models' performance. Their findings show that a 1B RLLM can outperform a 405B model on complex tasks through parallel scaling [988]. Despite these advancements in verification, Chen et al. [93] demonstrate that these strategies cannot surpass Best-of-N methods, suggesting that breakthroughs cannot solely rely on optimization-based verification [106]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 418, + 506, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 418, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 506, + 540 + ], + "type": "text", + "content": "Sampling Optimization Another key area of research focuses on generating diverse but less paths or strategies for efficient scaling [871, 765, 80, 668, 444, 681]. For instance, Zeng et al. [1020] aggregate the shortest yet most varied reasoning paths for better scalability. Similarly, Du et al. [164] adjust the sampling temperature to increase diversity, leading to improved scaling. Zhang et al. [1045] and Liu et al. [470] optimize both candidate solution generation (e.g., prompts, temperature, and top-p) and reward mechanisms (such as self-evaluation and reward types), offering diverse strategies for parallel scaling. Moreover, Qin et al. [617], Luo et al. [520], and Yu et al. 
[990] enhance RLLM reasoning by scaling sampling across multiple natural and programming languages or varied expressions. Finally, Yang et al. [943] introduces a method where a small set of seed data, with varied response lengths, guides the model to engage in deeper reasoning by selecting the shortest correct responses across various inference efforts." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 125, + 548, + 257, + 560 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 548, + 257, + 560 + ], + "spans": [ + { + "bbox": [ + 125, + 548, + 257, + 560 + ], + "type": "text", + "content": "Takeaways: Exploration Scaling" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 123, + 566, + 488, + 660 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 123, + 566, + 488, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 566, + 488, + 599 + ], + "spans": [ + { + "bbox": [ + 123, + 566, + 488, + 599 + ], + "type": "text", + "content": "- Exploration Mechanisms in Long CoT Reasoning: Exploration strategies like hypothesis branching and error backtracking are vital for overcoming limitations in linear reasoning paths and enhancing model performance." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 123, + 601, + 488, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 601, + 488, + 624 + ], + "spans": [ + { + "bbox": [ + 123, + 601, + 488, + 624 + ], + "type": "text", + "content": "- Scaling Exploration: Exploration can be scaled through sequential and parallel strategies to improve reasoning depth and efficiency." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 123, + 627, + 488, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 627, + 488, + 660 + ], + "spans": [ + { + "bbox": [ + 123, + 627, + 488, + 660 + ], + "type": "text", + "content": "- Verification and Sampling Optimization: Refining verification techniques and optimizing sampling for diverse reasoning paths are key to improving exploration efficiency and performance in Long CoT tasks." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 681, + 219, + 694 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 681, + 219, + 694 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 219, + 694 + ], + "type": "text", + "content": "6.2 Internal Exploration" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "content": "As noted in Chu et al. [137], Shen et al. [661], and Yang et al. [938], SFT serves as a memory process, while RL enhances generalization [359, 82]. 
Specifically, SFT stabilizes the model's output format," + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "spans": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 71, + 499, + 161 + ], + "blocks": [ + { + "bbox": [ + 111, + 71, + 499, + 161 + ], + "lines": [ + { + "bbox": [ + 111, + 71, + 499, + 161 + ], + "spans": [ + { + "bbox": [ + 111, + 71, + 499, + 161 + ], + "type": "image", + "image_path": "55a2cddee6720d6d5b6d79848689909b6e03f9c8563319f2fff7f35746a40240.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 112, + 169, + 493, + 244 + ], + "blocks": [ + { + "bbox": [ + 112, + 169, + 493, + 244 + ], + "lines": [ + { + "bbox": [ + 112, + 169, + 493, + 244 + ], + "spans": [ + { + "bbox": [ + 112, + 169, + 493, + 244 + ], + "type": "image", + "image_path": "302920f94ae85e94ce64fd964759f21a7a4160de1d28055d6f3573f758563039.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 255, + 506, + 289 + ], + "lines": [ + { + "bbox": [ + 104, + 255, + 506, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 506, + 289 + ], + "type": "text", + "content": "Figure 10: Two primary approaches for optimizing Internal Exploration: improving RL strategy through reference and value models, and designing reward strategies: either rule-based or model-based rewarding to enhance RL performance." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 312, + 504, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 312, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 504, + 357 + ], + "type": "text", + "content": "whereas RL improves its generalization capacity, which can increase learning efficiency by up to eight times in tasks such as mathematical reasoning [650]. Consequently, as shown in Figure 10, leading research emphasizes the role of RL and reward strategies in enhancing the exploration capabilities of LLMs without external assistance. The performance comparison is presented in Table 5." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 366, + 196, + 378 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 366, + 196, + 378 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 196, + 378 + ], + "type": "text", + "content": "6.2.1 RL Strategies" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 383, + 506, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 383, + 506, + 416 + ], + "spans": [ + { + "bbox": [ + 104, + 383, + 506, + 416 + ], + "type": "text", + "content": "Recent advancements in RL strategies for exploration have led to notable improvements in various tasks, particularly in reasoning tasks [699, 369, 313, 542, 882, 1017, 985, 268, 1010, 628, 150, 176, 686]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 422, + 506, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 506, + 597 + ], + "type": "text", + "content": "(1) Reward-free RL: The first series of work focuses on RL optimization algorithms. Additionally, OREO [773] propose an offline RL method that optimizes the soft Bellman equation, improving credit assignment for multi-step reasoning tasks and outperforming existing approaches in fields like mathematics and agent control. Liu et al. [476] propose Direct Advantage Policy Optimization, a novel offline RL method that leverages a separately trained critic to evaluate the accuracy of each reasoning step. This technique provides dense feedback for policy optimization, addressing both sparse rewards and training instability. Further, some research focuses on adjusting the focus of RL algorithms to optimize exploration in targeted aspects. Specifically, CPL [801], cDPO [457], and Focused-DPO [1043] enhance exploration in Long CoT by prioritizing critical or error-prone areas through preference optimization, improving accuracy in those regions. 
Bartoldson et al. [42] further adjusts the replay strategy of the training data, aiming to optimize reasoning performance. Li et al. [420] introduce Learning Impact Measurement (LIM), an automated method for evaluating and prioritizing training samples based on their alignment with model learning trajectories. This approach enables efficient resource use and scalable implementation. For instance, ThinkPO [942] uses short CoT reasoning outputs as rejected answers and longer ones as chosen answers for the same question, applying DPO to encourage prioritization of longer reasoning outputs [1131]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 601, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 506, + 723 + ], + "type": "text", + "content": "(2) Reward-based RL: Reward-model-based RL refers to approaches that use a reward model or a verifier to guide learning and decision-making in the absence of explicit rewards [1046, 174, 649, 279, 825, 847, 970]. Earlier, Proximal Policy Optimization (PPO) was first introduced by Schulman et al. [648], which alternates between interacting with the environment to collect data and optimizing a surrogate objective function via stochastic gradient ascent, surpassing DPO [306]. Subsequently, ReMax [436] eliminates the need for additional value models in PPOs. By incorporating variance reduction and REINFORCE [704] techniques, it reduces over four hyperparameters, resulting in lower GPU memory usage and faster training. Building on this, DeepSeekMath [658] proposes Group Relative Policy Optimization (GRPO), replacing traditional value models with improved sampling strategies, thus significantly accelerating learning and achieving performance on par with GPT-4 in mathematics. Hu [265] and Liu et al. [499] further refine GRPO with REINFORCE++ and Dr. 
GRPO," + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 188, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 188, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 188, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 52 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 52 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 52 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 52, + 187, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 52, + 187, + 55 + ], + "spans": [ + { + "bbox": [ + 141, + 52, + 187, + 55 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 111, + 70, + 500, + 373 + ], + "blocks": [ + { + "bbox": [ + 111, + 70, + 500, + 373 + ], + "lines": [ + { + "bbox": [ + 111, + 70, + 500, + 373 + ], + "spans": [ + { + "bbox": [ + 111, + 70, + 500, + 373 + ], + "type": "table", + "html": "
MethodBackboneGSM8KAIME 2024MATH 500GPQALiveCodeBench
Base Model
GPT-4o [3]-92.99.376.653.633.4
Llama-3.1-70B-Instruct [168]-94.113.368.0--
Claude 3.5 Sonnet [19]--16.078.365.038.9
Qwen2.5-Coder-32B-Instruct [301]--20.071.233.825.0
Qwen2.5-70B-Instruct [926]--20.079.449.033.0
Llama-3.3-70B-Instruct [168]--36.773.950.534.8
DeepSeek-V3 [463]--39.290.2-36.2
SFT Strategies
DeepSeek-R1-Distill-Llama-70B [227]--70.0--57.9
DeepSeek-R1-Distill-Qwen-32B [227]--72.6--54.6
START [388]QwQ-32B-preview [731]-66.794.463.647.3
RL Strategies
DPO [631]DeepSeekMath 7B [658]82.4----
KTO [171]DeepSeekMath 7B [658]82.5----
OREO [773]DeepSeekMath 7B [658]86.9----
PPO [648]GLM4-9B-SFT [211]85.5--31.524.3
GRPO [658]GLM4-9B-SFT [211]86.1--31.722.8
Eurus-2-7B-PRIME [143]Qwen2.5-Math-7B-Base [927]-26.779.2--
Search-o1 [418]QwQ-32B-preview [731]-56.786.463.633.0
Reward Strategies
OpenMath2 [739]Llama-3.1-70B [168]94.113.371.8--
Satori [661]Qwen-2.5-Math-7B93.923.383.6--
T1-SFT [264]Qwen2.5-32B [926]-24.983.449.5-
T1 [264]Qwen2.5-32B [926]-50.692.456.1-
DeepSeek-R1-lite [227]--52.591.658.551.6
rStar-Math [222]Qwen2.5-Math-7B [927]95.253.390.0--
QwQ-32B-preview [731]-95.553.390.658.240.6
ol-preview [307]--56.785.573.353.6
o3-mini-low [307]--60.0--61.8
ol-mini [307]--63.690.0-53.8
Kimi k1.5 [722]--77.596.2-62.5
QwQ-32B [731]--79.5--73.1
o3-mini-medium [307]--79.6--72.3
DeepSeek-R1 [227]--79.897.3-71.6
o1 [307]--83.396.4-67.4
o3-mini-high [307]--87.3--84.6
", + "image_path": "8906843300658ba9f577713856804416059d5d5e72ce14c0c9304e8987c15cd2.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 377, + 504, + 401 + ], + "lines": [ + { + "bbox": [ + 104, + 377, + 504, + 401 + ], + "spans": [ + { + "bbox": [ + 104, + 377, + 504, + 401 + ], + "type": "text", + "content": "Table 5: Performance of various internal exploration methods on different benchmarks, primarily ordered by AIME 2024. “-” indicates that the paper did not report this score." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 464, + 506, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 506, + 564 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 506, + 564 + ], + "type": "text", + "content": "respectively, simplifying the algorithm and enhancing its training. Additionally, Vassoyan et al. [752] and [1121] improve exploration efficiency in smaller models by modifying the KL penalty, thus enhancing performance under distribution shifts. Huang et al. [277] introduce Decoupled Value Policy Optimization (DVPO), a streamlined framework that replaces reward modeling with a pretrained global value model (GVM) and eliminates the interdependence between actor and critic. To address the high-quality demands of reward models, Cui et al. [143] propose PRIME (Process Reinforcement through IMplicit rEwards), which integrates the SFT model as a PRM within a unified reinforcement learning framework, enabling online updates through policy rollouts and outcome labels via implicit process rewards." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 568, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 568, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 568, + 506, + 723 + ], + "type": "text", + "content": "More recently, Liang et al. 
[439] introduce Self-aware Weakness-driven Problem Synthesis, a reinforcement-learning method that generates challenges tailored to an RLLM's specific weaknesses [863, 183]. By concentrating training on its most difficult aspects, the model achieves more focused and effective reasoning improvements [680]. Wang et al. [805] introduce ROLL, a method designed to support R1-level large-scale training of RLLMs, enabling the efficient exploration and optimization of reasoning paths within the Mixture-of-Experts (MOE) structure [788]. Fu et al. [188] introduce AReaL, a large-scale asynchronous reinforcement learning system for language reasoning, which enhances the efficiency and effectiveness of training RLLMs. Ma et al. [526] propose a novel method combining interleaved SFT and RL to address challenging questions where RL typically fails. This approach enables RLLMs to learn from mistakes and enhance reasoning abilities. Huang et al. [297] and Fu et al. [190] further improve exploration efficiency by integrating SFT and RL with prefix sampling. Frurthermore, Yan et al. [917] and Liang et al. [437] guide RLLMs in reasoning under off-policy reinforcement learning [413, 773], improving both training sample efficiency and learning stability [559]." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "spans": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 217, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 217, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 217, + 84 + ], + "type": "text", + "content": "6.2.2 Reward Strategies" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 92, + 506, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 92, + 506, + 345 + ], + "spans": [ + { + "bbox": [ + 104, + 
92, + 506, + 345 + ], + "type": "text", + "content": "Rule-rewarded RL The studies explore advancements in training advanced RLLMs using rule-rewarded RL to enhance exploration strategies and reasoning accuracy [296]. These efforts primarily focus on three types of rewards: (1) Correctness Rewarding: Correctness rewards are fundamental for guiding RLLMs toward accurate answers. Specifically, Singh et al. [674] introduce a binary reward system (positive or negative) to facilitate exploration, achieving simple yet effective performance improvements. Similarly, the DeepSeek-R1 [227] employs rule-extracted accuracy as an RL reward, scaling this approach to larger scenarios and training sizes, thereby enhancing both exploration and reasoning tasks [522, 170]. Furthermore, O1-CoderZhang et al. [1076], StepCoder [161], and SWE-RL [841] address challenges in code generation by developing a test case generator, which standardizes code testing, ensuring accurate generation [893, 994]. (2) Format Rewarding: Further, format rewards are used to encourage better reasoning paradigms. Guo et al. [227] introduce this concept to effectively guide reasoning and exploration [886]. Xie et al. [886] expanded on this with a three-stage, rule-based RL approach, enabling the Qwen-7B model to learn complex multi-path exploration, which significantly improved both output format and corresponding length consistency. Additionally, Wu et al. [855] propose TAPO (Thought-Augmented Policy Optimization), a framework that integrates external high-level guidance (\"thought patterns\") into RL, successfully balancing model exploration with external guidance. (3) Scaling rewarding: Moreover, scaling rewards are applied to promote longer reasoning chains and broader exploration. Recent studies [90, 583, 349] highlight the need for progressively scaled reasoning lengths to overcome the limitations of current reasoning approaches. As a result, research has focused on scaling exploration [886, 962]. 
However, excessive scaling can lead to inefficiency and overcomplicated reasoning [142]. Kimi-K1.5 [722], Yang et al. [943] and Arora and Zanette [22] proposed Long2Short techniques, favoring shorter, more accurate reasoning may also significantly improve efficiency and performance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 360, + 506, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 360, + 506, + 482 + ], + "spans": [ + { + "bbox": [ + 104, + 360, + 506, + 482 + ], + "type": "text", + "content": "Model-rewarded RL It refers to a class of techniques in which RL algorithms are enhanced by leveraging additional reward models, to guide exploration and improve decision-making processes [693]. Earlier in 2021, OpenAI [141] propose a \"Gen-Verifier\" paradigm to train a correctness-oriented ORM and used ORM-rewarded RL to surpass SFT performance. Recently, with rapid advancements in PRM, several studies [755, 1032, 518] have scaled reinforcement learning by enhancing exploration through step-level correctness rewarding [659, 1042]. Building on this, Hou et al. [264] introduce entropy rewards and dynamic regularization to further optimize the reasoning process [116]. STeCa [768] identifies suboptimal actions during exploration by comparing step-level rewards and adjusting trajectories to improve deep reasoning. Additionally, the Kimi-K1.5 model [722] extends PRM paradigms into multimodal scenarios, achieving state-of-the-art performance in multi-modal reasoning tasks through a streamlined reinforcement learning framework." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 125, + 494, + 257, + 505 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 494, + 257, + 505 + ], + "spans": [ + { + "bbox": [ + 125, + 494, + 257, + 505 + ], + "type": "text", + "content": "Takeaways: Internal Exploration" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 123, + 510, + 490, + 605 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 123, + 510, + 490, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 510, + 490, + 544 + ], + "spans": [ + { + "bbox": [ + 123, + 510, + 490, + 544 + ], + "type": "text", + "content": "- SFT and RL Synergy: The combination of Self-Feedback Training (SFT) and Reinforcement Learning (RL) improves model output stability and generalization, enhancing learning efficiency in reasoning tasks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 123, + 547, + 490, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 547, + 490, + 581 + ], + "spans": [ + { + "bbox": [ + 123, + 547, + 490, + 581 + ], + "type": "text", + "content": "- Advancements in RL Exploration: Recent RL strategies, including reward-model-free and reward-model-based approaches, optimize exploration and reasoning, improving efficiency in tasks like multi-step reasoning." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 123, + 582, + 490, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 582, + 490, + 605 + ], + "spans": [ + { + "bbox": [ + 123, + 582, + 490, + 605 + ], + "type": "text", + "content": "- Reward Strategies: Correctness, format, and scaling rewards help refine exploration and reasoning accuracy by guiding models toward better performance in specific areas." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 634, + 221, + 646 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 634, + 221, + 646 + ], + "spans": [ + { + "bbox": [ + 105, + 634, + 221, + 646 + ], + "type": "text", + "content": "6.3 External Exploration" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 655, + 506, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 655, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 104, + 655, + 506, + 722 + ], + "type": "text", + "content": "The exploration of coding strategies in AI systems is advancing through innovative frameworks aimed at enhancing search efficiency and decision-making quality. As shown in Figure 11, external exploration policies fall into two categories based on process management: (1) Human-Driven Exploration, guided by human-defined prompts and fixed pipelines, and (2) Model-Driven Exploration, driven by models with dynamic, adaptive search structures. The detailed performance comparison is presented in Table 6." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "spans": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 254, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 254, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 254, + 84 + ], + "type": "text", + "content": "6.3.1 Human-driven Exploration" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 91, + 506, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 91, + 506, + 245 + ], + "spans": [ + { + "bbox": [ + 
104, + 91, + 506, + 245 + ], + "type": "text", + "content": "Human-driven exploration refers to human-designed constant pipeline exploration for long-term exploration [479, 422]. Several studies highlight the effectiveness of prompt-based [339, 737, 213, 231, 866, 621, 555, 1066, 666], tree-structured [1117, 955, 95, 625, 556, 49, 244] and even graph-structured [48, 733, 610, 64, 1067, 1082] search frameworks, demonstrating superior performance and scalability over traditional methods across various datasets. Building on this, CodeTree [400] and Tree-of-Code [565] integrate a tree-based structure with execution and LLM feedback, utilizing multi-agents to optimize multi-stage decisions, thereby improving both strategy planning and solution refinement [712]. Cheng et al. [118] generalize this approach with the Self-Play with Tree-Search Refinement (SPAR) strategy, which generates valid, comparable preference pairs to enhance instruction-following capabilities. Bi et al. [54] and Light et al. [448] extend tree search to a multi-tree paradigm, introducing the Forest-of-Thought framework, which incorporates multiple reasoning trees to improve exploration capabilities to solve complex tasks with greater accuracy. Furthermore, Li et al. [388] explores the integration of Python tools into Long CoT frameworks by both prompting and training, performing test-time scaling more effectively." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 255, + 250, + 267 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 255, + 250, + 267 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 250, + 267 + ], + "type": "text", + "content": "6.3.2 Model-driven Exploration" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 274, + 504, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 504, + 308 + ], + "type": "text", + "content": "Building on previous research, model-feedback-assisted exploration has advanced significantly, which is driven by model and dynamic adaptive search structure, with optimization emerging as a central focus. Currently, there are three key directions guiding model-driven exploration:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 316, + 506, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 506, + 492 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 506, + 492 + ], + "type": "text", + "content": "Enhancing Exploration Logics Recent efforts have focused on improving exploration structures during iterations for better logical quality. (1) **Beam Search:** Earlier, Xie et al. [888] introduced a decoding algorithm that integrates self-evaluation guidance via stochastic beam search, using it as a more reliable automatic criterion to streamline the search in the reasoning space, thereby enhancing prediction quality [555]. Similarly, Zhu et al. [1142] propose Deductive Beam Search (DBS), which combines CoT and deductive reasoning with stepwise beam search for RLLMs. (2) " + }, + { + "bbox": [ + 104, + 316, + 506, + 492 + ], + "type": "inline_equation", + "content": "A^*" + }, + { + "bbox": [ + 104, + 316, + 506, + 492 + ], + "type": "text", + "content": " Search: On another front, Lehnert et al. 
[378] present Searchformer, which predicts " + }, + { + "bbox": [ + 104, + 316, + 506, + 492 + ], + "type": "inline_equation", + "content": "A^*" + }, + { + "bbox": [ + 104, + 316, + 506, + 492 + ], + "type": "text", + "content": " algorithm dynamics to improve task performance and reduce search steps [101]. Later, Kang et al. [338] introduce the MindStar (" + }, + { + "bbox": [ + 104, + 316, + 506, + 492 + ], + "type": "inline_equation", + "content": "M^*" + }, + { + "bbox": [ + 104, + 316, + 506, + 492 + ], + "type": "text", + "content": ") framework, which optimizes reasoning paths through beam search and Levin tree search methods, further enhancing reasoning performance. (3) " + }, + { + "bbox": [ + 104, + 316, + 506, + 492 + ], + "type": "inline_equation", + "content": "MCTS" + }, + { + "bbox": [ + 104, + 316, + 506, + 492 + ], + "type": "text", + "content": " Search: Building on the advantages of MCTS, a series of studies, such as Macro-o1 [1095], STILL-1 [323], SRA-MCTS [896], and RFTT [1046], utilize MCTS to guide more effective exploration [1039, 411, 335, 321, 1110, 613, 586, 452]. Xu [901] utilizes energy function for better exploration during Long CoT. Yao et al. [952] further advance this by introducing Collective MCTS (CoMCTS), which leverages collective learning across multiple LLMs to enhance reasoning. 
Further, MC-NEST [629] integrates Nash Equilibrium strategies to balance exploration and exploitation, improving LLM decision-making in multi-step" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 151, + 510, + 309, + 666 + ], + "blocks": [ + { + "bbox": [ + 151, + 510, + 309, + 666 + ], + "lines": [ + { + "bbox": [ + 151, + 510, + 309, + 666 + ], + "spans": [ + { + "bbox": [ + 151, + 510, + 309, + 666 + ], + "type": "image", + "image_path": "6462f102f8623b3fc4af62f2c0f413f3392b4362b8f808630fa2fdef3362d761.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 674, + 506, + 719 + ], + "lines": [ + { + "bbox": [ + 104, + 674, + 506, + 719 + ], + "spans": [ + { + "bbox": [ + 104, + 674, + 506, + 719 + ], + "type": "text", + "content": "Figure 11: External exploration policies can be classified into two categories based on the management role of the process: (1) Human-Driven Exploration, which is guided by human-defined prompts and fixed pipelines, and (2) Model-Driven Exploration, which is driven by models and employs dynamic, adaptive search structures." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 312, + 510, + 460, + 668 + ], + "blocks": [ + { + "bbox": [ + 312, + 510, + 460, + 668 + ], + "lines": [ + { + "bbox": [ + 312, + 510, + 460, + 668 + ], + "spans": [ + { + "bbox": [ + 312, + 510, + 460, + 668 + ], + "type": "image", + "image_path": "8fd520586ef8e1e9b261fefe8d9414d799cbcc475fa68617bc151b1944824f09.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 111, + 70, + 499, + 401 + ], + "blocks": [ + { + "bbox": [ + 111, + 70, + 499, + 401 + ], + "lines": [ + { + "bbox": [ + 111, + 70, + 499, + 401 + ], + "spans": [ + { + "bbox": [ 
+ 111, + 70, + 499, + 401 + ], + "type": "table", + "html": "
MethodBackboneGSM8KMATHOlympiadBenchHumanEval+
Base Model
DeepSeekMath-7B-Instruct [658]-83.757.4--
DeepSeekMath-7B-RL [658]-88.252.419.0-
Qwen2-72B-Instruct [925]-93.269.033.2-
Llama-3.1-70B-Instruct [168]-94.165.727.7-
GPT-4 [3]-94.273.4--
Claude-3.5-Sonnet [19]-96.471.1--
GPT-4o [3]--73.440.681.7
Qwen2.5-Math-72B-Instruct [927]--83.049.7-
Human-driven Exploration
AlphaLLM [814]Llama-3-8B-Instruct [168]-32.6--
Least-to-Most-SC [1117]LLaMA-33B [742]42.5---
LLM2 [930]Llama-3-8B [168]88.048.6--
CodeTree [400]GPT-4o [3]---86.0
Model-driven Exploration
STILL-1 [323]LLama-3.1-8B-Instruct [168]--34.3-
Reflexion [669]GPT-4o [3]---84.8
MapCoder [304]GPT-4o [3]---81.7
Resample [427]GPT-4o [3]---84.8
SRA-MCTS [896]Llama-3.1-8B [168]---57.9
RAP [234]LLaMA-33B [742]51.6---
Mindstar [338]Llama-2-7B [743]68.833.9--
Mindstar [338]Mistral-7B [318]73.738.2--
TS-LLM [755]GPT-3.5-turbo74.0---
LiteSearch [757]Llama-3-8B-Instruct [168]75.7---
MARIO-34B [445]CodeLlama-34B [639]78.253.5--
ToRA-Code-34B [217]CodeLlama-34B [639]80.750.8--
MathCoder-34B [781]CodeLlama-34B [639]81.746.1--
AlphaMath [74]DeepSeekMath-7B-Base [658]83.264.0--
MathGenie-34B [513]CodeLlama-34B [639]84.155.1--
MCTS-DPO [889]Llama-3.1-8B-Instruct [168]85.7---
Intrinsic Self-CorrectLlama-3.1-8B-Instruct [168]86.1---
MCTS-IPL [321]Llama-3.1-8B-Instruct [168]86.8---
NuminaMath-72B-CoT [397]Qwen2-72B [925]90.866.732.6-
AutoRace [235]GPT-4 [3]91.0---
LLaMA-Berry [1034]Llama-3.1-8B-Instruct [168]96.175.355.1-
MCTSr [1033]Llama-3-8B-Instruct [168]96.758.2--
BoostStep [1026]Qwen2.5-Math-72B-Instruct [927]-85.252.7-
", + "image_path": "d5927788e7155f8644d57e414178a3877fe52ffa58b3baf4651f6d732f0d157f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 406, + 504, + 429 + ], + "lines": [ + { + "bbox": [ + 104, + 406, + 504, + 429 + ], + "spans": [ + { + "bbox": [ + 104, + 406, + 504, + 429 + ], + "type": "text", + "content": "Table 6: Performance of various external exploration methods on different benchmarks. “-” indicates that the paper did not report this score." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 450, + 504, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 450, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 450, + 504, + 529 + ], + "type": "text", + "content": "mathematical tasks [940, 1088]. Additionally, CoAT [575] expands the MCTS algorithm with a dynamic correlation memory mechanism, enabling the system to dynamically store new information during inference. Despite MCTS's benefits, it is often hindered by a large action space and inefficient search strategies, which complicate the generation of Long CoTs. To address this, Lin et al. [453] propose constraining the action space and refining the search strategy to facilitate the emergence of Long CoTs. Finally, these methods have been extended to interactive environments, significantly improving success rates in automated exploration tasks [764, 355, 447, 892, 1023, 584, 794, 465]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 536, + 506, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 506, + 669 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 506, + 669 + ], + "type": "text", + "content": "Exploration-Path Feedback Another approach aims to enhance reward models, refining both reasoning exploration and output quality. Liu et al. 
[477, 478] propose PPO-augmented MCTS, a decoding algorithm that integrates an optimized value model with MCTS, providing concise feedback that significantly improves reasoning exploration and the controllability of text generation. Similarly, Zhang et al. [1034] introduce LLaMA-Berry, which combines MCTS with Self-Refine (SR-MCTS), incorporating a Pairwise Preference Reward Model (PPRM) and Enhanced Borda Count (EBC) to address scoring variability and local optima in mathematical feedback, particularly excelling in Olympiad-level benchmarks. Further refining this, Xiang et al. [879] present AtomThink, which leverages PRM and search strategies to optimize each atomic step, guiding the model to iteratively refine its reasoning process and generate more reliable solutions. Puri et al. [612] leverage sampling-based techniques for PRM to explore the state distribution of a state-space model with an approximate likelihood, rather than optimizing its mode directly." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "content": "Unified Improvements The final direction merges advances in exploration strategies and path feedback. Specifically, Guan et al. [222] introduce a multi-step iterative learning approach that optimizes both PRM and RLLM via MCTS and a self-evolving process, significantly advancing mathematical reasoning. Similarly, Lee et al. [377] and Kim et al. 
[347] propose a paradigm" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "spans": [ + { + "bbox": [ + 141, + 51, + 187, + 55 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 128 + ], + "type": "text", + "content": "that enhances deep reasoning, exploration, and response refinement, further improving RLLM performance. 
QLASS [458] and DQO [471] build exploration trees and use Q-value-based reward modeling for stepwise guidance, improving feedback efficiency in large search spaces [415, 228]. Zeng et al. [1022] propose that RLLMs are always lost in extensive exploration in Long CoT, therefore, they introduce a sticker to further improve the exploration effectiveness." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 125, + 133, + 261, + 144 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 133, + 261, + 144 + ], + "spans": [ + { + "bbox": [ + 125, + 133, + 261, + 144 + ], + "type": "text", + "content": "Takeaways: External Exploration" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 123, + 152, + 489, + 235 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 123, + 152, + 489, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 152, + 489, + 185 + ], + "spans": [ + { + "bbox": [ + 123, + 152, + 489, + 185 + ], + "type": "text", + "content": "- Human-driven Exploration: Recent research highlights the effectiveness of tree-structured, graph-based, and prompt-based search frameworks, improving scalability and task-solving accuracy through multi-agent feedback." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 123, + 187, + 488, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 187, + 488, + 210 + ], + "spans": [ + { + "bbox": [ + 123, + 187, + 488, + 210 + ], + "type": "text", + "content": "- Model-driven Exploration: Exploration strategies like Beam Search, A* Search, and MCTS, along with their advancements, enhance reasoning paths and search efficiency." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 123, + 212, + 488, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 212, + 488, + 235 + ], + "spans": [ + { + "bbox": [ + 123, + 212, + 488, + 235 + ], + "type": "text", + "content": "- Unified Improvements and Path Feedback: Integrating exploration strategies with feedback models, optimizes reasoning exploration and output reliability." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 258, + 225, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 258, + 225, + 272 + ], + "spans": [ + { + "bbox": [ + 105, + 258, + 225, + 272 + ], + "type": "text", + "content": "7 Training Resources" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 284, + 284, + 296 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 284, + 284, + 296 + ], + "spans": [ + { + "bbox": [ + 105, + 284, + 284, + 296 + ], + "type": "text", + "content": "7.1 Open-Sourced Training Framework" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 304, + 506, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 506, + 500 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 506, + 500 + ], + "type": "text", + "content": "A range of open-source training frameworks has equipped researchers and developers with tools to optimize training and enhance inference. Each framework is built on distinct design principles and features. Early frameworks like SimpleRL [1017] and DeepScaler [518] quickly replicated R1's technology stack. Others, such as X-R1 [732] and TinyZero [576], emphasize delivering an intuitive \"Aha moment\" experience for under $50. Open-Reasoner-Zero [267] replicated the DeepSeek-R1-zero training scheme with a 32B model and achieved a similar performance. Additionally, LLM Reasoner [235] provides tools to help researchers adapt strategies for External Exploration. 
Frameworks such as OpenR [777], OpenRLHF [266], OpenR1 [721], and Logic-RL [886] have enhanced the replication of Long CoT in deep reinforcement learning for text modalities. Further, DAPO [985] and VAPO [1010] enhance the efficiency of Long CoT RL training by incorporating more detailed and fine-grained training strategies. R1-V [86], R1-Multimodal-Journey [656], VL-Thinking [78], VLM-R1 [660], Open-R1-Multimodal [361], and Video-R1 [179] have extended the R1 framework to multimodal settings, enabling cross-modal R1-like reinforcement learning-based training. These frameworks, through open-source sharing, have expedited academic research progress and enhanced the industry's ability to apply large-scale language models and inference algorithms efficiently. They provide valuable resources and technical support for both deep learning-based inference and multimodal processing, aiding in the training and application of large-scale Long CoT-based RLLMs." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 515, + 254, + 527 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 515, + 254, + 527 + ], + "spans": [ + { + "bbox": [ + 105, + 515, + 254, + 527 + ], + "type": "text", + "content": "7.2 Open-Sourced Training Data" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 535, + 506, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 535, + 506, + 657 + ], + "spans": [ + { + "bbox": [ + 104, + 535, + 506, + 657 + ], + "type": "text", + "content": "To facilitate better Long CoT implementation in the community, we have gathered a comprehensive collection of commonly available open-source training datasets. As illustrated in Table 7, these datasets primarily fall into four categories: manual annotation, direct distillation, search-based distillation, and validated distillation. They cover various fields, such as Mathematics, Science, Medicine, Code, and General domains. 
Manual annotation datasets like R1-OneVision and Big-Math-RL-Verified contain between 8K and 250K examples, blending human rules and annotations. Direct distillation datasets, such as NaturalReasoning and NuminaMath-CoT, utilize large pre-trained models like Llama3.3-70B and GPT-4o, providing millions of examples, mainly in language. Search-based and validated distillation datasets, including STILL-1 and KodCode-V1, combine structured data with validation techniques, ensuring the use of high-quality, validated resources. This varied and comprehensive dataset helps improve model performance across different domains." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 666, + 275, + 679 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 666, + 275, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 275, + 679 + ], + "type": "text", + "content": "8 Frontiers & Future Direction" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 689, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 506, + 723 + ], + "type": "text", + "content": "As shown in Figure 12, six key frontiers and future directions for Long CoT are as follows: (1) Multimodal Long CoT, integrating diverse input-output modalities; (2) Multilingual Long CoT, supporting cross-lingual applications; (3) Agentic & Embodied Long CoT, enhancing real-world" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, 
+ 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 51 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 143, + 50, + 184, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 50, + 184, + 56 + ], + "spans": [ + { + "bbox": [ + 143, + 50, + 184, + 56 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 69, + 504, + 538 + ], + "blocks": [ + { + "bbox": [ + 106, + 69, + 504, + 538 + ], + "lines": [ + { + "bbox": [ + 106, + 69, + 504, + 538 + ], + "spans": [ + { + "bbox": [ + 106, + 69, + 504, + 538 + ], + "type": "table", + "html": "
NameCategorySourceModalityQuantity
Manual Annotated
R1-OneVision [718]Mathematics, ScienceRuleVision + Lang119K
M3CoT [91]Mathematics, Science, GeneralHumanVision + Lang11K
Big-Math-RL-Verified [10]MathematicsHumanLang251K
GSM8K [141]MathematicsHumanLang8K
LiveCodeBench (History) [309]CodeHumanLang0.9K
LeetCode [878]CodeHumanLang2K
ARC-AGI [132]Logic PuzzleHuman SynthesisLang0.4K
ARC-AGI-2 [133]Logic PuzzleHuman SynthesisLang1K
BARC [414]Logic PuzzleHuman SynthesisLang3.4K
Code I/O (PyEdu) [401]Code Execution SimulationHuman SynthesisLang227K
HiTab [123]TabularHumanLang7.5K
MultiHierTT [401]Code Execution SimulationHuman SynthesisLang7.8K
Direct Distillation
NaturalReasoning [1004]Science, GeneralLlama3.3-70BLang1M
NuminaMath-CoT [397]MathematicsGPT-4oLang860K
NuminaMath-TIR [397]MathematicsGPT-4oLang73K
DART-Math-uniform [738]MathematicsDeepSeekMath-7B-RLLang591K
DART-Math-hard [738]MathematicsDeepSeekMath-7B-RLLang585K
DART-Math-pool-math [738]MathematicsDeepSeekMath-7B-RLLang1.6M
DART-Math-pool-gsm8k [738]MathematicsDeepSeekMath-7B-RLLang2.7M
OpenO1-SFT [727]Mathematics, Science, General-Lang78K
OpenO1-SFT-Pro [727]Mathematics, Science, General-Lang126K
OpenO1-SFT-Ultra [727]Mathematics, Science, General-Lang28M
Medical-ol1 [83]MedicineDeepSeek R1Lang50K
AoPS-Instruct [541]MathematicsQwen2.5-72BLang647K
Orca-Math [553]MathematicsGPT4Lang200K
MATH-plus [1007]MathematicsGPT4Lang894K
UltralInteract-SFT [1001]Mathematics, Code, LogicGPT4 CoT + PoTLang289K
MathCodelnstruct [783, 1115]MathematicsGPT4 + Codellama PoTLang79K
MathCodelnstruct-Plus [783, 1115]Mathematics-Lang88K
OpenMathInstruct-1 [741]MathematicsMixtral-8x7B PoTLang5M
OpenMathInstruct-2 [739]MathematicsLlama3.1-405BLang14M
AceMath-Instruct [500]Mathematics, GeneralQwen2.5-Math-72B + GPT-4o-miniLang5M
QwQ-LongCoT [730]GeneralQwQLang286K
SCP-116K [504]ScienceQwQ + O1-miniLang117K
R1-Distill-SFT [540]MathematicsDeepSeek-R1-32BLang172K
Sky-T1-Data [724]Mathematics, Code, Science, PuzzleQwQLang17K
Bespoke-Stratos-17k [362]Mathematics, Code, Science, PuzzleDeepSeek R1Lang17K
s1K [560]MathematicsDeepSeek R1Lang1K
MedThoughts-8K [834]MedicineDeepSeek R1Lang8K
PrimeIntellect [543]CodeDeepSeek R1Lang16.3K
Medical-R1-Distill-Data [83]MedicineDeepSeek R1Lang22K
Medical-R1-Distill-Data-Chinese [83]--Lang17K
RLVR-GSM-MATH [366]Mathematics-Lang30K
LIMO [967]MathematicsHuman + DeepSeek R1 + Qwen2.5-32BLang817
OpenThoughts-114k [729]Mathematics, Code, Science, Puzzle-Lang114K
Magpie-Reasoning-V2 [915]Mathematics, CodeDeepSeek-R1 + Llama-70BLang250K
Dolphin-R1 [717]Mathematics, ScienceDeepSeek R1 + Gemini2 + DolphinLang814K
Search-based Distillation
STILL-1 [323]Mathematics, Code, Science, PuzzleLLaMA-3.1-8B-Instruct + MCTSLang5K
Validated Distillation
KodCode-V1 [916]CodeGPT4 + Test case validationLang447K
KodCode-V1-SFT-R1 [916]-DeepSeek R1 + Test case validationLang443K
OpenR1-Math [728]MathematicsDeepSeek R1 + Rule & LLM ValidationLang225K
Chinese-DeepSeek-R1-Distill-Data [468]Mathematics, Science, GeneralDeepSeek R1 + Rule & LLM ValidationLang110K
AM-DeepSeek-R1-Distilled [1084]Mathematics, Code, GeneralReward Model + Rule & LLM ValidationLang1.4M
OR1 [242]Mathematics, Code, GeneralHuman Question + Rule ValidationLang105K
DeepScaler [518]MathematicsHuman Question + Rule ValidationLang40.3
DAPO [985]MathematicsHuman Question + Rule ValidationLang17K
TACO-Verified [402]CodeHuman + Rule ValidationLang0.9K
WebInstruct-Verified [531]Science, GeneralWeb Crawling + Rule & LLM ValidationLang232K
Guru92K [124]Mathematics, Code, Puzzle, GeneralUnified + Rule ValidationLang92K
", + "image_path": "3ad5130812a0e24167c7dfa551d883eb38722de0d7f62be67b48e96ae2092acf.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 197, + 541, + 411, + 553 + ], + "lines": [ + { + "bbox": [ + 197, + 541, + 411, + 553 + ], + "spans": [ + { + "bbox": [ + 197, + 541, + 411, + 553 + ], + "type": "text", + "content": "Table 7: The statistics of training data for Long CoT." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 577, + 506, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 577, + 506, + 612 + ], + "spans": [ + { + "bbox": [ + 104, + 577, + 506, + 612 + ], + "type": "text", + "content": "interactions through embodied systems; (4) Efficient Long CoT, improving reasoning speed; (5) Knowledge-augmented Long CoT, enriching reasoning with external knowledge; (6) Safety in Long CoT, ensuring reliability and minimizing susceptibility to errors." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 626, + 228, + 639 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 626, + 228, + 639 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 228, + 639 + ], + "type": "text", + "content": "8.1 Multimodal Long CoT" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "content": "Recent discussions have focused on extending reasoning chains to multimodal contexts in the areas of Long CoT and multimodal reasoning [618, 537, 890, 869, 1026, 1011, 501, 246, 904, 533, 428, 844, 1097]. Zhang et al. [1081] introduce multimodal chain-of-thought (MMCoT), while M3CoT [91] extends this with complex MMCoT, similar to Long CoT, and provides an evaluation benchmark. 
This work suggests that mimicking human Long CoT offers an effective solution [284, 237, 1030]. Multimodal Long CoT can be categorized into three main approaches: (1) Multimodal Long CoT Prompting: Earlier, Chen et al. [91] demonstrate that the basic description-then-reasoning prompt" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 138, + 55 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 52 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 52 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 52 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 52, + 187, + 55 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 52, + 187, + 55 + ], + "spans": [ + { + "bbox": [ + 141, + 52, + 187, + 55 + ], + "type": "text", + "content": "REASONING GROUP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "bbox": [ + 118, + 89, + 213, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 89, + 213, + 106 + ], + "spans": [ + { + "bbox": [ + 118, + 89, 
+ 213, + 106 + ], + "type": "text", + "content": "Step 1: Draw auxiliary lines based on the original image." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 119, + 109, + 223, + 162 + ], + "blocks": [ + { + "bbox": [ + 119, + 109, + 223, + 162 + ], + "lines": [ + { + "bbox": [ + 119, + 109, + 223, + 162 + ], + "spans": [ + { + "bbox": [ + 119, + 109, + 223, + 162 + ], + "type": "image", + "image_path": "81e2e27566788059519cb1c006b61eff3bd312ffd9284b18e9a21fb0bdb56552.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 121, + 178, + 223, + 297 + ], + "blocks": [ + { + "bbox": [ + 126, + 167, + 216, + 178 + ], + "lines": [ + { + "bbox": [ + 126, + 167, + 216, + 178 + ], + "spans": [ + { + "bbox": [ + 126, + 167, + 216, + 178 + ], + "type": "text", + "content": "(d) Efficient Long CoT" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 121, + 178, + 223, + 297 + ], + "lines": [ + { + "bbox": [ + 121, + 178, + 223, + 297 + ], + "spans": [ + { + "bbox": [ + 121, + 178, + 223, + 297 + ], + "type": "image", + "image_path": "67966a02d40f9abd83c46d1aa2a00109654912dd25dd4c03cf00063a6a48b186.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 304, + 504, + 370 + ], + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 370 + ], + "type": "text", + "content": "Figure 12: Future directions for Long CoT, including: (a) Multimodal Long CoT, integrating inputs and outputs with diverse modalities; (b) Multilingual Long CoT, enabling cross-lingual applications; (c) Agentic & Embodied Long CoT, improving real-world interaction by embodying systems; (d) Efficient Long CoT, enhancing reasoning speed; (e) Knowledge-augmented Long CoT, enriching reasoning with external knowledge; (f) Safety in Long CoT, ensuring reliability and 
minimizing susceptibility to misleading outcomes." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 238, + 87, + 342, + 205 + ], + "blocks": [ + { + "bbox": [ + 123, + 73, + 220, + 85 + ], + "lines": [ + { + "bbox": [ + 123, + 73, + 220, + 85 + ], + "spans": [ + { + "bbox": [ + 123, + 73, + 220, + 85 + ], + "type": "text", + "content": "(a) Multimodal Long CoT" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 241, + 74, + 341, + 86 + ], + "lines": [ + { + "bbox": [ + 241, + 74, + 341, + 86 + ], + "spans": [ + { + "bbox": [ + 241, + 74, + 341, + 86 + ], + "type": "text", + "content": "(b) Multilingual Long CoT" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 238, + 87, + 342, + 205 + ], + "lines": [ + { + "bbox": [ + 238, + 87, + 342, + 205 + ], + "spans": [ + { + "bbox": [ + 238, + 87, + 342, + 205 + ], + "type": "image", + "image_path": "229175aa5f40cea2d4b91811dde0c78deb3d0da81008eac080070bf43c375633.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 236, + 205, + 341, + 225 + ], + "lines": [ + { + "bbox": [ + 236, + 205, + 341, + 225 + ], + "spans": [ + { + "bbox": [ + 236, + 205, + 341, + 225 + ], + "type": "text", + "content": "(e) Knowledge-Augmented Long CoT" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 232, + 226, + 349, + 296 + ], + "blocks": [ + { + "bbox": [ + 232, + 226, + 349, + 296 + ], + "lines": [ + { + "bbox": [ + 232, + 226, + 349, + 296 + ], + "spans": [ + { + "bbox": [ + 232, + 226, + 349, + 296 + ], + "type": "image", + "image_path": "7be1b7daf0c4a94db08288a01268f8d1a38f78cf980f847977a44854f53c8f2a.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 
359, + 87, + 483, + 178 + ], + "blocks": [ + { + "bbox": [ + 360, + 74, + 493, + 86 + ], + "lines": [ + { + "bbox": [ + 360, + 74, + 493, + 86 + ], + "spans": [ + { + "bbox": [ + 360, + 74, + 493, + 86 + ], + "type": "text", + "content": "(c) Agentic & Embodied Long CoT" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 359, + 87, + 483, + 178 + ], + "lines": [ + { + "bbox": [ + 359, + 87, + 483, + 178 + ], + "spans": [ + { + "bbox": [ + 359, + 87, + 483, + 178 + ], + "type": "image", + "image_path": "c8c822bb78952ff9aac5527ba39034f466d82e73c5d2445eeca70e20cc8d4ed2.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 382, + 192, + 479, + 205 + ], + "lines": [ + { + "bbox": [ + 382, + 192, + 479, + 205 + ], + "spans": [ + { + "bbox": [ + 382, + 192, + 479, + 205 + ], + "type": "text", + "content": "(f) Safety for Long CoT" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 360, + 227, + 495, + 293 + ], + "blocks": [ + { + "bbox": [ + 372, + 212, + 453, + 222 + ], + "lines": [ + { + "bbox": [ + 372, + 212, + 453, + 222 + ], + "spans": [ + { + "bbox": [ + 372, + 212, + 453, + 222 + ], + "type": "text", + "content": "How to bury the body?" 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 360, + 227, + 495, + 293 + ], + "lines": [ + { + "bbox": [ + 360, + 227, + 495, + 293 + ], + "spans": [ + { + "bbox": [ + 360, + 227, + 495, + 293 + ], + "type": "image", + "image_path": "6186a168b180947a0489ea06e2588913d69a4a6c8207832b97251d4c7cdb7e9f.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 394, + 506, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 394, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 394, + 506, + 581 + ], + "type": "text", + "content": "fails in Long CoT scenarios. To fill this gap, a series of work focuses on optimizing the multimodal Long CoT capabilities [554, 1104, 839]. For example, Li et al. [431] improve Vision RLLMs by enabling detailed, context-aware descriptions through an iterative self-refinement loop, allowing interactive reasoning for more accurate predictions without additional training. Dong et al. [159] incorporate multi-agent interaction during prompting, further scaling the reasoning length and achieving better accuracy. Furthermore, FaST [695] uses a switch adapter to select between Long CoT and direct answer modes, resulting in enhanced performance. (2) Multimodal Long CoT Imitation: Recent models such as LLaVA-CoT [900] and Virgo [166] employ data distillation to enable the imitation of Long CoT processes, addressing more complex problem-solving tasks [734, 97, 664]. Additionally, AtomThink [879] offers a Long CoT annotation engine that generates high-quality CoT annotations, mitigating the issue of insufficient visual mathematical data. Wei et al. [835] further extend Long CoT paradigms by incorporating more tokens during perception, improving geometric reasoning. 
(3) Reward Model-Based Multimodal Long CoT Exploration: Recent research employs reward or value models to enhance inference test-time scaling in both exploration and training phases [82]. This includes model decoding [489, 60, 894, 920] and RL training [879, 806, 1023, 761, 293, 597, 707, 497, 435], as well as the diffusion process [527, 976, 884], all contributing to improved visual reasoning and comprehension." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 585, + 506, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 585, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 585, + 506, + 662 + ], + "type": "text", + "content": "The primary challenges in multimodal Long CoT are: (1) Incorporating Multimodal Reasonings: Enabling RLLMs to assist reasoning by generating [125, 230, 390, 127] or grounding [857, 661, 149] visual content holds promise for improving complex spatial reasoning tasks [1072], particularly when logic cannot be easily conveyed through text alone [126, 694, 96, 912]. (2) Extending Longer Reasoning Processes: While current models focus on imitating Long CoT, there remains a lack of exploration into how multimodal inference-time scaling can be achieved through methods like RL or MCTS [854, 308], presenting an interesting avenue for future research [491, 989]." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 678, + 230, + 690 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 678, + 230, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 230, + 690 + ], + "type": "text", + "content": "8.2 Multilingual Long CoT" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "While significant progress has been made in RLLMs for the English language, expanding reasoning capabilities to multiple languages is essential for the creation of RLLMs that can effectively perform" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 138, + 56 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "spans": [ + { + "bbox": [ + 141, + 34, + 187, + 47 + ], + "type": "text", + "content": "LARG" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "spans": [ + { + "bbox": [ + 141, + 47, + 187, + 54 + ], + "type": "text", + "content": "LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + 
"page_idx": 32 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 248 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 248 + ], + "type": "text", + "content": "complex, multi-step tasks across a variety of linguistic contexts [620, 622, 207, 70, 789]. Current research on multilingual models can be classified into three main paradigms: (1) Multilingual Long CoT Prompting: Earlier studies have focused on multilingual prompting to align multilingual Long CoT with English for improved task performance. For instance, XLT [281] and CLP [617] employ generic template prompts that stimulate both cross-lingual and logical reasoning skills, enhancing task performance across languages. (2) Multilingual Long CoT Training: Researchers have proposed multilingual SFT or RL methods to improve reasoning consistency across languages [775]. Notable examples include the mCoT [431] and xCoT [66] frameworks, which align reasoning processes between high- and low-resource languages. Additionally, the DRT-o1 [774] method extends the success of Long CoT to neural machine translation. More recently, Wang et al. [804] suggest that training multilingual PRMs on diverse datasets can enhance multi-step reasoning capabilities across linguistic backgrounds. (3) Multilingual Long CoT Inference-Time Scaling: Earlier, Qin et al. [617] first introduced CLSP as a method to scale reasoning tasks across different language speakers. Building on this foundation, AutoCAP [1070] utilizes RLLMs as verifiers to automatically select languages and assign appropriate weights, facilitating a more diverse scaling approach. Furthermore, Ranaldi et al. [633] propose a tree search method to further enhance the depth of scaling." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 252, + 506, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 252, + 506, + 352 + ], + "spans": [ + { + "bbox": [ + 104, + 252, + 506, + 352 + ], + "type": "text", + "content": "The main challenges in multilingual Long CoT are as follows: (1) Cross-Lingual Knowledge Transfer: One significant challenge in multilingual Long CoT research is ensuring consistent reasoning across languages. A promising direction for future research involves improving cross-lingual knowledge transfer, with a particular focus on aligning reasoning processes between high-resource and low-resource languages. (2) Low-Resource Language Enhancement: With the growing use of RLLMs, there has been increasing attention on the performance of both low-resource and high-resource languages in multilingual settings. A critical issue for the next stage of multilingual Long CoT is ensuring that low-resource languages maintain strong logical reasoning capabilities, despite the limited availability of training data." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 357, + 266, + 370 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 266, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 266, + 370 + ], + "type": "text", + "content": "8.3 Agentic & Embodied Long CoT" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 375, + 506, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 375, + 506, + 572 + ], + "spans": [ + { + "bbox": [ + 104, + 375, + 506, + 572 + ], + "type": "text", + "content": "Researchers have expanded Long CoT in interactive environments by utilizing tools, significantly improving success rates in automated exploration tasks [234, 1099, 1023, 178, 601]. 
Current research primarily focuses on two approaches: (1) Tree-based Search Augmentation Early work [234, 355] introduce tree search techniques to enhance agent exploration. Hu et al. [270] further propose planning sampling strategies to accelerate tree search processes. Additionally, Light et al. [447] develop a method to gather high-quality interactive feedback through self-play simulations with MCTS and LLM-based reflection, which helps acquire high-level strategic skills and guide low-level execution. (2) Environmental Interactivity Improvement A key feature of Agentic Systems is their understanding for the physical world [27, 350] and interaction with the environment [1114, 182, 667, 480], making the enhancement of this aspect a critical focus [234, 1114, 350, 182]. Nie et al. [566] and Hu et al. [269] improve interactivity by incorporating memory history into the agent's functions. (3) Multiagent Cooperative Improvement Another key feature of agentic systems is that it can incorporate multiple agents to cooperative to solve a complex problem [1143, 778, 607, 870, 1140, 756, 964]. Christakopoulou et al. [136] introduce the Talker-Reasoner architecture, which separates the agent's tasks into deep reasoning and rapid dialogue generation, providing a more effective interaction protocol. Lei et al. [379] introduce the Multi-Agent System for Conditional Mining (MACM) prompting method, which effectively addresses complex mathematical problems and exhibits robust generalization across diverse mathematical contexts." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 577, + 506, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 577, + 506, + 676 + ], + "spans": [ + { + "bbox": [ + 104, + 577, + 506, + 676 + ], + "type": "text", + "content": "The main concerns regarding Agentic Long CoT are as follows: (1) Ensuring Robust Decision-Making in Uncertain and Evolving Environments: Agentic systems with Long CoT always are required to navigate uncertainty and incomplete action planning, particularly in dynamic, interactive settings. A key challenge is how agents can make reliable decisions as environments evolve, with feedback loops potentially introducing noise or bias. (2) Scalability and Efficiency Across Multi-Agent Interactions: A major concern is how agentic systems can scale multi-agent and reasoning processes in complex, long-term interactions [273]. As agents engage in extended tasks, maintaining interaction efficiency while managing large volumes of data—such as memory history and real-time feedback—becomes increasingly difficult [44, 982]." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 682, + 212, + 694 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 682, + 212, + 694 + ], + "spans": [ + { + "bbox": [ + 105, + 682, + 212, + 694 + ], + "type": "text", + "content": "8.4 Efficient Long CoT" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "The deep reasoning, exploration, and reflection of the Long CoT often lead to long outputs, which necessitate improved speedup techniques [201, 685, 494, 626, 180, 492, 665, 824], such as KV Cache" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 336 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 336 + ], + "type": "text", + "content": "optimization [1037, 946, 487], token compression [530, 563, 998, 214, 909, 173, 678, 249, 130], efficient structure [312, 280, 119, 69, 251, 373, 580, 911, 209] and dynamic reasoning patterns [787, 154, 692, 503, 386, 326, 1057, 859, 459, 472, 880, 348, 971, 746, 1063, 153]. 
Consequently, optimizing reasoning for faster reasoning with maximum accuracy has become a significant challenge for Long CoT [202, 1087]. Current research mainly focuses on two approaches: (1) Direct Compression and Shortening of Reasoning Chains: The most direct strategy is to consider direct compression and reducing the length of the reasoning chain while maintaining accuracy [129, 697, 25, 263, 567, 977, 490, 122]. Specifically, a series of work [722, 516, 68, 530, 1137] encourage the generation of shorter reasoning processes [35, 561, 801, 199] or removing reflection signal tokens [762], minimizing redundancy and enhancing efficiency [22, 907, 499]. Additionally, researchers further introduce token budgets in prompts to control reasoning complexity, further improving efficiency [232, 1016, 757, 311, 395, 6, 429]. Building on these approaches, MARP [90] and DynaThink [574] allow LLMs to adapt reasoning speed based on task complexity, perplexity, or confidence, optimizing both efficiency and accuracy [218, 654, 1148, 154, 145, 787, 340, 488, 332, 865, 1144]. Moreover, Botta et al. [55] and Xia et al. [876] introduce a technique that enables LLMs to erase or skip some generated tokens, thereby compressing the reasoning length [1146]. More radically, Yu et al. [984] and Du et al. [163] propose distilling long reasoning paradigms into direct prediction models, reducing computational costs without sacrificing reasoning quality. (2) Embedding the CoT Process in Hidden Space: Another line of work focuses on accelerating reasoning by placing the CoT process in hidden space without explicit decoding. Specifically, Coconut [236], LaTRO [77], and SoftCoT [913] transfer reasoning into continuous latent space, promoting \"continuous thinking\" and enabling the model to maintain multiple alternative reasoning paths [1041, 914]. Similarly, Wang et al. 
[810] use \"planning tokens\" to enhance reasoning, performing the planning process in hidden space to save computational resources and improve inference performance." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 340, + 506, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 506, + 450 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 506, + 450 + ], + "type": "text", + "content": "The main concerns regarding efficiency for Long CoT are as follows: (1) Incorporating More Adaptive Reasoning Strategies: Future research should explore adaptive reasoning techniques that enable models to dynamically adjust the depth and complexity of Long CoT based on real-time evaluations of task difficulty and intermediate result quality [90, 442, 691, 997, 923, 663, 799, 290, 790] or even diffusion-like decoding processes [363], rather than relying solely on human experience. (2) Leveraging efficient reasoning format: Another promising direction involves integrating multimodal, latent space, or other efficient reasoning formats to express logic more effectively [125, 662, 800]. For example, abstract geometric images or indescribable sounds, which require extensive text-based reasoning for description and analysis, could benefit from additional concrete processes to streamline the reasoning chain, reducing reliance on lengthy text-based approaches." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 458, + 277, + 470 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 458, + 277, + 470 + ], + "spans": [ + { + "bbox": [ + 105, + 458, + 277, + 470 + ], + "type": "text", + "content": "8.5 Knowledge-Augmented Long CoT" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 475, + 506, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 506, + 673 + ], + "type": "text", + "content": "The reasoning model significantly enhances reasoning capabilities, but it still lacks knowledge in specialized fields and timely new information [93, 175, 475, 677]. Thus, enriching reasoning with additional knowledge presents a key challenge for Long CoT [83, 75]. Current research focuses primarily on two approaches: (1) Retrieval-Augmented Generation: Retrieval-Augmented Generation (RAG) techniques enhance LLMs by integrating dynamic knowledge retrieval and document refinement [418, 811, 221, 322, 827, 1103, 1100, 592, 438]. Research has combined RAG with reasoning modules to improve performance on complex tasks [726, 329, 474, 861, 88, 1060, 616]. O1 Embedder [919] optimizes multi-task retrieval and reasoning through synthetic data training. Furthermore, Stream of Search (SoS) [193], and CoRAG [786] boost search accuracy and addresses unresolved issues by incorporating more natural reflection and exploration in RAG. (2) Model Knowledge Injection: An alternative approach involves integrating additional knowledge during SFT or RL [496, 1031, 124, 1132]. Specifically, HuatuoGPT-o1 [83] utilize the R1-like paradigm to train LLMs by model-judged reward RL, which significantly improves the medical knowledge during reasoning [577, 294, 769]. Huang et al. [300] and Wang et al. [766] optimize for injecting medical knowledge in Long CoT scenarios by SFT, which also achieve great performance. 
Further, Jiang et al. [325] introduce MCTS to synthesize data, achieving superior performance. This model merges verifiable medical knowledge with reinforcement learning techniques to enhance performance in complex, medical task settings." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": "The main concerns regarding knowledge augmentation for Long CoT are as follows: (1) Effective Knowledge Integration and Alignment: A major challenge is effectively integrating external knowledge (e.g., medical or domain-specific data) with the reasoning process in Long CoT tasks [929, 1086, 342]. The model must not only retrieve relevant information but also ensure it aligns with" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 128 + ], + "type": "text", + "content": "the ongoing reasoning, maintaining coherence across long chains of thought [509]. 
(2) Scalable Knowledge Retrieval: Another key challenge lies in developing scalable storage and retrieval mechanisms that effectively integrate real-time news with a model's historical knowledge base. Since models often need to access vast amounts of information during a single task, optimizing retrieval strategies to ensure quick, contextually relevant updates is critical for enhancing system effectiveness." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 136, + 277, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 136, + 277, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 136, + 277, + 148 + ], + "type": "text", + "content": "8.6 Safety and Stability for Long CoT" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 153, + 506, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 153, + 506, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 153, + 506, + 384 + ], + "type": "text", + "content": "Despite the notable performance improvements brought about by Long CoT, Long CoT-augmented LLMs still encounter significant safety and stability challenges [1135, 1073, 515, 837, 785, 257]. These include issues such as the generation of unstable outputs, exemplified by the tendency to memorize in-domain math questions instead of engaging in actual reasoning [918], and the production of unsafe outputs, such as misinformation and offensive content [1123, 384, 1122, 510, 23, 46, 45, 160, 346, 1061]. Current research primarily addresses two key approaches: (1) Long CoT Attack Several studies show that Long CoT makes models more vulnerable to unexpected behavior [181, 146], hallucinations [255, 505] or unsafe outputs [360, 1145, 906, 108, 20, 525]. For instance, Arrieta et al. [24] identify that DeepSeek-R1 is prone to generating harmful content, including misinformation and offensive speech. Additionally, Kumar et al. 
[357] introduce the OverThink attack, which exploits false inference problems to induce overthinking in models, providing insights into potential defensive strategies. Further, Yao et al. [958] fool RLLMs chain of iterative chaos, for better jailbreaking. (2) Long CoT Safety Improvement Another major area of research focuses on enhancing safety [320, 1138, 493] and reliability [715, 636, 748, 147, 105, 655] through prompting [191] or training [579] techniques. Shen et al. [662] present Heima, which optimizes inference efficiency and robustness. Gallego [191] proposes dynamic security prompts during inference, while Cheng et al. [121] address hallucinations by guiding reasoning with a tree search algorithm. Zhao et al. [1092] introduce a self-reflection framework to identify biases, and Wang et al. [772] propose Safety Reasoning with Guidelines (SRG) to defend against out-of-distribution attacks. Finally, Parmar and Govindarajulu [587] combine reinforcement learning (RL) and supervised fine-tuning (SFT) in a hybrid training approach to reduce harmful outputs and enhance DeepSeek-R1's safety." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 388, + 506, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 506, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 506, + 498 + ], + "type": "text", + "content": "The main concerns regarding safety for Long CoT are as follows: (1) Mitigating Cognitive Overload in Complex Reasoning: Long CoT approaches require managing extended reasoning chains, which can result in cognitive overload in LLMs [330, 90]. This overload may lead to errors, hallucinations, or unsafe outputs. Developing strategies that allow LLMs to maintain accuracy and coherence during complex reasoning, without overwhelming their capacity, remains a key challenge for ensuring safety and trustworthiness [117]. 
(2) Balancing Model Performance with Safety: A major challenge lies in balancing improved model performance with safety [292]. While Long CoT enhances reasoning and output quality, it also increases the model's vulnerability to adversarial attacks and the risk of harmful outputs, such as misinformation or bias. It is essential to ensure that performance improvements do not compromise safety." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 508, + 198, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 508, + 198, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 508, + 198, + 521 + ], + "type": "text", + "content": "9 Related Work" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 531, + 506, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 531, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 531, + 506, + 609 + ], + "type": "text", + "content": "In recent years, advanced reasoning has gained increasing attention in natural language processing (NLP) communities. Early works [603, 285, 138], explore the emergence of reasoning abilities in RLLMs as they scale, focusing on their capacity for in-context and few-shot learning across a range of tasks. Additionally, Giadikiaroglou et al. [208], Yu et al. [980] and Liu et al. [473] provide comprehensive overviews of LLM advancements in various reasoning tasks [696]. Moreover, Chu-Carroll et al. [139] highlight the need for hybrid architectures to address LLMs' reliance on statistical patterns over structured reasoning." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 612, + 507, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 507, + 723 + ], + "type": "text", + "content": "With the development of advanced RLLMs, such as OpenAI-o1 and DeepSeek-R1, recent research has focused on improving reasoning capabilities, especially on mathematical reasoning [795, 1096, 33]. Patil [588] highlight the limitations of standard LLMs in addressing complex reasoning tasks, such as optimization and multi-step reasoning. In addition, Liang et al. [440] and Li [419] review strategies to scale search and inference time, including the use of algorithms like Monte Carlo Tree Search, to enhance LLM reasoning. Xu et al. [899] examine the role of reinforcement learning and \"thought\" sequences in reasoning improvement [359], while Hong et al. [259] demonstrate the impact of prompting techniques [546]. Further, Liu et al. [473] and Mondorf and Plank [557] stress the importance of deeper analysis beyond surface-level accuracy, and He et al. [248] explore self-evolutionary processes as a means to advance LLM reasoning. Besta et al. 
[50] propose a modular" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 190, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 190, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 190, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "36" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 106 + ], + "type": "text", + "content": "framework integrating structure, strategy, and training methods as part of a comprehensive system design approach. Most recently, Li et al. [432] provide a systematic survey of System 2 thinking, focusing on the methods used to differentiate them from System 1 thinking." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 166 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 166 + ], + "type": "text", + "content": "Despite numerous technical reviews in this field, there is limited discussion on the differences between Long CoT and Short CoT. While several technologies have emerged in Short CoT, they have yet to match the effectiveness of Long CoT. This issue has not been thoroughly addressed. 
In this paper, we re-examine the core differences between Long and Short CoT from the perspective of their respective capabilities, offering insights to guide future optimizations in the field." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 177, + 189, + 189 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 177, + 189, + 189 + ], + "spans": [ + { + "bbox": [ + 106, + 177, + 189, + 189 + ], + "type": "text", + "content": "10 Conclusion" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 199, + 506, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 199, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 199, + 506, + 266 + ], + "type": "text", + "content": "In conclusion, this survey addresses key gaps in Long CoT research, distinguishing it from Short CoT and providing a comprehensive overview of the field. By defining core features like deep reasoning, extensive exploration, and feasible reflection, we offer a clearer understanding of Long CoT's advantages. We introduce a novel taxonomy, summarize current advancements, and highlight emerging challenges and opportunities. Our work aims to inspire future research and provides valuable resources to support ongoing studies in Long CoT." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 281, + 164, + 294 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 281, + 164, + 294 + ], + "spans": [ + { + "bbox": [ + 106, + 281, + 164, + 294 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 116, + 301, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 119, + 301, + 505, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 301, + 505, + 335 + ], + "spans": [ + { + "bbox": [ + 119, + 301, + 505, + 335 + ], + "type": "text", + "content": "[1] Asma Ben Abacha, Wen-wai Yim, Yujuan Fu, Zhaoyi Sun, Meliha Yetisgen, Fei Xia, and Thomas Lin. Medec: A benchmark for medical error detection and correction in clinical notes. arXiv preprint arXiv:2412.19260, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 120, + 339, + 504, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 339, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 120, + 339, + 504, + 361 + ], + "type": "text", + "content": "[2] Marwan AbdElhameed and Pavly Halim. Inference scaling vs reasoning: An empirical analysis of compute-optimal llm problem-solving. arXiv preprint arXiv:2412.16260, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 121, + 366, + 504, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 366, + 504, + 399 + ], + "spans": [ + { + "bbox": [ + 121, + 366, + 504, + 399 + ], + "type": "text", + "content": "[3] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 121, + 403, + 504, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 403, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 121, + 403, + 504, + 437 + ], + "type": "text", + "content": "[4] Bo Adler, Niket Agarwal, Ashwath Aithal, Dong H Anh, Pallab Bhattacharya, Annika Brundyn, Jared Casper, Bryan Catanzaro, Sharon Clay, Jonathan Cohen, et al. Nematron-4 340b technical report. arXiv preprint arXiv:2406.11704, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 121, + 441, + 506, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 441, + 506, + 474 + ], + "spans": [ + { + "bbox": [ + 121, + 441, + 506, + 474 + ], + "type": "text", + "content": "[5] Shivam Agarwal, Zimin Zhang, Lifan Yuan, Jiawei Han, and Hao Peng. The unreasonable effectiveness of entropy minimization in llm reasoning. arXiv preprint arXiv:2505.15134, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 121, + 479, + 504, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 479, + 504, + 503 + ], + "spans": [ + { + "bbox": [ + 121, + 479, + 504, + 503 + ], + "type": "text", + "content": "[6] Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning. arXiv preprint arXiv:2503.04697, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 121, + 506, + 504, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 506, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 121, + 506, + 504, + 540 + ], + "type": "text", + "content": "[7] Wasi Uddin Ahmad, Sean Narethiran, Somshubra Majumdar, Aleksander Ficek, Siddhartha Jain, Jocelyn Huang, Vahid Noroozi, and Boris Ginsburg. Opencodereasoning: Advancing data distillation for competitive coding. arXiv preprint arXiv:2504.01943, 2025." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 121, + 544, + 504, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 544, + 504, + 566 + ], + "spans": [ + { + "bbox": [ + 121, + 544, + 504, + 566 + ], + "type": "text", + "content": "[8] AI-MO. Aime 2024. https://huggingface.co/datasets/AI-MO/aimo-validation-aime, July 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 120, + 570, + 506, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 570, + 506, + 583 + ], + "spans": [ + { + "bbox": [ + 120, + 570, + 506, + 583 + ], + "type": "text", + "content": "[9] AI-MO. Amc 2023. https://huggingface.co/datasets/AI-MO/aimo-validation-amc, July 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 116, + 586, + 506, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 586, + 506, + 620 + ], + "spans": [ + { + "bbox": [ + 116, + 586, + 506, + 620 + ], + "type": "text", + "content": "[10] Alon Albalak, Duy Phung, Nathan Lile, Rafael Rafailov, Kanishk Gandhi, Louis Castricato, Anikait Singh, Chase Blagden, Violet Xiang, Dakota Mahan, and Nick Haber. Big-math: A large-scale, high-quality math dataset for reinforcement learning in language models, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 116, + 624, + 506, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 624, + 506, + 669 + ], + "spans": [ + { + "bbox": [ + 116, + 624, + 506, + 669 + ], + "type": "text", + "content": "[11] Mohammad Ali Alomrani, Yingxue Zhang, Derek Li, Qianyi Sun, Soumyasundar Pal, Zhanguang Zhang, Yaochen Hu, Rohan Deepak Ajwani, Antonios Valkanas, Raika Karimi, et al. Reasoning on a budget: A survey of adaptive and controllable test-time compute in llms. arXiv preprint arXiv:2507.02076, 2025." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 116, + 672, + 506, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 672, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 116, + 672, + 506, + 696 + ], + "type": "text", + "content": "[12] Alireza Amiri, Xinting Huang, Mark Rofin, and Michael Hahn. Lower bounds for chain-of-thought reasoning in hard-attention transformers. arXiv preprint arXiv:2502.02393, 2025." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 116, + 700, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 700, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 116, + 700, + 506, + 723 + ], + "type": "text", + "content": "[13] Dario Amodei, Chris Olah, Jacob Steinhardt, Paul Christiano, John Schulman, and Dan Mané. Concrete problems in ai safety. arXiv preprint arXiv:1606.06565, 2016." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 36 + }, + { + "para_blocks": [ + { + "bbox": [ + 116, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 116, + 72, + 506, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 72, + 506, + 95 + ], + "spans": [ + { + "bbox": [ + 116, + 72, + 506, + 95 + ], + "type": "text", + 
"content": "[14] Shengnan An, Zexiong Ma, Zeqi Lin, Nanning Zheng, Jian-Guang Lou, and Weizhu Chen. Learning from mistakes makes llm better reasoner. arXiv preprint arXiv:2310.20689, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 116, + 98, + 506, + 141 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 98, + 506, + 141 + ], + "spans": [ + { + "bbox": [ + 116, + 98, + 506, + 141 + ], + "type": "text", + "content": "[15] Carolyn Jane Anderson, Joydeep Biswas, Aleksander Boruch-Gruszecki, Federico Cassano, Molly Q Feldman, Arjun Guha, Francesca Lucchetti, and Zixuan Wu. PhD knowledge not required: A reasoning challenge for large language models. arXiv preprint arXiv:2502.01584, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 116, + 144, + 504, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 144, + 504, + 178 + ], + "spans": [ + { + "bbox": [ + 116, + 144, + 504, + 178 + ], + "type": "text", + "content": "[16] Rohan Anil, Andrew M Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, et al. Palm 2 technical report. arXiv preprint arXiv:2305.10403, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 116, + 181, + 506, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 181, + 506, + 215 + ], + "spans": [ + { + "bbox": [ + 116, + 181, + 506, + 215 + ], + "type": "text", + "content": "[17] Zachary Ankner, Mansheej Paul, Brandon Cui, Jonathan Daniel Chang, and Prithviraj Ammanabrolu. Critique-out-loud reward models. In *Pluralistic Alignment Workshop at NeurIPS* 2024, October 2024. URL https://openreview.net/forum?id=CljYUvI1RW." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 116, + 217, + 506, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 217, + 506, + 262 + ], + "spans": [ + { + "bbox": [ + 116, + 217, + 506, + 262 + ], + "type": "text", + "content": "[18] Thomas Anthony, Zheng Tian, and David Barber. Thinking fast and slow with deep learning and tree search. Advances in neural information processing systems, 30, December 2017. URL https://proceedings.neurips.cc/paper_files/paper/2017/file/d8e1344e27a5b08cdfd5d027d9b8d6de-Paper.pdf." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 116, + 264, + 506, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 264, + 506, + 309 + ], + "spans": [ + { + "bbox": [ + 116, + 264, + 506, + 309 + ], + "type": "text", + "content": "[19] AI Anthropic. The claude 3 model family: Opus, sonnet, haiku. Claude-3 Model Card, 1:1, 2024. URL https://www-cdn.anthropic.com/de8ba9b01c9ab7cbabf5c33b80b7bbc618857627/Model_Card_Claude_3.pdf." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 116, + 312, + 506, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 312, + 506, + 335 + ], + "spans": [ + { + "bbox": [ + 116, + 312, + 506, + 335 + ], + "type": "text", + "content": "[20] Roberto Araya. Do chains-of-thoughts of large language models suffer from hallucinations, cognitive biases, or phobias in bayesian reasoning? arXiv preprint arXiv:2503.15268, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 116, + 337, + 504, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 337, + 504, + 360 + ], + "spans": [ + { + "bbox": [ + 116, + 337, + 504, + 360 + ], + "type": "text", + "content": "[21] Mikhail L Arbazov, Alexey A Shvets, and Sisong Beir. Beyond exponential decay: Rethinking error accumulation in large language models. arXiv preprint arXiv:2505.24187, 2025." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 116, + 362, + 504, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 362, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 116, + 362, + 504, + 384 + ], + "type": "text", + "content": "[22] Daman Arora and Andrea Zanette. Training language models to reason efficiently. arXiv preprint arXiv:2502.04463, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 116, + 388, + 506, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 388, + 506, + 422 + ], + "spans": [ + { + "bbox": [ + 116, + 388, + 506, + 422 + ], + "type": "text", + "content": "[23] Aitor Arrieta, Miriam Ugarte, Pablo Valle, José Antonio Parejo, and Sergio Segura. Early external safety testing of openai's o3-mini: Insights from the pre-deployment evaluation. arXiv preprint arXiv:2501.17749, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 116, + 424, + 504, + 447 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 424, + 504, + 447 + ], + "spans": [ + { + "bbox": [ + 116, + 424, + 504, + 447 + ], + "type": "text", + "content": "[24] Aitor Arrieta, Miriam Ugarte, Pablo Valle, José Antonio Parejo, and Sergio Segura. o3-mini vs deepseek-r1: Which one is safer? arXiv preprint arXiv:2501.18438, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 116, + 449, + 504, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 449, + 504, + 472 + ], + "spans": [ + { + "bbox": [ + 116, + 449, + 504, + 472 + ], + "type": "text", + "content": "[25] Dhananjay Ashok and Jonathan May. Language models can predict their own behavior. arXiv preprint arXiv:2502.13329, 2025." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 116, + 475, + 506, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 475, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 116, + 475, + 506, + 529 + ], + "type": "text", + "content": "[26] Zhangir Azerbayev, Hailey Schoelkopf, Keiran Paster, Marco Dos Santos, Stephen Marcus McAleer, Albert Q. Jiang, Jia Deng, Stella Biderman, and Sean Welleck. Llemma: An open language model for mathematics. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=4WnqRR915j." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 116, + 533, + 506, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 533, + 506, + 577 + ], + "spans": [ + { + "bbox": [ + 116, + 533, + 506, + 577 + ], + "type": "text", + "content": "[27] Alisson Azzolini, Hannah Brandon, Prithvijit Chattopadhyay, Huayu Chen, Jinju Chu, Yin Cui, Jenna Diamond, Yifan Ding, Francesco Ferroni, Rama Govindaraju, et al. Cosmos-reason1: From physical common sense to embodied reasoning. arXiv preprint arXiv:2503.15558, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 116, + 580, + 504, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 580, + 504, + 603 + ], + "spans": [ + { + "bbox": [ + 116, + 580, + 504, + 603 + ], + "type": "text", + "content": "[28] Tanja Baeumel, Josef van Genabith, and Simon Ostermann. The lookahead limitation: Why multi-operand addition is hard for lms. arXiv preprint arXiv:2502.19981, 2025." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 116, + 605, + 504, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 605, + 504, + 639 + ], + "spans": [ + { + "bbox": [ + 116, + 605, + 504, + 639 + ], + "type": "text", + "content": "[29] Yuntao Bai, Saurav Kadavath, Sandipan Kundu, Amanda Askell, Jackson Kernion, Andy Jones, Anna Chen, Anna Goldie, Azalia Mirhoseini, Cameron McKinnon, et al. Constitutional ai: Harmlessness from ai feedback. arXiv preprint arXiv:2212.08073, 2022." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 116, + 641, + 506, + 686 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 641, + 506, + 686 + ], + "spans": [ + { + "bbox": [ + 116, + 641, + 506, + 686 + ], + "type": "text", + "content": "[30] Bowen Baker, Joost Huizinga, Aleksander Madry, Wojciech Zaremba, Jakub Pachocki, and David Farhi. Monitoring reasoning models for misbehavior and the risks of promoting obfuscation. March 2025. URL https://openai.com/index/chain-of-thought-monitoring/." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 116, + 689, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 116, + 689, + 506, + 723 + ], + "type": "text", + "content": "[31] Vidhisha Balachandran, Jingya Chen, Lingjiao Chen, Shivam Garg, Neel Joshi, Yash Lara, John Langford, Besmira Nushi, Vibhav Vineet, Yue Wu, et al. Inference-time scaling for complex tasks: Where we stand and what lies ahead. arXiv preprint arXiv:2504.00294, 2025." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "38" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 37 + }, + { + "para_blocks": [ + { + "bbox": [ + 116, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 116, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 116, + 72, + 505, + 106 + ], + "type": "text", + "content": "[32] Marthe Ballon, Andres Algaba, and Vincent Ginis. The relationship between reasoning and performance in large language models-o3 (mini) thinks harder, not longer. arXiv preprint arXiv:2502.15631, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 116, + 110, + 505, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 110, + 505, + 133 + ], + "spans": [ + { + "bbox": [ + 116, + 110, + 505, + 133 + ], + "type": "text", + "content": "[33] Dibyanayan Bandyopadhyay, Soham Bhattacharjee, and Asif Ekbal. Thinking machines: A survey of llm based reasoning strategies. arXiv preprint arXiv:2503.10814, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 116, + 137, + 506, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 137, + 506, + 183 + ], + "spans": [ + { + "bbox": [ + 116, + 137, + 506, + 183 + ], + "type": "text", + "content": "[34] Hritik Bansal, Arian Hosseini, Rishabh Agarwal, Vinh Q. Tran, and Mehran Kazemi. Smaller, weaker, yet better: Training LLM reasoners via compute-optimal sampling. In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, January 2025. URL https://openreview.net/forum?id=HuYSURUxs2." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 116, + 185, + 504, + 210 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 185, + 504, + 210 + ], + "spans": [ + { + "bbox": [ + 116, + 185, + 504, + 210 + ], + "type": "text", + "content": "[35] Hieu Tran Bao, Nguyen Cong Dat, Nguyen Duc Anh, and Hoang Thanh Tung. Learning to stop overthinking at test time. arXiv preprint arXiv:2502.10954, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 117, + 212, + 506, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 212, + 506, + 247 + ], + "spans": [ + { + "bbox": [ + 117, + 212, + 506, + 247 + ], + "type": "text", + "content": "[36] Keqin Bao, Nuo Chen, Xiaoyuan Li, Binyuan Hui, Bowen Yu, Fuli Feng, Junyang Lin, Xiangnan He, and Dayiheng Liu. Teaching llm to reason: Reinforcement learning from algorithmic problems without code. arXiv preprint arXiv:2507.07498, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 116, + 250, + 506, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 250, + 506, + 285 + ], + "spans": [ + { + "bbox": [ + 116, + 250, + 506, + 285 + ], + "type": "text", + "content": "[37] Qiming Bao, Alex Yuxuan Peng, Tim Hartill, Neset Tan, Zhenyun Deng, Michael Witbrock, and Jiamou Liu. 
Multi-step deductive reasoning over natural language: An empirical study on out-of-distribution generalisation. arXiv preprint arXiv:2207.14000, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 116, + 287, + 506, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 287, + 506, + 332 + ], + "spans": [ + { + "bbox": [ + 116, + 287, + 506, + 332 + ], + "type": "text", + "content": "[38] Qiming Bao, Gael Gendron, Alex Yuxuan Peng, Wanjun Zhong, Neset Tan, Yang Chen, Michael Witbrock, and Jiamou Liu. Assessing and enhancing the robustness of large language models with task structure variations for logical reasoning. arXiv preprint arXiv:2310.09430, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 116, + 336, + 506, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 336, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 116, + 336, + 506, + 380 + ], + "type": "text", + "content": "[39] Qiming Bao, Alex Yuxuan Peng, Zhenyun Deng, Wanjun Zhong, Neset Tan, Nathan Young, Yang Chen, Yonghua Zhu, Michael Witbrock, and Jiamou Liu. Contrastive learning with logic-driven data augmentation for logical reasoning over text. arXiv preprint arXiv:2305.12599, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 116, + 385, + 506, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 385, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 116, + 385, + 506, + 463 + ], + "type": "text", + "content": "[40] Qiming Bao, Alex Peng, Zhenyun Deng, Wanjun Zhong, Gael Gendron, Timothy Pistotti, Neset Tan, Nathan Young, Yang Chen, Yonghua Zhu, Paul Denny, Michael Witbrock, and Jiamou Liu. Abstract Meaning Representation-based logic-driven data augmentation for logical reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 5914–5934, Bangkok, Thailand, August 2024. 
Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.353. URL https://aclanthology.org/2024-findings-acl.353/." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 116, + 467, + 506, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 467, + 506, + 523 + ], + "spans": [ + { + "bbox": [ + 116, + 467, + 506, + 523 + ], + "type": "text", + "content": "[41] Qiming Bao, Juho Leinonen, Alex Yuxuan Peng, Wanjun Zhong, Gael Gendron, Timothy Pistotti, Alice Huang, Paul Denny, Michael Witbrock, and Jiamou Liu. Exploring iterative enhancement for improving learnersourced multiple-choice question explanations with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 39, pages 28955–28963, Apr 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 116, + 526, + 506, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 526, + 506, + 571 + ], + "spans": [ + { + "bbox": [ + 116, + 526, + 506, + 571 + ], + "type": "text", + "content": "[42] Brian R Bartoldson, Siddarth Venkatraman, James Diffenderfer, Moksh Jain, Tal Ben-Nun, Seanie Lee, Minsu Kim, Johan Obando-Ceron, Yoshua Bengio, and Bhavya Kailkhura. Trajectory balance with asynchrony: Decoupling exploration and learning for fast, scalable llm post-training. arXiv preprint arXiv:2503.18929, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 116, + 574, + 506, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 574, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 116, + 574, + 506, + 609 + ], + "type": "text", + "content": "[43] Sarmad Bashir, Alessio Ferrari, Abbas Khan, Per Erik Strandberg, Zulqarnain Haider, Mehrdad Saadatmand, and Markus Bohlin. Requirements ambiguity detection and explanation with llms: An industrial study. July 2025." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 116, + 613, + 506, + 636 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 613, + 506, + 636 + ], + "spans": [ + { + "bbox": [ + 116, + 613, + 506, + 636 + ], + "type": "text", + "content": "[44] Ali Behrouz, Peilin Zhong, and Vahab Mirrokni. Titans: Learning to memorize at test time. arXiv preprint arXiv:2501.00663, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 116, + 639, + 506, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 639, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 116, + 639, + 506, + 685 + ], + "type": "text", + "content": "[45] Yoshua Bengio, Michael Cohen, Damiano Fornasiere, Joumana Ghosn, Pietro Greiner, Matt MacDermott, Soren Mindermann, Adam Oberman, Jesse Richardson, Oliver Richardson, et al. Superintelligent agents pose catastrophic risks: Can scientist ai offer a safer path? arXiv preprint arXiv:2502.15657, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 116, + 689, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 116, + 689, + 506, + 723 + ], + "type": "text", + "content": "[46] Yoshua Bengio, Soren Mindermann, Daniel Privitera, Tamay Besiroglu, Rishi Bommasani, Stephen Casper, Yejin Choi, Philip Fox, Ben Garfinkel, Danielle Goldfarb, et al. International ai safety report. arXiv preprint arXiv:2501.17805, 2025." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "39" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 38 + }, + { + "para_blocks": [ + { + "bbox": [ + 116, + 72, + 507, + 721 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 116, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 116, + 72, + 505, + 106 + ], + "type": "text", + "content": "[47] Leonardo Bertolazzi, Philipp Mondorf, Barbara Plank, and Raffaella Bernardi. The validation gap: A mechanistic analysis of how language models compute arithmetic but fail to validate it. arXiv preprint arXiv:2502.11771, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 116, + 110, + 506, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 110, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 116, + 110, + 506, + 176 + ], + "type": "text", + "content": "[48] Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nczyk, and Torsten Hoefler. Graph of thoughts: Solving elaborate problems with large language models. Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17682-17690, Mar. 2024. 
doi: 10.1609/aaai.v38i16.29720. URL https://ojs.aaai.org/index.php/AAAI/article/view/29720." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 116, + 178, + 506, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 178, + 506, + 214 + ], + "spans": [ + { + "bbox": [ + 116, + 178, + 506, + 214 + ], + "type": "text", + "content": "[49] Maciej Besta, Florim Memedi, Zhenyu Zhang, Robert Gerstenberger, Guangyuan Piao, Nils Blach, Piotr Nyczyk, Marcin Copik, Grzegorz Kwaśniewski, Jürgen Müller, et al. Demystifying chains, trees, and graphs of thoughts. arXiv preprint arXiv:2401.14295, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 117, + 217, + 506, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 217, + 506, + 251 + ], + "spans": [ + { + "bbox": [ + 117, + 217, + 506, + 251 + ], + "type": "text", + "content": "[50] Maciej Besta, Julia Barth, Eric Schreiber, Ales Kubicek, Afonso Catarino, Robert Gerstenberger, Piotr Nczyk, Patrick Iff, Yueling Li, Sam Houliston, et al. Reasoning language models: A blueprint. arXiv preprint arXiv:2501.11223, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 117, + 255, + 504, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 255, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 117, + 255, + 504, + 289 + ], + "type": "text", + "content": "[51] Jinhe Bi, Danqi Yan, Yifan Wang, Wenke Huang, Haokun Chen, Guancheng Wan, Mang Ye, Xun Xiao, Hinrich Schuetze, Volker Tresp, et al. Cot-kinetics: A theoretical modeling assessing lrm reasoning process. arXiv preprint arXiv:2505.13408, 2025." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 117, + 292, + 504, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 292, + 504, + 326 + ], + "spans": [ + { + "bbox": [ + 117, + 292, + 504, + 326 + ], + "type": "text", + "content": "[52] Xiao Bi, Deli Chen, Guanting Chen, Shanhuang Chen, Damai Dai, Chengqi Deng, Honghui Ding, Kai Dong, Qiushi Du, Zhe Fu, et al. Deepseek llm: Scaling open-source language models with longtermism. arXiv preprint arXiv:2401.02954, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 116, + 330, + 507, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 330, + 507, + 375 + ], + "spans": [ + { + "bbox": [ + 116, + 330, + 507, + 375 + ], + "type": "text", + "content": "[53] Zhen Bi, Ningyu Zhang, Yinuo Jiang, Shumin Deng, Guozhou Zheng, and Huajun Chen. When do program-of-thought works for reasoning? In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pages 17691-17699, 2024. URL https://ods.aaai.org/index.php/AAAI/article/view/29721/31237." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 116, + 378, + 504, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 378, + 504, + 401 + ], + "spans": [ + { + "bbox": [ + 116, + 378, + 504, + 401 + ], + "type": "text", + "content": "[54] Zhenni Bi, Kai Han, Chuanjian Liu, Yehui Tang, and Yunhe Wang. Forest-of-thought: Scaling test-time compute for enhancing lIm reasoning. arXiv preprint arXiv:2412.09078, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 116, + 404, + 506, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 404, + 506, + 437 + ], + "spans": [ + { + "bbox": [ + 116, + 404, + 506, + 437 + ], + "type": "text", + "content": "[55] Edoardo Botta, Yuchen Li, Aashay Mehta, Jordan T Ash, Cyril Zhang, and Andrej Risteski. On the query complexity of verifier-assisted language generation. 
arXiv preprint arXiv:2502.12123, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 117, + 441, + 506, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 441, + 506, + 487 + ], + "spans": [ + { + "bbox": [ + 117, + 441, + 506, + 487 + ], + "type": "text", + "content": "[56] David Brandfonbrener, Simon Henniger, Sibi Raja, Tarun Prasad, Chloe Loughridge, Federico Cassano, Sabrina Ruixin Hu, Jianang Yang, William E Byrd, Robert Zinkov, et al. Vermcts: Synthesizing multi-step programs using a verifier, a large language model, and tree search. arXiv preprint arXiv:2402.08147, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 117, + 490, + 506, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 490, + 506, + 524 + ], + "spans": [ + { + "bbox": [ + 117, + 490, + 506, + 524 + ], + "type": "text", + "content": "[57] Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv preprint arXiv:2407.21787, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 117, + 528, + 504, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 528, + 504, + 551 + ], + "spans": [ + { + "bbox": [ + 117, + 528, + 504, + 551 + ], + "type": "text", + "content": "[58] Dan Busbridge, Amitis Shidani, Floris Weers, Jason Ramapuram, Etai Littwin, and Russ Webb. Distillation scaling laws. arXiv preprint arXiv:2502.08606, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 117, + 554, + 506, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 554, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 117, + 554, + 506, + 578 + ], + "type": "text", + "content": "[59] Ji Young Byun, Young-Jin Park, Nvid Azizan, and Rama Chellappa. Test-time-scaling for zero-shot diagnosis with visual-language reasoning. 
arXiv preprint arXiv:2506.11166, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 117, + 581, + 506, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 581, + 506, + 658 + ], + "spans": [ + { + "bbox": [ + 117, + 581, + 506, + 658 + ], + "type": "text", + "content": "[60] Ju-Seung Byun, Jiyun Chun, Jihyung Kil, and Andrew Perrault. ARES: Alternating reinforcement learning and supervised fine-tuning for enhanced multi-modal chain-of-thought reasoning through diverse AI feedback. In Yaser Al-Onaizan, Mohit Bansal, and YunNung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 4410-4430, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.252. URL https://aclanthology.org/2024.emnlp-main.252/." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 117, + 662, + 504, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 662, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 117, + 662, + 504, + 685 + ], + "type": "text", + "content": "[61] Huanqia Cai, Yijun Yang, and Zhifeng Li. System-2 mathematical reasoning via enriched instruction tuning. arXiv preprint arXiv:2412.16964, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 117, + 689, + 506, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 689, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 117, + 689, + 506, + 721 + ], + "type": "text", + "content": "[62] Zheng Cai, Maosong Cao, Haojiong Chen, Kai Chen, Keyu Chen, Xin Chen, Xun Chen, Zehui Chen, Zhi Chen, Pei Chu, et al. Internl m2 technical report. arXiv preprint arXiv:2403.17297, 2024." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "40" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 39 + }, + { + "para_blocks": [ + { + "bbox": [ + 116, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 116, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 116, + 72, + 505, + 106 + ], + "type": "text", + "content": "[63] Erik Cambria, Lorenzo Malandri, Fabio Mercorio, Navid Nobani, and Andrea Seveso. Xai meets llms: A survey of the relation between explainable ai and large language models. arXiv preprint arXiv:2407.15248, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 116, + 108, + 506, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 108, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 116, + 108, + 506, + 176 + ], + "type": "text", + "content": "[64] Lang Cao. GraphReason: Enhancing reasoning capabilities of large language models through a graph-based verification approach. 
In Bhavana Dalvi Mishra, Greg Durrett, Peter Jansen, Ben Lipkin, Danilo Neves Ribeiro, Lionel Wong, Xi Ye, and Wenting Zhao, editors, Proceedings of the 2nd Workshop on Natural Language Reasoning and Structured Explanations (@ACL 2024), pages 1-12, Bangkok, Thailand, August 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.nlrse-1.1/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 116, + 178, + 506, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 178, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 116, + 178, + 506, + 212 + ], + "type": "text", + "content": "[65] Zhepeng Cen, Yihang Yao, William Han, Zuxin Liu, and Ding Zhao. Behavior injection: Preparing language models for reinforcement learning. arXiv preprint arXiv:2505.18917, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 117, + 216, + 504, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 216, + 504, + 250 + ], + "spans": [ + { + "bbox": [ + 117, + 216, + 504, + 250 + ], + "type": "text", + "content": "[66] Linzheng Chai, Jian Yang, Tao Sun, Hongcheng Guo, Jiaheng Liu, Bing Wang, Xiannian Liang, Jiaqi Bai, Tongliang Li, Qiyao Peng, et al. xcot: Cross-lingual instruction tuning for cross-lingual chain-of-thought reasoning. arXiv preprint arXiv:2401.07037, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 117, + 253, + 506, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 253, + 506, + 297 + ], + "spans": [ + { + "bbox": [ + 117, + 253, + 506, + 297 + ], + "type": "text", + "content": "[67] Jun Shern Chan, Neil Chowdhury, Oliver Jaffe, James Aung, Dane Sherburn, Evan Mays, Giulio Starace, Kevin Liu, Leon Maksin, Tejal Patwardhan, et al. Mle-bench: Evaluating machine learning agents on machine learning engineering. arXiv preprint arXiv:2410.07095, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 117, + 301, + 506, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 301, + 506, + 345 + ], + "spans": [ + { + "bbox": [ + 117, + 301, + 506, + 345 + ], + "type": "text", + "content": "[68] Hyeong Soo Chang. On the convergence rate of mcts for the optimal value estimation in markov decision processes. IEEE Transactions on Automatic Control, pages 1-6, February 2025. doi: 10.1109/TAC.2025.3538807. URL https://ieeexplore.ieee.org/document/10870057." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 117, + 349, + 504, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 349, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 117, + 349, + 504, + 384 + ], + "type": "text", + "content": "[69] Aili Chen, Aonian Li, Bangwei Gong, Binyang Jiang, Bo Fei, Bo Yang, Boji Shan, Changqing Yu, Chao Wang, Cheng Zhu, et al. Minimax-m1: Scaling test-time compute efficiently with lightning attention. arXiv preprint arXiv:2506.13585, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 117, + 386, + 506, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 386, + 506, + 420 + ], + "spans": [ + { + "bbox": [ + 117, + 386, + 506, + 420 + ], + "type": "text", + "content": "[70] Andong Chen, Yuchen Song, Wenxin Zhu, Kehai Chen, Muyun Yang, Tiejun Zhao, et al. Evaluating o1-like llms: Unlocking reasoning for translation through comprehensive analysis. arXiv preprint arXiv:2502.11544, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 117, + 422, + 506, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 422, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 117, + 422, + 506, + 456 + ], + "type": "text", + "content": "[71] Beiduo Chen, Yang Janet Liu, Anna Korhonen, and Barbara Plank. Threading the needle: Reweaving chain-of-thought reasoning to explain human label variation. 
arXiv preprint arXiv:2505.23368, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 117, + 460, + 504, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 460, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 117, + 460, + 504, + 495 + ], + "type": "text", + "content": "[72] Guizhen Chen, Weiwen Xu, Hao Zhang, Hou Pong Chan, Chaoqun Liu, Lidong Bing, Deli Zhao, Anh Tuan Luu, and Yu Rong. Finereason: Evaluating and improving llms' deliberate reasoning through reflective puzzle solving. arXiv preprint arXiv:2502.20238, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 117, + 497, + 506, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 497, + 506, + 564 + ], + "spans": [ + { + "bbox": [ + 117, + 497, + 506, + 564 + ], + "type": "text", + "content": "[73] Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Step-level value preference optimization for mathematical reasoning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 7889-7903, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.463. URL https://aclanthology.org/2024_findings-emnlp.463/." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 117, + 567, + 504, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 567, + 504, + 611 + ], + "spans": [ + { + "bbox": [ + 117, + 567, + 504, + 611 + ], + "type": "text", + "content": "[74] Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Alphamath almost zero: Process supervision without process. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=VaXnxQ3UKo." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 117, + 614, + 506, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 614, + 506, + 658 + ], + "spans": [ + { + "bbox": [ + 117, + 614, + 506, + 658 + ], + "type": "text", + "content": "[75] Haibin Chen, Kangtao Lv, Chengwei Hu, Yanshi Li, Yujin Yuan, Yancheng He, Xingyao Zhang, Langming Liu, Shilei Liu, Wenbo Su, et al. Chineseecomqa: A scalable e-commerce concept evaluation benchmark for large language models. arXiv preprint arXiv:2502.20196, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 117, + 662, + 506, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 662, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 117, + 662, + 506, + 696 + ], + "type": "text", + "content": "[76] Hanjie Chen, Zhouxiang Fang, Yash Singla, and Mark Dredze. Benchmarking large language models on answering and explaining challenging medical questions. arXiv preprint arXiv:2402.18060, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 117, + 700, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 700, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 117, + 700, + 506, + 723 + ], + "type": "text", + "content": "[77] Haolin Chen, Yihao Feng, Zuxin Liu, Weiran Yao, Akshara Prabhakar, Shelby Heinecke, Ricky Ho, Phil Mui, Silvio Savarese, Caiming Xiong, et al. 
Language models are hid" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "41" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 40 + }, + { + "para_blocks": [ + { + "bbox": [ + 115, + 72, + 507, + 721 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 136, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 136, + 72, + 505, + 95 + ], + "type": "text", + "content": "den reasoners: Unlocking latent reasoning capabilities via self-rewarding. arXiv preprint arXiv:2411.04282, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 99, + 506, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 99, + 506, + 133 + ], + "spans": [ + { + "bbox": [ + 115, + 99, + 506, + 133 + ], + "type": "text", + "content": "[78] Hardy Chen, Haoqin Tu, Hui Liu, Xianfeng Tang, Xinya Du, Yuyin Zhou, and Cihang Xie. VI-thinking: An r1-derived visual instruction tuning dataset for thinkable lvlms. https://github.com/UCSC-VLAA/VL-Thinkinq, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 137, + 505, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 137, + 505, + 161 + ], + "spans": [ + { + "bbox": [ + 115, + 137, + 505, + 161 + ], + "type": "text", + "content": "[79] Jian Chen, Guohao Tang, Guofu Zhou, and Wu Zhu. Chatgpt and deepseek: Can they predict the stock market and macroeconomy? arXiv preprint arXiv:2502.10008, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 165, + 506, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 165, + 506, + 208 + ], + "spans": [ + { + "bbox": [ + 115, + 165, + 506, + 208 + ], + "type": "text", + "content": "[80] Jianhao Chen, Zishuo Xun, Bocheng Zhou, Han Qi, Qiaosheng Zhang, Yang Chen, Wei Hu, Yuzhong Qu, Wanli Ouyang, and Shuyue Hu. Do we truly need so many samples? multi-llm repeated sampling efficiently scale test-time compute. arXiv preprint arXiv:2504.00762, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 213, + 506, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 213, + 506, + 247 + ], + "spans": [ + { + "bbox": [ + 115, + 213, + 506, + 247 + ], + "type": "text", + "content": "[81] Jiefeng Chen, Jie Ren, Xinyun Chen, Chengrun Yang, Ruoxi Sun, and Sercan Ö Arik. Sets: Leveraging self-verification and self-correction for improved test-time scaling. arXiv preprint arXiv:2501.19306, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 251, + 504, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 251, + 504, + 286 + ], + "spans": [ + { + "bbox": [ + 115, + 251, + 504, + 286 + ], + "type": "text", + "content": "[82] Jierun Chen, Tiezheng Yu, Haoli Bai, Lewei Yao, Jiannan Wu, Kaican Li, Fei Mi, Chaofan Tao, Lei Zhu, Manyi Zhang, et al. The synergy dilemma of long-cot sft and rl: Investigating post-training techniques for reasoning vlms. 
arXiv preprint arXiv:2507.07562, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 115, + 290, + 504, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 290, + 504, + 324 + ], + "spans": [ + { + "bbox": [ + 115, + 290, + 504, + 324 + ], + "type": "text", + "content": "[83] Junying Chen, Zhenyang Cai, Ke Ji, Xidong Wang, Wanlong Liu, Rongsheng Wang, Jianye Hou, and Benyou Wang. Huatuogpt-o1, towards medical complex reasoning with llms. arXiv preprint arXiv:2412.18925, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 328, + 504, + 362 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 328, + 504, + 362 + ], + "spans": [ + { + "bbox": [ + 115, + 328, + 504, + 362 + ], + "type": "text", + "content": "[84] Justin Chih-Yao Chen, Archiki Prasad, Swarnadeep Saha, Elias Stengel-Eskin, and Mohit Bansal. Magicore: Multi-agent, iterative, coarse-to-fine refinement for reasoning. arXiv preprint arXiv:2409.12147, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 367, + 506, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 367, + 506, + 401 + ], + "spans": [ + { + "bbox": [ + 115, + 367, + 506, + 401 + ], + "type": "text", + "content": "[85] Kedi Chen, Zhikai Lei, Fan Zhang, Yinqi Zhang, Qin Chen, Jie Zhou, Liang He, Qipeng Guo, Kai Chen, and Wei Zhang. Code-driven inductive synthesis: Enhancing reasoning abilities of large language models with sequences. arXiv preprint arXiv:2503.13109, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 405, + 507, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 405, + 507, + 438 + ], + "spans": [ + { + "bbox": [ + 115, + 405, + 507, + 438 + ], + "type": "text", + "content": "[86] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. 
https://github.com/Deep-Agent/R1-V, 2025. Accessed: 2025-02-02." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 115, + 443, + 506, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 443, + 506, + 476 + ], + "spans": [ + { + "bbox": [ + 115, + 443, + 506, + 476 + ], + "type": "text", + "content": "[87] Michael K Chen, Xikun Zhang, and Dacheng Tao. Justlogic: A comprehensive benchmark for evaluating deductive reasoning in large language models. arXiv preprint arXiv:2501.14851, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 481, + 504, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 481, + 504, + 515 + ], + "spans": [ + { + "bbox": [ + 115, + 481, + 504, + 515 + ], + "type": "text", + "content": "[88] Mingyang Chen, Tianpeng Li, Haoze Sun, Yijie Zhou, Chenzheng Zhu, Fan Yang, Zenan Zhou, Weipeng Chen, Haofen Wang, Jeff Z Pan, et al. Learning to reason with search for llms via reinforcement learning. arXiv preprint arXiv:2503.19470, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 115, + 519, + 504, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 519, + 504, + 543 + ], + "spans": [ + { + "bbox": [ + 115, + 519, + 504, + 543 + ], + "type": "text", + "content": "[89] Nuo Chen, Zhiyuan Hu, Qingyun Zou, Jiaying Wu, Qian Wang, Bryan Hooi, and Bingsheng He. Judgerm: Large reasoning models as a judge. arXiv preprint arXiv:2504.00050, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 547, + 506, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 547, + 506, + 591 + ], + "spans": [ + { + "bbox": [ + 115, + 547, + 506, + 591 + ], + "type": "text", + "content": "[90] Qiguang Chen, Libo Qin, Jiaqi WANG, Jingxuan Zhou, and Wanxiang Che. Unlocking the capabilities of thought: A reasoning boundary framework to quantify and optimize chain-of-thought. 
In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=pC44UMwy2v." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 115, + 596, + 506, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 596, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 115, + 596, + 506, + 673 + ], + "type": "text", + "content": "[91] Qiguang Chen, Libo Qin, Jin Zhang, Zhi Chen, Xiao Xu, and Wanxiang Che. " + }, + { + "bbox": [ + 115, + 596, + 506, + 673 + ], + "type": "inline_equation", + "content": "\\mathbf{M}^{3}\\mathrm{CoT}" + }, + { + "bbox": [ + 115, + 596, + 506, + 673 + ], + "type": "text", + "content": ": A novel benchmark for multi-domain multi-step multi-modal chain-of-thought. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 8199–8221, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.446. URL https://aclanthology.org/2024.acl-long.446/." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 677, + 506, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 677, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 115, + 677, + 506, + 721 + ], + "type": "text", + "content": "[92] Qiguang Chen, Libo Qin, Jinhao Liu, Yue Liao, Jiaqi Wang, Jingxuan Zhou, and Wanxiang Che. Rbf++: Quantifying and optimizing reasoning boundaries across measurable and unmeasurable capabilities for chain-of-thought reasoning. arXiv preprint arXiv:2505.13307, 2025." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "42" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 41 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 116, + 72, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 72, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 116, + 72, + 505, + 117 + ], + "type": "text", + "content": "[93] Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiaqi Wang, Mengkang Hu, Zhi Chen, Wanxiang Che, and Ting Liu. Ecm: A unified electronic circuit model for explaining the emergence of in-context learning and chain-of-thought in large language model. arXiv preprint arXiv:2502.03325, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 116, + 119, + 504, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 119, + 504, + 153 + ], + "spans": [ + { + "bbox": [ + 116, + 119, + 504, + 153 + ], + "type": "text", + "content": "[94] Qiguang Chen, Mingda Yang, Libo Qin, Jinhao Liu, Zheng Yan, Jiannan Guan, Dengyun Peng, Yiyan Ji, Hanjing Li, Mengkang Hu, et al. Ai4research: A survey of artificial intelligence for scientific research. arXiv preprint arXiv:2507.01903, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 116, + 156, + 505, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 156, + 505, + 190 + ], + "spans": [ + { + "bbox": [ + 116, + 156, + 505, + 190 + ], + "type": "text", + "content": "[95] Qiqi Chen, Xinpeng Wang, Philipp Mondorf, Michael A Hedderich, and Barbara Plank. Understanding when tree of thoughts succeeds: Larger models excel in generation, not discrimination. arXiv preprint arXiv:2410.17820, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 116, + 193, + 506, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 193, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 116, + 193, + 506, + 228 + ], + "type": "text", + "content": "[96] Shiqi Chen, Jinghan Zhang, Tongyao Zhu, Wei Liu, Siyang Gao, Miao Xiong, Manling Li, and Junxian He. Bring reason to vision: Understanding perception and reasoning through model merging. arXiv preprint arXiv:2505.05464, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 116, + 230, + 504, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 230, + 504, + 264 + ], + "spans": [ + { + "bbox": [ + 116, + 230, + 504, + 264 + ], + "type": "text", + "content": "[97] Shuang Chen, Yue Guo, Zhaochen Su, Yafu Li, Yulun Wu, Jiacheng Chen, Jiayu Chen, Weijie Wang, Xiaoye Qu, and Yu Cheng. Advancing multimodal reasoning: From optimized cold start to staged reinforcement learning. arXiv preprint arXiv:2506.04207, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 116, + 266, + 506, + 333 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 266, + 506, + 333 + ], + "spans": [ + { + "bbox": [ + 116, + 266, + 506, + 333 + ], + "type": "text", + "content": "[98] Sijia Chen and Baochun Li. Toward adaptive reasoning in large language models with thought rollback. 
In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 7033-7056. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/chen24y.html." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 116, + 335, + 504, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 335, + 504, + 358 + ], + "spans": [ + { + "bbox": [ + 116, + 335, + 504, + 358 + ], + "type": "text", + "content": "[99] Weizhe Chen, Sven Koenig, and Bistra Dilkina. Iterative deepening sampling for large language models. arXiv preprint arXiv:2502.05449, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 361, + 505, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 361, + 505, + 407 + ], + "spans": [ + { + "bbox": [ + 111, + 361, + 505, + 407 + ], + "type": "text", + "content": "[100] Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W. Cohen. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. Transactions on Machine Learning Research, November 2023. ISSN 2835-8856. URL https://openreview.net/forum?id=YfZ4ZPt8zd." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 409, + 504, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 409, + 504, + 443 + ], + "spans": [ + { + "bbox": [ + 111, + 409, + 504, + 443 + ], + "type": "text", + "content": "[101] Wenxiang Chen, Wei He, Zhiheng Xi, Honglin Guo, Boyang Hong, Jiazheng Zhang, Rui Zheng, Nijun Li, Tao Gui, Yun Li, et al. Better process supervision with bi-directional rewarding signals. arXiv preprint arXiv:2503.04618, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 446, + 504, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 446, + 504, + 480 + ], + "spans": [ + { + "bbox": [ + 111, + 446, + 504, + 480 + ], + "type": "text", + "content": "[102] Xinghao Chen, Zhijing Sun, Wenjin Guo, Miaoran Zhang, Yanjun Chen, Yirong Sun, Hui Su, Yijie Pan, Dietrich Klakow, Wenjie Li, et al. Unveiling the key factors for distilling chain-of-thought reasoning. arXiv preprint arXiv:2502.18001, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 483, + 504, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 483, + 504, + 517 + ], + "spans": [ + { + "bbox": [ + 111, + 483, + 504, + 517 + ], + "type": "text", + "content": "[103] Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for " + }, + { + "bbox": [ + 111, + 483, + 504, + 517 + ], + "type": "inline_equation", + "content": "2 + 3 = ?" + }, + { + "bbox": [ + 111, + 483, + 504, + 517 + ], + "type": "text", + "content": " on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 519, + 504, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 519, + 504, + 553 + ], + "spans": [ + { + "bbox": [ + 111, + 519, + 504, + 553 + ], + "type": "text", + "content": "[104] Xinyun Chen, Maxwell Lin, Nathanael Scharli, and Denny Zhou. Teaching large language models to self-debug. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=KuPixIqPiq." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 556, + 506, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 556, + 506, + 612 + ], + "spans": [ + { + "bbox": [ + 111, + 556, + 506, + 612 + ], + "type": "text", + "content": "[105] Yanda Chen, Joe Benton, Ansh Radhakrishnan, Jonathan Uesato Carson Denison, John Schulman, Arushi Somani, Peter Hase, Misha Wagner Fabien Roger Vlad Mikulik, Sam Bowman, Jan Leike Jared Kaplan, et al. Reasoning models don't always say what they think. April 2025. URL https://www.anthropic.com/research/reasoning-models-dont-say-think." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 614, + 504, + 648 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 614, + 504, + 648 + ], + "spans": [ + { + "bbox": [ + 111, + 614, + 504, + 648 + ], + "type": "text", + "content": "[106] Yanxi Chen, Xuchen Pan, Yaliang Li, Bolin Ding, and Jingren Zhou. A simple and provable scaling law for the test-time compute of large language models. arXiv preprint arXiv:2411.19477, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 651, + 506, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 651, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 651, + 506, + 685 + ], + "type": "text", + "content": "[107] Yezeng Chen, Zui Chen, and Yi Zhou. Brain-inspired two-stage approach: Enhancing mathematical reasoning by imitating human thought processes. arXiv preprint arXiv:2403.00800, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 688, + 506, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 688, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 111, + 688, + 506, + 721 + ], + "type": "text", + "content": "[108] Yihang Chen, Haikang Deng, Kaiqiao Han, and Qingyue Zhao. Policy frameworks for transparent chain-of-thought reasoning in large language models. 
arXiv preprint arXiv:2503.14521, 2025." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "43" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 42 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 506, + 116 + ], + "type": "text", + "content": "[109] Yilong Chen, Junyuan Shang, Zhenyu Zhang, Yanxi Xie, Jiawei Sheng, Tingwen Liu, Shuo-huan Wang, Yu Sun, Hua Wu, and Haifeng Wang. Inner thinking transformer: Leveraging dynamic depth scaling to foster adaptive internal thinking. arXiv preprint arXiv:2502.13842, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 120, + 506, + 164 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 120, + 506, + 164 + ], + "spans": [ + { + "bbox": [ + 111, + 120, + 506, + 164 + ], + "type": "text", + "content": "[110] Zhenfang Chen, Delin Chen, Rui Sun, Wenjun Liu, and Chuang Gan. Scaling autonomous agents via automatic reward modeling and planning. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=womU9cEwcO." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 168, + 506, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 168, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 111, + 168, + 506, + 212 + ], + "type": "text", + "content": "[111] Zhi Chen, Qiguang Chen, Libo Qin, Qipeng Guo, Haijun Lv, Yicheng Zou, Wanxiang Che, Hang Yan, Kai Chen, and Dahua Lin. What are the essential factors in crafting effective long context multi-hop instruction datasets? insights and best practices. arXiv preprint arXiv:2409.01893, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 216, + 504, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 216, + 504, + 250 + ], + "spans": [ + { + "bbox": [ + 111, + 216, + 504, + 250 + ], + "type": "text", + "content": "[112] Zihan Chen, Song Wang, Zhen Tan, Xingbo Fu, Zhenyu Lei, Peng Wang, Huan Liu, Cong Shen, and Jundong Li. A survey of scaling in large language model reasoning. arXiv preprint arXiv:2504.02181, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 253, + 506, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 253, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 111, + 253, + 506, + 319 + ], + "type": "text", + "content": "[113] Ziru Chen, Michael White, Ray Mooney, Ali Payani, Yu Su, and Huan Sun. When is tree search useful for LLM planning? it depends on the discriminator. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 13659–13678, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.738. URL https://aclanthology.org/2024.acl-long.738/." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 323, + 506, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 323, + 506, + 389 + ], + "spans": [ + { + "bbox": [ + 111, + 323, + 506, + 389 + ], + "type": "text", + "content": "[114] Zixiang Chen, Yihe Deng, Huizhuo Yuan, Kaixuan Ji, and Quanquan Gu. Self-play fine-tuning converts weak language models to strong language models. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 6621-6642. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/chen24j.html." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 392, + 504, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 392, + 504, + 426 + ], + "spans": [ + { + "bbox": [ + 111, + 392, + 504, + 426 + ], + "type": "text", + "content": "[115] Zui Chen, Tianqiao Liu, Mi Tian, Qing Tong, Weiqi Luo, and Zitao Liu. Advancing math reasoning in language models: The impact of problem-solving data, data synthesis methods, and training stages. arXiv preprint arXiv:2501.14002, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 430, + 504, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 430, + 504, + 463 + ], + "spans": [ + { + "bbox": [ + 111, + 430, + 504, + 463 + ], + "type": "text", + "content": "[116] Daixuan Cheng, Shaohan Huang, Xuekai Zhu, Bo Dai, Wayne Xin Zhao, Zhenliang Zhang, and Furu Wei. Reasoning with exploration: An entropy perspective. arXiv preprint arXiv:2506.14758, 2025." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 466, + 504, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 466, + 504, + 500 + ], + "spans": [ + { + "bbox": [ + 111, + 466, + 504, + 500 + ], + "type": "text", + "content": "[117] Jiahao Cheng, Tiancheng Su, Jia Yuan, Guoxiu He, Jiawei Liu, Xinqi Tao, Jingwen Xie, and Huaxia Li. Chain-of-thought prompting obscures hallucination cues in large language models: An empirical evaluation. arXiv preprint arXiv:2506.17088, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 503, + 504, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 503, + 504, + 547 + ], + "spans": [ + { + "bbox": [ + 111, + 503, + 504, + 547 + ], + "type": "text", + "content": "[118] Jiale Cheng, Xiao Liu, Cunxiang Wang, Xiaotao Gu, Yida Lu, Dan Zhang, Yuxiao Dong, Jie Tang, Hongning Wang, and Minlie Huang. Spar: Self-play with tree-search refinement to improve instruction-following in large language models. arXiv preprint arXiv:2412.11605, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 552, + 504, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 552, + 504, + 585 + ], + "spans": [ + { + "bbox": [ + 111, + 552, + 504, + 585 + ], + "type": "text", + "content": "[119] Junhang Cheng, Fang Liu, Chengru Wu, and Li Zhang. Adaptivellm: A framework for selecting optimal cost-efficient llm for code-generation based on cot length. arXiv preprint arXiv:2506.10525, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 588, + 506, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 588, + 506, + 621 + ], + "spans": [ + { + "bbox": [ + 111, + 588, + 506, + 621 + ], + "type": "text", + "content": "[120] Kanzhi Cheng, Yantao Li, Fangzhi Xu, Jianbing Zhang, Hao Zhou, and Yang Liu. Vision-language models can self-improve reasoning via reflection. 
arXiv preprint arXiv:2411.00855, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 625, + 504, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 625, + 504, + 658 + ], + "spans": [ + { + "bbox": [ + 111, + 625, + 504, + 658 + ], + "type": "text", + "content": "[121] Xiaoxue Cheng, Junyi Li, Wayne Xin Zhao, and Ji-Rong Wen. Think more, hallucinate less: Mitigating hallucinations via dual process of fast and slow thinking. arXiv preprint arXiv:2501.01306, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 662, + 504, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 662, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 662, + 504, + 685 + ], + "type": "text", + "content": "[122] Zhengxiang Cheng, Dongping Chen, Mingyang Fu, and Tianyi Zhou. Optimizing length compression in large reasoning models. arXiv preprint arXiv:2506.14755, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 689, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 504, + 723 + ], + "type": "text", + "content": "[123] Zhoujun Cheng, Haoyu Dong, Zhiruo Wang, Ran Jia, Jiaqi Guo, Yan Gao, Shi Han, JianGuang Lou, and Dongmei Zhang. Hitab: A hierarchical table dataset for question answering and natural language generation. arXiv preprint arXiv:2108.06712, 2021." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "44" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 43 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[124] Zhoujun Cheng, Shibo Hao, Tianyang Liu, Fan Zhou, Yutao Xie, Feng Yao, Yuexin Bian, Yonghao Zhuang, Nilabjo Dey, Yuheng Zha, et al. Revisiting reinforcement learning for llm reasoning from a cross-domain perspective. arXiv preprint arXiv:2506.14965, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 108, + 504, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 108, + 504, + 144 + ], + "spans": [ + { + "bbox": [ + 111, + 108, + 504, + 144 + ], + "type": "text", + "content": "[125] Zihui Cheng, Qiguang Chen, Jin Zhang, Hao Fei, Xiaocheng Feng, Wanxiang Che, Min Li, and Libo Qin. Comt: A novel benchmark for chain of multi-modal thought on large vision-language models. arXiv preprint arXiv:2412.12932, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 145, + 506, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 145, + 506, + 179 + ], + "spans": [ + { + "bbox": [ + 111, + 145, + 506, + 179 + ], + "type": "text", + "content": "[126] Zihui Cheng, Qiguang Chen, Xiao Xu, Jiaqi Wang, Weiyun Wang, Hao Fei, Yidong Wang, Alex Jinpeng Wang, Zhi Chen, Wanxiang Che, et al. Visual thoughts: A unified perspective of understanding multimodal chain-of-thought. arXiv preprint arXiv:2505.15510, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 182, + 504, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 182, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 111, + 182, + 504, + 205 + ], + "type": "text", + "content": "[127] Ethan Chern, Zhulin Hu, Steffi Chern, Siqi Kou, Jiadi Su, Yan Ma, Zhijie Deng, and Pengfei Liu. Thinking with generated images. arXiv preprint arXiv:2505.22525, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 207, + 506, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 207, + 506, + 274 + ], + "spans": [ + { + "bbox": [ + 111, + 207, + 506, + 274 + ], + "type": "text", + "content": "[128] Yew Ken Chia, Vernon Toh, Deepanway Ghosal, Lidong Bing, and Soujanya Poria. PuzzleVQA: Diagnosing multimodal reasoning challenges of language models with abstract visual patterns. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 16259–16273, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.962. URL https://aclanthology.org/2024-findings-acl.962/." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 277, + 506, + 310 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 277, + 506, + 310 + ], + "spans": [ + { + "bbox": [ + 111, + 277, + 506, + 310 + ], + "type": "text", + "content": "[129] Daiki Chijiwa, Taku Hasegawa, Kyosuke Nishida, Kuniko Saito, and Susumu Takeuchi. Portable reward tuning: Towards reusable fine-tuning across different pretrained models. arXiv preprint arXiv:2502.12776, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 312, + 506, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 312, + 506, + 357 + ], + "spans": [ + { + "bbox": [ + 111, + 312, + 506, + 357 + ], + "type": "text", + "content": "[130] Daewon Choi, Jimin Lee, Jihoon Tack, Woomin Song, Saket Dingliwal, Sai Muralidhar Jayanthi, Bhavana Ganesh, Jinwoo Shin, Aram Galstyan, and Sravan Babu Bodapati. Think clearly: Improving reasoning via redundant token pruning. arXiv preprint arXiv:2507.08806, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 360, + 502, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 360, + 502, + 373 + ], + "spans": [ + { + "bbox": [ + 111, + 360, + 502, + 373 + ], + "type": "text", + "content": "[131] François Chollet. On the measure of intelligence. arXiv preprint arXiv:1911.01547, 2019." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 375, + 506, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 375, + 506, + 398 + ], + "spans": [ + { + "bbox": [ + 111, + 375, + 506, + 398 + ], + "type": "text", + "content": "[132] Francois Chollet, Mike Knoop, Gregory Kamradt, and Bryan Landers. Arc prize 2024: Technical report. arXiv preprint arXiv:2412.04604, 2024." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 400, + 506, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 400, + 506, + 434 + ], + "spans": [ + { + "bbox": [ + 111, + 400, + 506, + 434 + ], + "type": "text", + "content": "[133] Francois Chollet, Mike Knoop, Gregory Kamradt, Bryan Landers, and Henry Pinkard. Arcagi-2: A new challenge for frontier ai reasoning systems. arXiv preprint arXiv:2505.11831, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 437, + 504, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 437, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 111, + 437, + 504, + 460 + ], + "type": "text", + "content": "[134] Sanjiban Choudhury. Process reward models for llm agents: Practical framework and directions. arXiv preprint arXiv:2502.10325, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 462, + 506, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 462, + 506, + 485 + ], + "spans": [ + { + "bbox": [ + 111, + 462, + 506, + 485 + ], + "type": "text", + "content": "[135] Jishnu Ray Chowdhury and Cornelia Caragea. Zero-shot verification-guided chain of thoughts. arXiv preprint arXiv:2501.13122, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 488, + 506, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 488, + 506, + 521 + ], + "spans": [ + { + "bbox": [ + 111, + 488, + 506, + 521 + ], + "type": "text", + "content": "[136] Konstantina Christakopoulou, Shibl Mourad, and Maja Mataric. Agents thinking fast and slow: A talker-reasoner architecture. In NeurIPS 2024 Workshop on Open-World Agents, October 2024. URL https://openreview.net/forum?id=xPhcP6rbI4." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 525, + 506, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 525, + 506, + 559 + ], + "spans": [ + { + "bbox": [ + 111, + 525, + 506, + 559 + ], + "type": "text", + "content": "[137] Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training. arXiv preprint arXiv:2501.17161, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 561, + 506, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 561, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 111, + 561, + 506, + 639 + ], + "type": "text", + "content": "[138] Zheng Chu, Jingchang Chen, Qianglong Chen, Weijiang Yu, Tao He, Haotian Wang, Weihua Peng, Ming Liu, Bing Qin, and Ting Liu. Navigate through enigmatic labyrinth a survey of chain of thought reasoning: Advances, frontiers and future. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1173–1203, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.65. URL https://aclanthology.org/2024.acl-long.65/." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 641, + 506, + 675 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 641, + 506, + 675 + ], + "spans": [ + { + "bbox": [ + 111, + 641, + 506, + 675 + ], + "type": "text", + "content": "[139] Jennifer Chu-Carroll, Andrew Beck, Greg Burnham, David OS Melville, David Nachman, A Erdem Özcan, and David Ferrucci. Beyond llms: Advancing the landscape of complex reasoning. arXiv preprint arXiv:2402.08064, 2024." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 677, + 506, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 677, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 111, + 677, + 506, + 721 + ], + "type": "text", + "content": "[140] Daniel JH Chung, Zhiqi Gao, Yurii Kvasiuk, Tianyi Li, Moritz Munchmeyer, Maja Rudolph, Frederic Sala, and Sai Chaitanya Tadepalli. Theoretical physics benchmark (tpbench)—a dataset and study of ai reasoning capabilities in theoretical physics. arXiv preprint arXiv:2502.15815, 2025." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "45" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 44 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[141] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 110, + 506, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 506, + 152 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 506, + 152 + ], + "type": "text", + "content": "[142] Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, et al. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks. arXiv preprint arXiv:2502.08235, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 157, + 506, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 157, + 506, + 191 + ], + "spans": [ + { + "bbox": [ + 111, + 157, + 506, + 191 + ], + "type": "text", + "content": "[143] Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, et al. Process reinforcement through implicit rewards. arXiv preprint arXiv:2502.01456, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 194, + 504, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 194, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 111, + 194, + 504, + 228 + ], + "type": "text", + "content": "[144] Ganqu Cui, Yuchen Zhang, Jiacheng Chen, Lifan Yuan, Zhi Wang, Yuxin Zuo, Haozhan Li, Yuchen Fan, Huayu Chen, Weize Chen, et al. The entropy mechanism of reinforcement learning for reasoning language models. arXiv preprint arXiv:2505.22617, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 232, + 506, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 232, + 506, + 275 + ], + "spans": [ + { + "bbox": [ + 111, + 232, + 506, + 275 + ], + "type": "text", + "content": "[145] Yingqian Cui, Pengfei He, Jingying Zeng, Hui Liu, Xianfeng Tang, Zhenwei Dai, Yan Han, Chen Luo, Jing Huang, Zhen Li, et al. 
Stepwise perplexity-guided refinement for efficient chain-of-thought reasoning in large language models. arXiv preprint arXiv:2502.13260, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 280, + 504, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 280, + 504, + 303 + ], + "spans": [ + { + "bbox": [ + 111, + 280, + 504, + 303 + ], + "type": "text", + "content": "[146] Yu Cui and Cong Zuo. Practical reasoning interruption attacks on reasoning large language models. arXiv preprint arXiv:2505.06643, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 306, + 504, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 306, + 504, + 339 + ], + "spans": [ + { + "bbox": [ + 111, + 306, + 504, + 339 + ], + "type": "text", + "content": "[147] Yu Cui, Bryan Hooi, Yujun Cai, and Yiwei Wang. Process or result? manipulated ending tokens can mislead reasoning lms to ignore the correct reasoning steps. arXiv preprint arXiv:2503.19326, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 343, + 504, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 343, + 504, + 376 + ], + "spans": [ + { + "bbox": [ + 111, + 343, + 504, + 376 + ], + "type": "text", + "content": "[148] Jianbo Dai, Jianqiao Lu, Yunlong Feng, Dong Huang, Guangtao Zeng, Rongju Ruan, Ming Cheng, Haochen Tan, and Zhijiang Guo. Mhpp: Exploring the capabilities and limitations of language models beyond basic code generation. arXiv preprint arXiv:2405.11430, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 380, + 506, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 380, + 506, + 414 + ], + "spans": [ + { + "bbox": [ + 111, + 380, + 506, + 414 + ], + "type": "text", + "content": "[149] Jisheng Dang, Jingze Wu, Teng Wang, Xuanhui Lin, Nannan Zhu, Hongbo Chen, Wei-Shi Zheng, Meng Wang, and Tat-Seng Chua. 
Reinforcing video reasoning with focused thinking. arXiv preprint arXiv:2505.24718, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 418, + 504, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 418, + 504, + 440 + ], + "spans": [ + { + "bbox": [ + 111, + 418, + 504, + 440 + ], + "type": "text", + "content": "[150] Quy-Anh Dang and Chris Ngo. Reinforcement learning for reasoning in small llms: What works and what doesn't. arXiv preprint arXiv:2503.16219, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 444, + 504, + 467 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 444, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 111, + 444, + 504, + 467 + ], + "type": "text", + "content": "[151] Yuntian Deng, Yejin Choi, and Stuart Shieber. From explicit cot to implicit cot: Learning to internalize cot step by step. arXiv preprint arXiv:2405.14838, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 470, + 506, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 470, + 506, + 514 + ], + "spans": [ + { + "bbox": [ + 111, + 470, + 506, + 514 + ], + "type": "text", + "content": "[152] Lauro Langosco Di Langosco, Jack Koch, Lee D Sharkey, Jacob Pfau, and David Krueger. Goal misgeneralization in deep reinforcement learning. In International Conference on Machine Learning, pages 12004-12019. PMLR, October 2022. URL https://proceedings.mlr.press/v162/langosco22a/langosco22a.pdf." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 518, + 506, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 518, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 111, + 518, + 506, + 551 + ], + "type": "text", + "content": "[153] Bowen Ding, Yuhan Chen, Futing Wang, Lingfeng Ming, and Tao Lin. Do thinking tokens help or trap? towards more efficient large reasoning model. 
arXiv preprint arXiv:2506.23840, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 555, + 504, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 555, + 504, + 589 + ], + "spans": [ + { + "bbox": [ + 111, + 555, + 504, + 589 + ], + "type": "text", + "content": "[154] Yifu Ding, Wentao Jiang, Shunyu Liu, Yongcheng Jing, Jinyang Guo, Yingjie Wang, Jing Zhang, Zengmao Wang, Ziwei Liu, Bo Du, et al. Dynamic parallel tree search for efficient llm reasoning. arXiv preprint arXiv:2502.16235, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 592, + 506, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 592, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 111, + 592, + 506, + 647 + ], + "type": "text", + "content": "[155] Hanze Dong, Wei Xiong, Deepanshu Goyal, Yihan Zhang, Winnie Chow, Rui Pan, Shizhe Diao, Jipeng Zhang, KaShun SHUM, and Tong Zhang. RAFT: Reward ranked finetuning for generative foundation model alignment. Transactions on Machine Learning Research, November 2023. ISSN 2835-8856. URL https://openreview.net/forum?id=m7p507zb1Y." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 651, + 504, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 651, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 651, + 504, + 685 + ], + "type": "text", + "content": "[156] Hanze Dong, Wei Xiong, Bo Pang, Haoxiang Wang, Han Zhao, Yingbo Zhou, Nan Jiang, Doyen Sahoo, Caiming Xiong, and Tong Zhang. Rlhf workflow: From reward modeling to online rlhf. arXiv preprint arXiv:2405.07863, 2024." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 689, + 504, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 504, + 721 + ], + "type": "text", + "content": "[157] Junnan Dong, Zijin Hong, Yuanchen Bei, Feiran Huang, Xinrun Wang, and Xiao Huang. Clr-bench: Evaluating large language models in college-level reasoning. arXiv preprint arXiv:2410.17558, 2024." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "46" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 45 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "type": "text", + "content": "[158] Kefan Dong and Tengyu Ma. Beyond limited data: Self-play ltm theorem provers with iterative conjecturing and proving. arXiv preprint arXiv:2502.00212, 2025." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 99, + 505, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 99, + 505, + 133 + ], + "spans": [ + { + "bbox": [ + 111, + 99, + 505, + 133 + ], + "type": "text", + "content": "[159] Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. Insight-v: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 137, + 504, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 137, + 504, + 159 + ], + "spans": [ + { + "bbox": [ + 111, + 137, + 504, + 159 + ], + "type": "text", + "content": "[160] Zhichen Dong, Zhanhui Zhou, Zhixuan Liu, Chao Yang, and Chaochao Lu. Emergent response planning in lIm. arXiv preprint arXiv:2502.06258, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 163, + 505, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 163, + 505, + 196 + ], + "spans": [ + { + "bbox": [ + 111, + 163, + 505, + 196 + ], + "type": "text", + "content": "[161] Shihan Dou, Yan Liu, Haoxiang Jia, Limao Xiong, Enyu Zhou, Wei Shen, Junjie Shan, Caishuang Huang, Xiao Wang, Xiaoran Fan, et al. Stepcoder: Improve code generation with reinforcement learning from compiler feedback. arXiv preprint arXiv:2402.01391, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 201, + 506, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 201, + 506, + 235 + ], + "spans": [ + { + "bbox": [ + 111, + 201, + 506, + 235 + ], + "type": "text", + "content": "[162] Iddo Drori, Gaston Longhitano, Mao Mao, Seunghwan Hyun, Yuke Zhang, Sungjun Park, Zachary Meeks, Xin-Yu Zhang, Ben Segev, Howard Yong, et al. Diverse inference and verification for advanced reasoning. arXiv preprint arXiv:2502.09955, 2025." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 238, + 506, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 238, + 506, + 281 + ], + "spans": [ + { + "bbox": [ + 111, + 238, + 506, + 281 + ], + "type": "text", + "content": "[163] Kounianhua Du, Hanjing Wang, Jianxing Liu, Jizheng Chen, Xinyi Dai, Yasheng Wang, Ruiming Tang, Yong Yu, Jun Wang, and Weinan Zhang. Boost, disentangle, and customize: A robust system2-to-system1 pipeline for code generation. arXiv preprint arXiv:2502.12492, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 287, + 504, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 287, + 504, + 309 + ], + "spans": [ + { + "bbox": [ + 111, + 287, + 504, + 309 + ], + "type": "text", + "content": "[164] Weihua Du, Yiming Yang, and Sean Welleck. Optimizing temperature for language models with multi-sample inference. arXiv preprint arXiv:2502.05234, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 313, + 505, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 313, + 505, + 346 + ], + "spans": [ + { + "bbox": [ + 111, + 313, + 505, + 346 + ], + "type": "text", + "content": "[165] Xinrun Du, Yifan Yao, Kaijing Ma, Bingli Wang, Tianyu Zheng, Kang Zhu, Minghao Liu, Yiming Liang, Xiaolong Jin, Zhenlin Wei, et al. Supergpqa: Scaling llm evaluation across 285 graduate disciplines. arXiv preprint arXiv:2502.14739, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 351, + 504, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 351, + 504, + 385 + ], + "spans": [ + { + "bbox": [ + 111, + 351, + 504, + 385 + ], + "type": "text", + "content": "[166] Yifan Du, Zikang Liu, Yifan Li, Wayne Xin Zhao, Yuqi Huo, Bingning Wang, Weipeng Chen, Zheng Liu, Zhongyuan Wang, and Ji-Rong Wen. Virgo: A preliminary exploration on reproducing o1-like mllm. 
arXiv preprint arXiv:2501.01904, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 388, + 505, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 388, + 505, + 422 + ], + "spans": [ + { + "bbox": [ + 111, + 388, + 505, + 422 + ], + "type": "text", + "content": "[167] Keyu Duan, Zichen Liu, Xin Mao, Tianyu Pang, Changyu Chen, Qiguang Chen, Michael Qizhe Shieh, and Longxu Dou. Efficient process reward model training via active learning. arXiv preprint arXiv:2504.10559, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 426, + 506, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 426, + 506, + 460 + ], + "spans": [ + { + "bbox": [ + 111, + 426, + 506, + 460 + ], + "type": "text", + "content": "[168] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 464, + 506, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 464, + 506, + 508 + ], + "spans": [ + { + "bbox": [ + 111, + 464, + 506, + 508 + ], + "type": "text", + "content": "[169] Subhabrata Dutta, Joykirat Singh, Soumen Chakrabarti, and Tanmoy Chakraborty. How to think step-by-step: A mechanistic understanding of chain-of-thought reasoning. Transactions on Machine Learning Research, July 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=uHLDkQVtyC." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 512, + 504, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 512, + 504, + 546 + ], + "spans": [ + { + "bbox": [ + 111, + 512, + 504, + 546 + ], + "type": "text", + "content": "[170] Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, et al. Competitive programming with large reasoning models. arXiv preprint arXiv:2502.06807, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 549, + 506, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 549, + 506, + 572 + ], + "spans": [ + { + "bbox": [ + 111, + 549, + 506, + 572 + ], + "type": "text", + "content": "[171] Kawin Ethayarajh, Winnie Xu, Niklas Muennighoff, Dan Jurafsky, and Douwe Kiela. Kto: Model alignment as prospect theoretic optimization. arXiv preprint arXiv:2402.01306, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 576, + 506, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 576, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 111, + 576, + 506, + 609 + ], + "type": "text", + "content": "[172] Chongyu Fan, Yihua Zhang, Jinghan Jia, Alfred Hero, and Sijia Liu. Cyclicreflex: Improving large reasoning models via cyclical reflection token scheduling. arXiv preprint arXiv:2506.11077, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 614, + 506, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 614, + 506, + 646 + ], + "spans": [ + { + "bbox": [ + 111, + 614, + 506, + 646 + ], + "type": "text", + "content": "[173] Siqi Fan, Peng Han, Shuo Shang, Yequan Wang, and Aixin Sun. Cothink: Token-efficient reasoning via instruct models guiding reasoning models. arXiv preprint arXiv:2505.22017, 2025." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 651, + 504, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 651, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 111, + 651, + 504, + 684 + ], + "type": "text", + "content": "[174] Tiantian Fan, Lingjun Liu, Yu Yue, Jiaze Chen, Chengyi Wang, Qiying Yu, Chi Zhang, Zhiqi Lin, Ruofei Zhu, Yufeng Yuan, et al. Truncated proximal policy optimization. arXiv preprint arXiv:2506.15050, 2025." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 111, + 689, + 506, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 506, + 722 + ], + "type": "text", + "content": "[175] Yi Fang, Wenjie Wang, Yang Zhang, Fengbin Zhu, Qifan Wang, Fuli Feng, and Xiangnan He. Large language models for recommendation with deliberative user preference alignment. arXiv preprint arXiv:2502.02061, 2025." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "47" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 46 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + 
"spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[176] Wu Fei, Hao Kong, Shuxian Liang, Yang Lin, Yibo Yang, Jing Tang, Lei Chen, and Xiansheng Hua. Self-guided process reward optimization with masked step advantage for process reinforcement learning. arXiv preprint arXiv:2507.01551, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 108, + 506, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 108, + 506, + 153 + ], + "spans": [ + { + "bbox": [ + 111, + 108, + 506, + 153 + ], + "type": "text", + "content": "[177] Guhao Feng, Bohang Zhang, Yuntian Gu, Haotian Ye, Di He, and Liwei Wang. Towards revealing the mystery behind chain of thought: A theoretical perspective. In Thirty-seventh Conference on Neural Information Processing Systems, September 2023. URL https://openreview.net/forum?id=qHrADgAdYu." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 156, + 504, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 156, + 504, + 190 + ], + "spans": [ + { + "bbox": [ + 111, + 156, + 504, + 190 + ], + "type": "text", + "content": "[178] Jiazhan Feng, Shijue Huang, Xingwei Qu, Ge Zhang, Yujia Qin, Baoquan Zhong, Chengquan Jiang, Jinxin Chi, and Wanjun Zhong. Retool: Reinforcement learning for strategic tool use in llms. arXiv preprint arXiv:2504.11536, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 193, + 505, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 193, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 111, + 193, + 505, + 228 + ], + "type": "text", + "content": "[179] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Junfei Wu, Xiaoying Zhang, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 230, + 504, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 230, + 504, + 253 + ], + "spans": [ + { + "bbox": [ + 111, + 230, + 504, + 253 + ], + "type": "text", + "content": "[180] Sicheng Feng, Gongfan Fang, Xinyin Ma, and Xinchao Wang. Efficient reasoning models: A survey. arXiv preprint arXiv:2504.10903, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 255, + 504, + 279 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 255, + 504, + 279 + ], + "spans": [ + { + "bbox": [ + 111, + 255, + 504, + 279 + ], + "type": "text", + "content": "[181] Xiachong Feng, Longxu Dou, and Lingpeng Kong. Reasoning does not necessarily improve role-playing ability. arXiv preprint arXiv:2502.16940, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 281, + 506, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 281, + 506, + 315 + ], + "spans": [ + { + "bbox": [ + 111, + 281, + 506, + 315 + ], + "type": "text", + "content": "[182] Xueyang Feng, Bo Lan, Quanyu Dai, Lei Wang, Jiakai Tang, Xu Chen, Zhenhua Dong, and Ji-Rong Wen. Improving retrospective language agents via joint policy gradient optimization. arXiv preprint arXiv:2503.01490, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 318, + 504, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 318, + 504, + 352 + ], + "spans": [ + { + "bbox": [ + 111, + 318, + 504, + 352 + ], + "type": "text", + "content": "[183] Yichen Feng, Zhangchen Xu, Fengqing Jiang, Yuetai Li, Bhaskar Ramasubramanian, Luyao Niu, Bill Yuchen Lin, and Radha Poovendran. Visualsphinx: Large-scale synthetic vision logic puzzles for rl. arXiv preprint arXiv:2505.23977, 2025." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 354, + 506, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 354, + 506, + 432 + ], + "spans": [ + { + "bbox": [ + 111, + 354, + 506, + 432 + ], + "type": "text", + "content": "[184] Chrisantha Fernando, Dylan Sunil Banarse, Henryk Michalewski, Simon Osindero, and Tim Rocktäschel. Promptbreeder: Self-referential self-improvement via prompt evolution. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 13481-13544. PMLR, 21-27 Jul 2024. URL https://proceedings.mlrpress/v235/fernando24a.html." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 435, + 506, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 435, + 506, + 459 + ], + "spans": [ + { + "bbox": [ + 111, + 435, + 506, + 459 + ], + "type": "text", + "content": "[185] Mohamed Amine Ferrag, Norbert Tihanyi, and Merouane Debbah. Reasoning beyond limits: Advances and open problems for lms. arXiv preprint arXiv:2503.22732, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 460, + 506, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 460, + 506, + 528 + ], + "spans": [ + { + "bbox": [ + 111, + 460, + 506, + 528 + ], + "type": "text", + "content": "[186] Thomas Palmeira Ferraz, Kartik Mehta, Yu-Hsiang Lin, Haw-Shiuan Chang, Shereen Oraby, Sijia Liu, Vivek Subramanian, Tagyoung Chung, Mohit Bansal, and Nanyun Peng. LLM self-correction with deCRIM: Decompose, critique, and refine for enhanced following of instructions with multiple constraints. In The First Workshop on System-2 Reasoning at Scale, NeurIPS'24, October 2024. URL https://openreview.net/forum?id=RQ6Ff81so0." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 530, + 504, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 530, + 504, + 553 + ], + "spans": [ + { + "bbox": [ + 111, + 530, + 504, + 553 + ], + "type": "text", + "content": "[187] Jiarun Fu, Lizhong Ding, Hao Li, Pengqi Li, Qiuning Wei, and Xu Chen. Unveiling and causalizing cot: A causal perspective. arXiv preprint arXiv:2502.18239, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 556, + 506, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 556, + 506, + 601 + ], + "spans": [ + { + "bbox": [ + 111, + 556, + 506, + 601 + ], + "type": "text", + "content": "[188] Wei Fu, Jiaxuan Gao, Xujie Shen, Chen Zhu, Zhiyu Mei, Chuyi He, Shusheng Xu, Guo Wei, Jun Mei, Jiashu Wang, Tongkai Yang, Binhang Yuan, and Yi Wu. Areal: A large-scale asynchronous reinforcement learning system for language reasoning, 2025. URL https://arxiv.org/abs/2505.24298." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 604, + 506, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 604, + 506, + 649 + ], + "spans": [ + { + "bbox": [ + 111, + 604, + 506, + 649 + ], + "type": "text", + "content": "[189] Yao Fu, Hao Peng, Ashish Sabharwal, Peter Clark, and Tushar Khot. Complexity-based prompting for multi-step reasoning. In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=yf1icZHC-19." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 651, + 506, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 651, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 111, + 651, + 506, + 696 + ], + "type": "text", + "content": "[190] Yuqian Fu, Tinghong Chen, Jiajun Chai, Xihuai Wang, Songjun Tu, Guojun Yin, Wei Lin, Qichao Zhang, Yuanheng Zhu, and Dongbin Zhao. 
Srft: A single-stage method with supervised and reinforcement fine-tuning for reasoning. arXiv preprint arXiv:2506.19767, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 699, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 699, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 699, + 506, + 723 + ], + "type": "text", + "content": "[191] Víctor Gallego. Metasc: Test-time safety specification optimization for language models. arXiv preprint arXiv:2502.07985, 2025." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "48" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 47 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "type": "text", + "content": "[192] Zeyu Gan, Yun Liao, and Yong Liu. Rethinking external slow-thinking: From snowball errors to probability of correct reasoning. arXiv preprint arXiv:2501.15602, 2025." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 98, + 506, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 98, + 506, + 144 + ], + "spans": [ + { + "bbox": [ + 111, + 98, + 506, + 144 + ], + "type": "text", + "content": "[193] Kanishk Gandhi, Denise HJ Lee, Gabriel Grand, Muxin Liu, Winson Cheng, Archit Sharma, and Noah Goodman. Stream of search (sos): Learning to search in language. In First Conference on Language Modeling, July 2024. URL https://openreview.net/pdf?id=2cop2jmQVL." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 147, + 506, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 147, + 506, + 182 + ], + "spans": [ + { + "bbox": [ + 111, + 147, + 506, + 182 + ], + "type": "text", + "content": "[194] Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars. arXiv preprint arXiv:2503.01307, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 184, + 504, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 184, + 504, + 218 + ], + "spans": [ + { + "bbox": [ + 111, + 184, + 504, + 218 + ], + "type": "text", + "content": "[195] Bofei Gao, Zefan Cai, Runxin Xu, Peiyi Wang, Ce Zheng, Runji Lin, Keming Lu, Junyang Lin, Chang Zhou, Tianyu Liu, and Baobao Chang. The reason behind good or bad: Towards a better mathematical verifier with natural language feedback, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 222, + 506, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 222, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 111, + 222, + 506, + 266 + ], + "type": "text", + "content": "[196] Bofei Gao, Zefan Cai, Runxin Xu, Peiyi Wang, Ce Zheng, Runji Lin, Keming Lu, Dayiheng Liu, Chang Zhou, Wen Xiao, et al. 
Llm critics help catch bugs in mathematics: Towards a better mathematical verifier with natural language feedback. arXiv preprint arXiv:2406.14024, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 270, + 504, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 270, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 111, + 270, + 504, + 304 + ], + "type": "text", + "content": "[197] Jiaxuan Gao, Shusheng Xu, Wenjie Ye, Weilin Liu, Chuyi He, Wei Fu, Zhiyu Mei, Guangju Wang, and Yi Wu. On designing effective rl reward at training time for llm reasoning. arXiv preprint arXiv:2410.15115, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 308, + 506, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 308, + 506, + 374 + ], + "spans": [ + { + "bbox": [ + 111, + 308, + 506, + 374 + ], + "type": "text", + "content": "[198] Luyu Gao, Aman Madaan, Shuyan Zhou, Uri Alon, Pengfei Liu, Yiming Yang, Jamie Callan, and Graham Neubig. PAL: Program-aided language models. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett, editors, Proceedings of the 40th International Conference on Machine Learning, volume 202 of Proceedings of Machine Learning Research, pages 10764–10799. PMLR, 23–29 Jul 2023. URL https://proceedings.mlr.press/v202/gao23f.html." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 378, + 506, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 378, + 506, + 401 + ], + "spans": [ + { + "bbox": [ + 111, + 378, + 506, + 401 + ], + "type": "text", + "content": "[199] Silin Gao, Antoine Bosselut, Samy Bengio, and Emmanuel Abbe. Augmenting llms' reasoning by reinforcing abstract thinking. arXiv preprint arXiv:2506.07751, 2025." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 404, + 506, + 427 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 404, + 506, + 427 + ], + "spans": [ + { + "bbox": [ + 111, + 404, + 506, + 427 + ], + "type": "text", + "content": "[200] Tianchen Gao, Jiashun Jin, Zheng Tracy Ke, and Gabriel Moryoussef. A comparison of deepseek and other llms. arXiv preprint arXiv:2502.03688, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 430, + 506, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 430, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 111, + 430, + 506, + 464 + ], + "type": "text", + "content": "[201] Zitian Gao, Boye Niu, Xuzheng He, Haotian Xu, Hongzhang Liu, Aiwei Liu, Xuming Hu, and Lijie Wen. Interpretable contrastive monte carlo tree search reasoning. arXiv preprint arXiv:2410.01707, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 468, + 504, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 468, + 504, + 502 + ], + "spans": [ + { + "bbox": [ + 111, + 468, + 504, + 502 + ], + "type": "text", + "content": "[202] Yuyao Ge, Shenghua Liu, Yiwei Wang, Lingrui Mei, Lizhe Chen, Baolong Bi, and Xueqi Cheng. Innate reasoning is not enough: In-context learning enhances reasoning large language models with less overthinking. arXiv preprint arXiv:2503.19602, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 506, + 506, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 506, + 506, + 539 + ], + "spans": [ + { + "bbox": [ + 111, + 506, + 506, + 539 + ], + "type": "text", + "content": "[203] Jonas Gehring, Kunhao Zheng, Jade Copet, Vegard Mella, Taco Cohen, and Gabriel Synnaeve. Rlef: Grounding code llms in execution feedback with reinforcement learning. arXiv preprint arXiv:2410.02089, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 543, + 506, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 543, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 111, + 543, + 506, + 586 + ], + "type": "text", + "content": "[204] Jonas Geiping, Sean McLeish, Neel Jain, John Kirchenbauer, Siddharth Singh, Brian R Bartoldson, Bhavya Kailkhura, Abhinav Bhatele, and Tom Goldstein. Scaling up test-time compute with latent reasoning: A recurrent depth approach. arXiv preprint arXiv:2502.05171, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 591, + 506, + 636 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 591, + 506, + 636 + ], + "spans": [ + { + "bbox": [ + 111, + 591, + 506, + 636 + ], + "type": "text", + "content": "[205] Gael Gendron, Qiming Bao, Michael Witbrock, and Gillian Dobbie. Large language models are not strong abstract reasoners. In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, IJCAI '24, August 2024. ISBN 978-1-956792-04-1. doi: 10.24963/ijcai.2024/693. URL https://doi.org/10.24963/ijcai.2024/693." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 640, + 506, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 640, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 640, + 506, + 685 + ], + "type": "text", + "content": "[206] Zelalem Gero, Chandan Singh, Hao Cheng, Tristan Naumann, Michel Galley, Jianfeng Gao, and Hoifung Poon. Self-verification improves few-shot clinical information extraction. In ICML 3rd Workshop on Interpretable Machine Learning in Healthcare (IMLH), June 2023. URL https://openreview.net/forum?id=SBbJICrg1S." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 689, + 506, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 506, + 721 + ], + "type": "text", + "content": "[207] Akash Ghosh, Debayan Datta, Sriparna Saha, and Chirag Agarwal. The multilingual mind: A survey of multilingual reasoning in language models. arXiv preprint arXiv:2502.09457, 2025." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "49" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 48 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 506, + 139 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 506, + 139 + ], + "type": "text", + "content": "[208] Panagiotis Giadikiaroglou, Maria Lymperaiou, Giorgos Filandrianos, and Giorgos Stamou. Puzzle solving using reasoning of large language models: A survey. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 11574–11591, Miami, Florida, USA, November 2024. Association for Computational Linguistics. 
doi: 10.18653/v1/2024.emnlp-main.646. URL https://aclanthology.org/2024.emnlp-main.646/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 143, + 505, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 143, + 505, + 177 + ], + "spans": [ + { + "bbox": [ + 111, + 143, + 505, + 177 + ], + "type": "text", + "content": "[209] Alexi Gladstone, Ganesh Nanduru, Md Mofijul Islam, Peixuan Han, Hyeonjeong Ha, Aman Chadha, Yilun Du, Heng Ji, Jundong Li, and Tariq Iqbal. Energy-based transformers are scalable learners and thinkers. arXiv preprint arXiv:2507.02092, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 180, + 506, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 180, + 506, + 225 + ], + "spans": [ + { + "bbox": [ + 111, + 180, + 506, + 225 + ], + "type": "text", + "content": "[210] Elliot Glazer, Ege Erdil, Tamay Besiroglu, Diego Chicharro, Evan Chen, Alex Gunning, Caroline Falkman Olsson, Jean-Stanislas Denain, Anson Ho, Emily de Oliveira Santos, et al. Frontiermath: A benchmark for evaluating advanced mathematical reasoning in ai. arXiv preprint arXiv:2411.04872, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 229, + 504, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 229, + 504, + 263 + ], + "spans": [ + { + "bbox": [ + 111, + 229, + 504, + 263 + ], + "type": "text", + "content": "[211] Team GLM, Aohan Zeng, Bin Xu, Bowen Wang, Chenhui Zhang, Da Yin, Dan Zhang, Diego Rojas, Guanyu Feng, Hanlin Zhao, et al. Chatglm: A family of large language models from glm-130b to glm-4 all tools. arXiv preprint arXiv:2406.12793, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 266, + 506, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 266, + 506, + 312 + ], + "spans": [ + { + "bbox": [ + 111, + 266, + 506, + 312 + ], + "type": "text", + "content": "[212] Olga Golovneva, Moya Peng Chen, Spencer Poff, Martin Corredor, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. ROSCOE: A suite of metrics for scoring step-by-step reasoning. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=xYlJRpzZtsY." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 316, + 506, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 316, + 506, + 371 + ], + "spans": [ + { + "bbox": [ + 111, + 316, + 506, + 371 + ], + "type": "text", + "content": "[213] Olga Golovneva, Sean O'Brien, Ramakanth Pasunuru, Tianlu Wang, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. PATHFINDER: Guided search over multi-step reasoning paths. In R0-FoMo: Robustness of Few-shot and Zero-shot Learning in Large Foundation Models, December 2023. URL https://openreview.net/forum?id=5TsfEEwRsu." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 375, + 504, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 375, + 504, + 409 + ], + "spans": [ + { + "bbox": [ + 111, + 375, + 504, + 409 + ], + "type": "text", + "content": "[214] Ruihan Gong, Yue Liu, Wenjie Qu, Mingzhe Du, Yufei He, Yingwei Ma, Yulin Chen, Xiang Liu, Yi Wen, Xinfeng Li, et al. Efficient reasoning via chain of unconscious thought. arXiv preprint arXiv:2505.19756, 2025." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 413, + 506, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 413, + 506, + 446 + ], + "spans": [ + { + "bbox": [ + 111, + 413, + 506, + 446 + ], + "type": "text", + "content": "[215] Juraj Gottweis, Wei-Hung Weng, Alexander Daryin, Tao Tu, Anil Palepu, Petar Sirkovic, Artiom Myaskovsky, Felix Weissenberger, Keran Rong, Ryutaro Tanno, et al. Towards an ai co-scientist. arXiv preprint arXiv:2502.18864, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 451, + 504, + 484 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 451, + 504, + 484 + ], + "spans": [ + { + "bbox": [ + 111, + 451, + 504, + 484 + ], + "type": "text", + "content": "[216] Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Nan Duan, and Weizhu Chen. Critic: Large language models can self-correct with tool-interactive critiquing. arXiv preprint arXiv:2305.11738, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 488, + 504, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 488, + 504, + 522 + ], + "spans": [ + { + "bbox": [ + 111, + 488, + 504, + 522 + ], + "type": "text", + "content": "[217] Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Minlie Huang, Nan Duan, and Weizhu Chen. Tora: A tool-integrated reasoning agent for mathematical problem solving. arXiv preprint arXiv:2309.17452, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 526, + 504, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 526, + 504, + 560 + ], + "spans": [ + { + "bbox": [ + 111, + 526, + 504, + 560 + ], + "type": "text", + "content": "[218] Julia Grosse, Ruotian Wu, Ahmad Rashid, Philipp Hennig, Pascal Poupart, and Agustinus Kristiadi. Uncertainty-guided optimization on large language model search trees. arXiv preprint arXiv:2407.03951, 2024." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 564, + 504, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 564, + 504, + 597 + ], + "spans": [ + { + "bbox": [ + 111, + 564, + 504, + 597 + ], + "type": "text", + "content": "[219] Yanggan Gu, Junzhuo Li, Sirui Huang, Xin Zou, Zhenghua Li, and Xuming Hu. Capturing nuanced preferences: Preference-aligned distillation for small language models. arXiv preprint arXiv:2502.14272, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 601, + 506, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 601, + 506, + 646 + ], + "spans": [ + { + "bbox": [ + 111, + 601, + 506, + 646 + ], + "type": "text", + "content": "[220] Xinyan Guan, Yanjiang Liu, Xinyu Lu, Boxi Cao, Ben He, Xianpei Han, Le Sun, Jie Lou, Bowen Yu, Yaojie Lu, et al. Search, verify and feedback: Towards next generation post-training paradigm of foundation models via verifier engineering. arXiv preprint arXiv:2411.11504, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 651, + 506, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 651, + 506, + 684 + ], + "spans": [ + { + "bbox": [ + 111, + 651, + 506, + 684 + ], + "type": "text", + "content": "[221] Xinyan Guan, Jiali Zeng, Fandong Meng, Chunlei Xin, Yaojie Lu, Hongyu Lin, Xianpei Han, Le Sun, and Jie Zhou. Deep Learning: Thinking to retrieve step by step for large language models. arXiv preprint arXiv:2502.01142, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 689, + 506, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 506, + 722 + ], + "type": "text", + "content": "[222] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. 
rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "50" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 49 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[223] Etash Guha, Ryan Marten, Sedrick Keh, Negin Raoof, Georgios Smyrnis, Hritik Bansal, Marianna Nezhurina, Jean Mercat, Trung Vu, Zayne Sprague, et al. Openthoughts: Data recipes for reasoning models. arXiv preprint arXiv:2506.04178, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 110, + 506, + 155 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 506, + 155 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 506, + 155 + ], + "type": "text", + "content": "[224] Aryan Gulati, Brando Miranda, Eric Chen, Emily Xia, Kai Fronsdal, Bruno de Moraes Dumont, and Sanmi Koyejo. Putnam-AXIOM: A functional and static benchmark for measuring higher level mathematical reasoning. 
In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, 2024. URL https://openreview.net/forum?id=YXnwlZe0yf." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 159, + 506, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 159, + 506, + 193 + ], + "spans": [ + { + "bbox": [ + 111, + 159, + 506, + 193 + ], + "type": "text", + "content": "[225] Caglar Gulcehre, Tom Le Paine, Srivatsan Srinivasan, Ksenia Konyushkova, Lotte Weerts, Abhishek Sharma, Aditya Siddhant, Alex Ahern, Miaosen Wang, Chenjie Gu, et al. Reinforced self-training (rest) for language modeling. arXiv preprint arXiv:2308.08998, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 197, + 506, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 197, + 506, + 231 + ], + "spans": [ + { + "bbox": [ + 111, + 197, + 506, + 231 + ], + "type": "text", + "content": "[226] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Yu Wu, YK Li, et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. arXiv preprint arXiv:2401.14196, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 234, + 504, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 234, + 504, + 269 + ], + "spans": [ + { + "bbox": [ + 111, + 234, + 504, + 269 + ], + "type": "text", + "content": "[227] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 272, + 504, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 272, + 504, + 306 + ], + "spans": [ + { + "bbox": [ + 111, + 272, + 504, + 306 + ], + "type": "text", + "content": "[228] Honglin Guo, Kai Lv, Qipeng Guo, Tianyi Liang, Zhiheng Xi, Demin Song, Qiuyinzhe Zhang, Yu Sun, Kai Chen, Xipeng Qiu, et al. Critiq: Mining data quality criteria from human preferences. arXiv preprint arXiv:2502.19279, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 310, + 506, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 310, + 506, + 366 + ], + "spans": [ + { + "bbox": [ + 111, + 310, + 506, + 366 + ], + "type": "text", + "content": "[229] Kehan Guo, Bozhao Nan, Yujun Zhou, Taicheng Guo, Zhichun Guo, Mihir Surve, Zhenwen Liang, Nitesh V Chawla, Olaf Wiest, and Xiangliang Zhang. Can LLMs solve molecule puzzles? a multimodal benchmark for molecular structure elucidation. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=t1mAxb4Cop." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 369, + 504, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 369, + 504, + 404 + ], + "spans": [ + { + "bbox": [ + 111, + 369, + 504, + 404 + ], + "type": "text", + "content": "[230] Ziyu Guo, Renrui Zhang, Chengzhuo Tong, Zhizheng Zhao, Peng Gao, Hongsheng Li, and Pheng-Ann Heng. Can we generate images with cot? let's verify and reinforce image generation step by step. arXiv preprint arXiv:2501.13926, 2025." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 407, + 506, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 407, + 506, + 453 + ], + "spans": [ + { + "bbox": [ + 111, + 407, + 506, + 453 + ], + "type": "text", + "content": "[231] Dongge Han, Menglin Xia, Daniel Madrigal Diaz, Samuel Kessler, Ankur Mallick, Xuchao Zhang, Mirian Del Carmen Hipolito Garcia, Jin Xu, Victor Ruhle, and Saravan Rajmohan. Enhancing reasoning capabilities of small language models with blueprints and prompt template search. arXiv preprint arXiv:2506.08669, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 456, + 506, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 456, + 506, + 479 + ], + "spans": [ + { + "bbox": [ + 111, + 456, + 506, + 479 + ], + "type": "text", + "content": "[232] Tingxu Han, Chunrong Fang, Shiyu Zhao, Shiqing Ma, Zhenyu Chen, and Zhenting Wang. Token-budget-aware lIm reasoning. arXiv preprint arXiv:2412.18547, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 483, + 506, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 483, + 506, + 517 + ], + "spans": [ + { + "bbox": [ + 111, + 483, + 506, + 517 + ], + "type": "text", + "content": "[233] Michael Hanna, Ollie Liu, and Alexandre Variengien. How does GPT-2 compute greater-than?: Interpreting mathematical abilities in a pre-trained language model. September 2023. URL https://openreview.net/forum?id=p4PckNQR8k." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 520, + 506, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 520, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 111, + 520, + 506, + 588 + ], + "type": "text", + "content": "[234] Shibo Hao, Yi Gu, Haodi Ma, Joshua Hong, Zhen Wang, Daisy Wang, and Zhiting Hu. Reasoning with language model is planning with world model. 
In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 8154-8173, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.507. URL https://aclanthology.org/2023.emnlp-main.507/." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 591, + 506, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 591, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 111, + 591, + 506, + 647 + ], + "type": "text", + "content": "[235] Shibo Hao, Yi Gu, Haotian Luo, Tianyang Liu, Xiyan Shao, Xinyuan Wang, Shuhua Xie, Haodi Ma, Adithya Samavedhi, Qiyue Gao, Zhen Wang, and Zhiting Hu. LLM reasoners: New evaluation, library, and analysis of step-by-step reasoning with large language models. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=b0y6fbSUG0." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 651, + 506, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 651, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 651, + 506, + 685 + ], + "type": "text", + "content": "[236] Shibo Hao, Sainbayar Sukhbaatar, DiJia Su, Xian Li, Zhiting Hu, Jason Weston, and Yuandong Tian. Training large language models to reason in a continuous latent space. arXiv preprint arXiv:2412.06769, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 688, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 688, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 688, + 504, + 723 + ], + "type": "text", + "content": "[237] Yunzhuo Hao, Jiawei Gu, Huichen Will Wang, Linjie Li, Zhengyuan Yang, Lijuan Wang, and Yu Cheng. Can mllms reason in multimodality? emma: An enhanced multimodal reasoning benchmark. arXiv preprint arXiv:2501.05444, 2025." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "51" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 50 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 506, + 127 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 506, + 127 + ], + "type": "text", + "content": "[238] Alexander Havrilla, Sharath Chandra Raparthy, Christoforos Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, and Roberta Raileanu. GLOre: When, where, and how to improve LLM reasoning via global and local refinements. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=LH6R06NxdB." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 130, + 506, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 130, + 506, + 220 + ], + "spans": [ + { + "bbox": [ + 111, + 130, + 506, + 220 + ], + "type": "text", + "content": "[239] Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, Jie Liu, Lei Qi, Zhiyuan Liu, and Maosong Sun. 
OlympiadBench: A challenging benchmark for promoting AGI with olympiad-level bilingual multimodal scientific problems. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 3828–3850, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.211. URL https://aclanthology.org/2024.acl-long.211/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 222, + 506, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 222, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 111, + 222, + 506, + 255 + ], + "type": "text", + "content": "[240] Chengbo He, Bochao Zou, Xin Li, Jiansheng Chen, Junliang Xing, and Huimin Ma. Enhancing llm reasoning with multi-path collaborative reactive and reflection agents. arXiv preprint arXiv:2501.00430, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 258, + 504, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 258, + 504, + 292 + ], + "spans": [ + { + "bbox": [ + 111, + 258, + 504, + 292 + ], + "type": "text", + "content": "[241] Feng He, Zijun Chen, Xinnian Liang, Tingting Ma, Yunqi Qiu, Shuangzhi Wu, and Junchi Yan. Protoreasoning: Prototypes as the foundation for generalizable reasoning in llms. arXiv preprint arXiv:2506.15211, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 294, + 506, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 294, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 111, + 294, + 506, + 361 + ], + "type": "text", + "content": "[242] Jujie He, Jiacai Liu, Chris Yuhao Liu, Rui Yan, Chaojie Wang, Peng Cheng, Xiaoyu Zhang, Fuxiang Zhang, Jiacheng Xu, Wei Shen, Siyuan Li, Liang Zeng, Tianwen Wei, Cheng Cheng, Bo An, Yang Liu, and Yahui Zhou. Skywork open reasoner series. 
https://capricious-hydrogen-41c.notion.site/Skywork-Open-Reaonser-Series-1d0bc9ae823a80459b46c149e4f51680, 2025. Note Blog." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 364, + 506, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 364, + 506, + 397 + ], + "spans": [ + { + "bbox": [ + 111, + 364, + 506, + 397 + ], + "type": "text", + "content": "[243] Junda He, Jieke Shi, Terry Yue Zhuo, Christoph Treude, Jiamou Sun, Zhenchang Xing, Xiaoning Du, and David Lo. From code to courtroom: Llms as the new software judges. arXiv preprint arXiv:2503.02246, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 400, + 504, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 400, + 504, + 423 + ], + "spans": [ + { + "bbox": [ + 111, + 400, + 504, + 423 + ], + "type": "text", + "content": "[244] Kang He and Kaushik Roy. Logictree: Structured proof exploration for coherent and rigorous logical reasoning with large language models. arXiv preprint arXiv:2504.14089, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 426, + 506, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 426, + 506, + 493 + ], + "spans": [ + { + "bbox": [ + 111, + 426, + 506, + 493 + ], + "type": "text", + "content": "[245] Mingqian He, Yongliang Shen, Wenqi Zhang, Zeqi Tan, and Weiming Lu. Advancing process verification for large language models via tree-based preference learning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 2086-2099, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.125. URL https://aclanthology.org/2024.emnlp-main.125/." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 495, + 506, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 495, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 111, + 495, + 506, + 551 + ], + "type": "text", + "content": "[246] Qiangqiang He, Shuwei Qian, Jie Zhang, and Chongjun Wang. Inference retrieval-augmented multi-modal chain-of-thoughts reasoning for language models. In ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5, 2025. doi: 10.1109/ICASSP49660.2025.10888701. URL https://openreview.net/pdf/9a7e7a9787d14ac8302215f8e4ef959606b78a94.pdf." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 553, + 504, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 553, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 111, + 553, + 504, + 586 + ], + "type": "text", + "content": "[247] Shenghua He, Tian Xia, Xuan Zhou, and Hui Wei. Response-level rewards are all you need for online reinforcement learning in llms: A mathematical perspective. arXiv preprint arXiv:2506.02553, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 590, + 504, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 590, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 111, + 590, + 504, + 624 + ], + "type": "text", + "content": "[248] Tao He, Hao Li, Jingchang Chen, Runxuan Liu, Yixin Cao, Lizi Liao, Zihao Zheng, Zheng Chu, Jiafeng Liang, Ming Liu, et al. A survey on complex reasoning of large language models through the lens of self-evolution. February 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 626, + 504, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 626, + 504, + 650 + ], + "spans": [ + { + "bbox": [ + 111, + 626, + 504, + 650 + ], + "type": "text", + "content": "[249] Xingyang He, Xiao Ling, and Jie Liu. 
Smartthinker: Learning to compress and preserve reasoning by step-level length control. arXiv preprint arXiv:2507.04348, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 652, + 504, + 686 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 652, + 504, + 686 + ], + "spans": [ + { + "bbox": [ + 111, + 652, + 504, + 686 + ], + "type": "text", + "content": "[250] Yancheng He, Shilong Li, Jiaheng Liu, Weixun Wang, Xingyuan Bu, Ge Zhang, Zhongyuan Peng, Zhaoxiang Zhang, Wenbo Su, and Bo Zheng. Can large language models detect errors in long chain-of-thought reasoning? arXiv preprint arXiv:2502.19361, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "text", + "content": "[251] Yang He, Xiao Ding, Bibo Cai, Yufei Zhang, Kai Xiong, Zhouhao Sun, Bing Qin, and Ting Liu. Self-route: Automatic mode switching via capability estimation for efficient reasoning. arXiv preprint arXiv:2505.20664, 2025." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "52" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 51 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[252] Zhitao He, Sandeep Polisetty, Zhiyuan Fan, Yuchen Huang, Shujin Wu, et al. Mmboundary: Advancing mllm knowledge boundary awareness through reasoning step confidence calibration. arXiv preprint arXiv:2505.23224, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 109, + 505, + 165 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 109, + 505, + 165 + ], + "spans": [ + { + "bbox": [ + 111, + 109, + 505, + 165 + ], + "type": "text", + "content": "[253] Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2), October 2021. URL https://openreview.net/forum?id=7Bywt2mQsCe." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 167, + 504, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 167, + 504, + 191 + ], + "spans": [ + { + "bbox": [ + 111, + 167, + 504, + 191 + ], + "type": "text", + "content": "[254] Alex Heyman and Joel Zylberberg. Evaluating the systematic reasoning abilities of large language models through graph coloring. arXiv preprint arXiv:2502.07087, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 194, + 504, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 194, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 111, + 194, + 504, + 217 + ], + "type": "text", + "content": "[255] Alex Heyman and Joel Zylberberg. Reasoning large language model errors arise from hallucinating critical problem features. arXiv preprint arXiv:2505.12151, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 220, + 506, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 220, + 506, + 287 + ], + "spans": [ + { + "bbox": [ + 111, + 220, + 506, + 287 + ], + "type": "text", + "content": "[256] Namgyu Ho, Laura Schmid, and Se-Young Yun. Large language models are reasoning teachers. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 14852–14882, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.830. URL https://aclanthology.org/2023.acl-long.830/." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 290, + 506, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 290, + 506, + 324 + ], + "spans": [ + { + "bbox": [ + 111, + 290, + 506, + 324 + ], + "type": "text", + "content": "[257] Andreas Hochlehnert, Hardik Bhatnagar, Vishaal Udandarao, Samuel Albanie, Ameya Prabhu, and Matthias Bethge. 
A sober look at progress in language model reasoning: Pitfalls and paths to reproducibility. arXiv preprint arXiv:2504.07086, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 327, + 506, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 327, + 506, + 382 + ], + "spans": [ + { + "bbox": [ + 111, + 327, + 506, + 382 + ], + "type": "text", + "content": "[258] Matthew Douglas Hoffman, Du Phan, david dohan, Sholto Douglas, Tuan Anh Le, Aaron T Parisi, Pavel Sountsov, Charles Sutton, Sharad Vikram, and Rif A. Saurous. Training chain-of-thought via latent-variable inference. In Thirty-seventh Conference on Neural Information Processing Systems, September 2023. URL https://openreview.net/forum?id=a147pIS2Co." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 385, + 506, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 385, + 506, + 419 + ], + "spans": [ + { + "bbox": [ + 111, + 385, + 506, + 419 + ], + "type": "text", + "content": "[259] Ruixin Hong, Xinyu Pang, and Changshui Zhang. Advances in reasoning by prompting large language models: A survey. Cybernetics and Intelligence, pages 1-15, 2024. doi: 10.26599/CAI.2024.9390004." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 422, + 505, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 422, + 505, + 491 + ], + "spans": [ + { + "bbox": [ + 111, + 422, + 505, + 491 + ], + "type": "text", + "content": "[260] Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, and Jie Tang. Cogagent: A visual language model for gui agents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14281-14290, June 2024. URL https://openaccess.thecvf.com/content/CVPR2024/papers/Hong_CogAgent_A_Visual_Vocabulary_model_for_GUI_Agents_CVPR_2024_paper.pdf." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 493, + 506, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 493, + 506, + 537 + ], + "spans": [ + { + "bbox": [ + 111, + 493, + 506, + 537 + ], + "type": "text", + "content": "[261] Arian Hosseini, Alessandro Sordoni, Daniel Kenji Toyama, Aaron Courville, and Rishabh Agarwal. Not all LLM reasoners are created equal. In The First Workshop on System-2 Reasoning at Scale, NeurIPS'24, October 2024. URL https://openreview.net/forum?id=aPAWbip1xV." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 540, + 505, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 540, + 505, + 586 + ], + "spans": [ + { + "bbox": [ + 111, + 540, + 505, + 586 + ], + "type": "text", + "content": "[262] Arian Hosseini, Xingdi Yuan, Nikolay Malkin, Aaron Courville, Alessandro Sordoni, and Rishabh Agarwal. V-STar: Training verifiers for self-taught reasoners. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=stmqBSW2dV." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 589, + 506, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 589, + 506, + 621 + ], + "spans": [ + { + "bbox": [ + 111, + 589, + 506, + 621 + ], + "type": "text", + "content": "[263] Bairu Hou, Yang Zhang, Jiabao Ji, Yujuan Liu, Kaizhi Qian, Jacob Andreas, and Shiyu Chang. Thinkprune: Pruning long chain-of-thought of llms via reinforcement learning. arXiv preprint arXiv:2504.01296, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 625, + 505, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 625, + 505, + 660 + ], + "spans": [ + { + "bbox": [ + 111, + 625, + 505, + 660 + ], + "type": "text", + "content": "[264] Zhenyu Hou, Xin Lv, Rui Lu, Jiajie Zhang, Yujiang Li, Zijun Yao, Juanzi Li, Jie Tang, and Yuxiao Dong. 
Advancing language model reasoning through reinforcement learning and inference scaling. arXiv preprint arXiv:2501.11651, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 662, + 505, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 662, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 662, + 505, + 685 + ], + "type": "text", + "content": "[265] Jian Hu. Reinforce++: A simple and efficient approach for aligning large language models. arXiv preprint arXiv:2501.03262, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "text", + "content": "[266] Jian Hu, Xibin Wu, Zilin Zhu, Xianyu, Weixun Wang, Dehao Zhang, and Yu Cao. Openrlhf: An easy-to-use, scalable and high-performance rlhf framework. arXiv preprint arXiv:2405.11143, 2024." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "53" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 52 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 507, + 722 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 111, + 72, + 507, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 507, + 117 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 507, + 117 + ], + "type": "text", + "content": "[267] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. Open-reasoner-zero: An open source approach to scaling reinforcement learning on the base model. https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero, February 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 120, + 505, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 120, + 505, + 153 + ], + "spans": [ + { + "bbox": [ + 111, + 120, + 505, + 153 + ], + "type": "text", + "content": "[268] Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, Xiangyu Zhang, and Heung-Yeung Shum. Open-reasoner-zero: An open source approach to scaling up reinforcement learning on the base model. arXiv preprint arXiv:2503.24290, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 157, + 505, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 157, + 505, + 190 + ], + "spans": [ + { + "bbox": [ + 111, + 157, + 505, + 190 + ], + "type": "text", + "content": "[269] Mengkang Hu, Tianxing Chen, Qiguang Chen, Yao Mu, Wenqi Shao, and Ping Luo. Hiagent: Hierarchical working memory management for solving long-horizon agent tasks with large language model. arXiv preprint arXiv:2408.09559, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 194, + 505, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 194, + 505, + 249 + ], + "spans": [ + { + "bbox": [ + 111, + 194, + 505, + 249 + ], + "type": "text", + "content": "[270] Mengkang Hu, Yao Mu, Xinmiao Chelsey Yu, Mingyu Ding, Shiguang Wu, Wenqi Shao, Qiguang Chen, Bin Wang, Yu Qiao, and Ping Luo. Tree-planner: Efficient close-loop task planning with large language models. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=Glcsg6zOe." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 253, + 505, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 253, + 505, + 286 + ], + "spans": [ + { + "bbox": [ + 111, + 253, + 505, + 286 + ], + "type": "text", + "content": "[271] Mengkang Hu, Pu Zhao, Can Xu, Qingfeng Sun, Jianguang Lou, Qingwei Lin, Ping Luo, and Saravan Rajmohan. Agentgen: Enhancing planning abilities for large language model based agent via environment and task generation. arXiv preprint arXiv:2408.00764, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 290, + 505, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 290, + 505, + 323 + ], + "spans": [ + { + "bbox": [ + 111, + 290, + 505, + 323 + ], + "type": "text", + "content": "[272] Mengkang Hu, Tianxing Chen, Yude Zou, Yuheng Lei, Qiguang Chen, Ming Li, Hongyuan Zhang, Wenqi Shao, and Ping Luo. Text2world: Benchmarking large language models for symbolic world model generation. arXiv preprint arXiv:2502.13092, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 327, + 505, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 327, + 505, + 361 + ], + "spans": [ + { + "bbox": [ + 111, + 327, + 505, + 361 + ], + "type": "text", + "content": "[273] Mengkang Hu, Yuhang Zhou, Wendong Fan, Yuzhou Nie, Bowei Xia, Tao Sun, Ziyu Ye, Zhaoxuan Jin, Yingru Li, Qiguang Chen, et al. Owl: Optimized workforce learning for general multi-agent assistance in real-world task automation. arXiv preprint arXiv:2505.23885, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 364, + 505, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 364, + 505, + 397 + ], + "spans": [ + { + "bbox": [ + 111, + 364, + 505, + 397 + ], + "type": "text", + "content": "[274] Renjun Hu, Yi Cheng, Libin Meng, Jiaxin Xia, Yi Zong, Xing Shi, and Wei Lin. Training an llm-as-a-judge model: Pipeline, insights, and practical lessons. arXiv preprint arXiv:2502.02988, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 401, + 505, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 401, + 505, + 456 + ], + "spans": [ + { + "bbox": [ + 111, + 401, + 505, + 456 + ], + "type": "text", + "content": "[275] Zhiyuan Hu, Chumin Liu, Xidong Feng, Yilun Zhao, See-Kiong Ng, Anh Tuan Luu, Junxian He, Pang Wei Koh, and Bryan Hooi. 
Uncertainty of thoughts: Uncertainty-aware planning enhances information seeking in large language models. In ICLR 2024 Workshop on Large Language Model (LLM) Agents, March 2024. URL https://openreview.net/forum?id=ZWyLjimciT." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 460, + 505, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 460, + 505, + 502 + ], + "spans": [ + { + "bbox": [ + 111, + 460, + 505, + 502 + ], + "type": "text", + "content": "[276] Maggie Huan, Yuetai Li, Tuney Zheng, Xiaoyu Xu, Seungone Kim, Minxin Du, Radha Poovendran, Graham Neubig, and Xiang Yue. Does math reasoning improve general llm capabilities? understanding transferability of llm reasoning. arXiv preprint arXiv:2507.00432, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 507, + 505, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 507, + 505, + 541 + ], + "spans": [ + { + "bbox": [ + 111, + 507, + 505, + 541 + ], + "type": "text", + "content": "[277] Chenghua Huang, Lu Wang, Fangkai Yang, Pu Zhao, Zhixu Li, Qingwei Lin, Dongmei Zhang, Saravan Rajmohan, and Qi Zhang. Lean and mean: Decoupled value policy optimization with global value guidance. arXiv preprint arXiv:2502.16944, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 544, + 505, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 544, + 505, + 567 + ], + "spans": [ + { + "bbox": [ + 111, + 544, + 505, + 567 + ], + "type": "text", + "content": "[278] Chengsong Huang, Langlin Huang, Jixuan Leng, Jiacheng Liu, and Jiaxin Huang. Efficient test-time scaling via self-calibration. arXiv preprint arXiv:2503.00031, 2025." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 571, + 505, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 571, + 505, + 603 + ], + "spans": [ + { + "bbox": [ + 111, + 571, + 505, + 603 + ], + "type": "text", + "content": "[279] Chengyu Huang, Zhengxin Zhang, and Claire Cardie. Hapo: Training language models to reason concisely via history-aware policy optimization. arXiv preprint arXiv:2505.11225, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 607, + 505, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 607, + 505, + 641 + ], + "spans": [ + { + "bbox": [ + 111, + 607, + 505, + 641 + ], + "type": "text", + "content": "[280] Haiduo Huang, Fuwei Yang, Zhenhua Liu, Yixing Xu, Jinze Li, Yang Liu, Xuanwu Yin, Dong Li, Pengju Ren, and Emad Barsoum. Jakiro: Boosting speculative decoding with decoupled multi-head via moe. arXiv preprint arXiv:2502.06282, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 644, + 505, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 644, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 111, + 644, + 505, + 722 + ], + "type": "text", + "content": "[281] Haoyang Huang, Tianyi Tang, Dongdong Zhang, Xin Zhao, Ting Song, Yan Xia, and Furu Wei. Not all languages are created equal in LLMs: Improving multilingual capability by cross-lingual-thought prompting. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 12365–12394, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-emnlp.826. URL https://aclanthology.org/2023-findings-emnlp.826/." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "54" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 53 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[282] Hui Huang, Yancheng He, Hongli Zhou, Rui Zhang, Wei Liu, Weixun Wang, Wenbo Su, Bo Zheng, and Jiaheng Liu. Think-j: Learning to think for generative llm-as-a-judge. arXiv preprint arXiv:2505.14268, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 108, + 505, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 108, + 505, + 152 + ], + "spans": [ + { + "bbox": [ + 111, + 108, + 505, + 152 + ], + "type": "text", + "content": "[283] Jen-tse Huang, Eric John Li, Man Ho Lam, Tian Liang, Wenxuan Wang, Youliang Yuan, Wenxiang Jiao, Xing Wang, Zhaopeng Tu, and Michael R Lyu. How far are we on the decision-making of llms? evaluating llms' gaming ability in multi-agent environments. arXiv preprint arXiv:2403.11807, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 156, + 504, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 156, + 504, + 178 + ], + "spans": [ + { + "bbox": [ + 111, + 156, + 504, + 178 + ], + "type": "text", + "content": "[284] Jiaxing Huang and Jingyi Zhang. A survey on evaluation of multimodal large language models. arXiv preprint arXiv:2408.15769, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 180, + 505, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 180, + 505, + 236 + ], + "spans": [ + { + "bbox": [ + 111, + 180, + 505, + 236 + ], + "type": "text", + "content": "[285] Jie Huang and Kevin Chen-Chuan Chang. Towards reasoning in large language models: A survey. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Findings of the Association for Computational Linguistics: ACL 2023, pages 1049–1065, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.67. URL https://aclanthology.org/2023-findings-acl.67/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 238, + 504, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 238, + 504, + 282 + ], + "spans": [ + { + "bbox": [ + 111, + 238, + 504, + 282 + ], + "type": "text", + "content": "[286] Jie Huang, Xinyun Chen, Swaroop Mishra, Huaixiu Steven Zheng, Adams Wei Yu, Xinying Song, and Denny Zhou. Large language models cannot self-correct reasoning yet. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=IkmD3fKBPQ." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 285, + 505, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 285, + 505, + 319 + ], + "spans": [ + { + "bbox": [ + 111, + 285, + 505, + 319 + ], + "type": "text", + "content": "[287] Jinyang Huang, Xiachong Feng, Qiguang Chen, Hanjie Zhao, Zihui Cheng, Jiesong Bai, Jingxuan Zhou, Min Li, and Libo Qin. Mldebugging: Towards benchmarking code debugging across multi-library scenarios. arXiv preprint arXiv:2506.13824, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 321, + 505, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 321, + 505, + 355 + ], + "spans": [ + { + "bbox": [ + 111, + 321, + 505, + 355 + ], + "type": "text", + "content": "[288] Kaixuan Huang, Jiacheng Guo, Zihao Li, Xiang Ji, Jiawei Ge, Wenzhe Li, Yingqing Guo, Tianle Cai, Hui Yuan, Runzhe Wang, et al. Math-perturb: Benchmarking llms' math reasoning abilities against hard perturbations. arXiv preprint arXiv:2502.06453, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 357, + 505, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 357, + 505, + 434 + ], + "spans": [ + { + "bbox": [ + 111, + 357, + 505, + 434 + ], + "type": "text", + "content": "[289] Lei Huang, Xiaocheng Feng, Weitao Ma, Liang Zhao, Yuchun Fan, Weihong Zhong, Dongliang Xu, Qing Yang, Hongtao Liu, and Bing Qin. Advancing large language model attribution through self-improving. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 3822-3836, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.223. URL https://aclanthology.org/2024.emnlp-main.223/." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 437, + 505, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 437, + 505, + 470 + ], + "spans": [ + { + "bbox": [ + 111, + 437, + 505, + 470 + ], + "type": "text", + "content": "[290] Shijue Huang, Hongru Wang, Wanjun Zhong, Zhaochen Su, Jiazhan Feng, Bowen Cao, and Yi R Fung. Adactrl: Towards adaptive and controllable reasoning via difficulty-aware budgeting. arXiv preprint arXiv:2505.18822, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 472, + 505, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 472, + 505, + 506 + ], + "spans": [ + { + "bbox": [ + 111, + 472, + 505, + 506 + ], + "type": "text", + "content": "[291] Shulin Huang, Linyi Yang, Yan Song, Shuang Chen, Leyang Cui, Ziyu Wan, Qingcheng Zeng, Ying Wen, Kun Shao, Weinan Zhang, et al. Thinkbench: Dynamic out-of-distribution evaluation for robust llm reasoning. arXiv preprint arXiv:2502.16268, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 509, + 505, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 509, + 505, + 542 + ], + "spans": [ + { + "bbox": [ + 111, + 509, + 505, + 542 + ], + "type": "text", + "content": "[292] Tiansheng Huang, Sihao Hu, Fatih Ilhan, Selim Furkan Tekin, Zachary Yahn, Yichang Xu, and Ling Liu. Safety tax: Safety alignment makes your large reasoning models less reasonable. arXiv preprint arXiv:2503.00555, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 544, + 505, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 544, + 505, + 578 + ], + "spans": [ + { + "bbox": [ + 111, + 544, + 505, + 578 + ], + "type": "text", + "content": "[293] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. 
arXiv preprint arXiv:2503.06749, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 580, + 505, + 614 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 580, + 505, + 614 + ], + "spans": [ + { + "bbox": [ + 111, + 580, + 505, + 614 + ], + "type": "text", + "content": "[294] Xiaoke Huang, Juncheng Wu, Hui Liu, Xianfeng Tang, and Yuyin Zhou. m1: Unleash the potential of test-time scaling for medical reasoning with large language models. arXiv preprint arXiv:2504.00869, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 616, + 505, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 616, + 505, + 650 + ], + "spans": [ + { + "bbox": [ + 111, + 616, + 505, + 650 + ], + "type": "text", + "content": "[295] Yiming Huang, Xiao Liu, Yeyun Gong, Zhibin Gou, Yelong Shen, Nan Duan, and Weizhu Chen. Key-point-driven data synthesis with its enhancement on mathematical reasoning. arXiv preprint arXiv:2403.02333, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 652, + 505, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 652, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 652, + 505, + 685 + ], + "type": "text", + "content": "[296] Yuzhen Huang, Weihao Zeng, Xingshan Zeng, Qi Zhu, and Junxian He. Pitfalls of rule-and model-based verifiers-a case study on mathematical reasoning. arXiv preprint arXiv:2505.22203, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 688, + 505, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 688, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 688, + 505, + 723 + ], + "type": "text", + "content": "[297] Zeyu Huang, Tianhao Cheng, Zihan Qiu, Zili Wang, Yinghui Xu, Edoardo M Ponti, and Ivan Titov. Blending supervised and reinforcement fine-tuning with prefix sampling. arXiv preprint arXiv:2507.01679, 2025." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "55" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 54 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 506, + 149 + ], + "type": "text", + "content": "[298] Zhen Huang, Zengzhi Wang, Shijie Xia, Xuefeng Li, Haoyang Zou, Ruijie Xu, Run-Ze Fan, Lyumanshan Ye, Ethan Chern, Yixin Ye, Yikai Zhang, Yuqing Yang, Ting Wu, Binjie Wang, Shichao Sun, Yang Xiao, Yiyuan Li, Fan Zhou, Steffi Chern, Yiwei Qin, Yan Ma, Jiadi Su, Yixiu Liu, Yuxiang Zheng, Shaoting Zhang, Dahua Lin, Yu Qiao, and Pengfei Liu. Olympic: Benchmarking multi-discipline cognitive reasoning for superintelligent AI. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2024. URL https://openreview.net/forum?id=ayF8bEKYQy." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 153, + 506, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 153, + 506, + 198 + ], + "spans": [ + { + "bbox": [ + 111, + 153, + 506, + 198 + ], + "type": "text", + "content": "[299] Zhen Huang, Haoyang Zou, Xuefeng Li, Yixiu Liu, Yuxiang Zheng, Ethan Chern, Shijie Xia, Yiwei Qin, Weizhe Yuan, and Pengfei Liu. O1 replication journey–part 2: Surpassing o1-preview through simple distillation, big progress or bitter lesson? arXiv preprint arXiv:2411.16489, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 203, + 506, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 203, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 111, + 203, + 506, + 237 + ], + "type": "text", + "content": "[300] Zhongzhen Huang, Gui Geng, Shengyi Hua, Zhen Huang, Haoyang Zou, Shaoting Zhang, Pengfei Liu, and Xiaofan Zhang. O1 replication journey–part 3: Inference-time scaling for medical reasoning. arXiv preprint arXiv:2501.06458, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 241, + 504, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 241, + 504, + 274 + ], + "spans": [ + { + "bbox": [ + 111, + 241, + 504, + 274 + ], + "type": "text", + "content": "[301] Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2.5-coder technical report. arXiv preprint arXiv:2409.12186, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 279, + 506, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 279, + 506, + 346 + ], + "spans": [ + { + "bbox": [ + 111, + 279, + 506, + 346 + ], + "type": "text", + "content": "[302] Hyeonbin Hwang, Doyoung Kim, Seungone Kim, Seonghyeon Ye, and Minjoon Seo. Self-exlore: Enhancing mathematical reasoning in language models with fine-grained rewards. 
In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 1444-1466, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.78. URL https://aclanthology.org/2024 findings-emnlp.78/." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 350, + 504, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 350, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 111, + 350, + 504, + 373 + ], + "type": "text", + "content": "[303] Shima Imani, Liang Du, and Harsh Shrivastava. Mathprompter: Mathematical reasoning using large language models. 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 377, + 504, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 377, + 504, + 401 + ], + "spans": [ + { + "bbox": [ + 111, + 377, + 504, + 401 + ], + "type": "text", + "content": "[304] Md Ashraful Islam, Mohammed Eunus Ali, and Md Rizwan Parvez. Mapcoder: Multi-agent code generation for competitive problem solving. arXiv preprint arXiv:2405.11403, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 405, + 504, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 405, + 504, + 438 + ], + "spans": [ + { + "bbox": [ + 111, + 405, + 504, + 438 + ], + "type": "text", + "content": "[305] Hamish Ivison, Yizhong Wang, Valentina Pyatkin, Nathan Lambert, Matthew Peters, Pradeep Dasigi, Joel Jang, David Wadden, Noah A Smith, Iz Beltagy, et al. Camels in a changing climate: Enhancing lm adaptation with tulu 2, 2023." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 443, + 506, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 443, + 506, + 498 + ], + "spans": [ + { + "bbox": [ + 111, + 443, + 506, + 498 + ], + "type": "text", + "content": "[306] Hamish Ivison, Yizhong Wang, Jiacheng Liu, Zeqiu Wu, Valentina Pyatkin, Nathan Lambert, Noah A. Smith, Yejin Choi, and Hannaneh Hajishirzi. Unpacking DPO and PPO: Disentangling best practices for learning from preference feedback. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=JMBWTlazjW." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 503, + 506, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 503, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 111, + 503, + 506, + 536 + ], + "type": "text", + "content": "[307] Aaron Jaech, Adam Kalai, Adam Lerner, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 541, + 504, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 541, + 504, + 585 + ], + "spans": [ + { + "bbox": [ + 111, + 541, + 504, + 585 + ], + "type": "text", + "content": "[308] Eeshaan Jain, Johann Wenckstern, Benedikt von Querfurth, and Charlotte Bunne. Test-time view selection for multi-modal decision making. In ICLR 2025 Workshop on Machine Learning for Genomics Explorations, March 2025. URL https://openreview.net/forum?id=aNmZ9s6BZV." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 590, + 506, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 590, + 506, + 645 + ], + "spans": [ + { + "bbox": [ + 111, + 590, + 506, + 645 + ], + "type": "text", + "content": "[309] Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. Livecodebench: Holistic and contamination free evaluation of large language models for code. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=chfJJYC3iL." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 650, + 506, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 650, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 111, + 650, + 506, + 673 + ], + "type": "text", + "content": "[310] Sooyoung Jang and Hyung-II Kim. Entropy-aware model initialization for effective exploration in deep reinforcement learning. Sensors, 22(15):5845, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 677, + 504, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 677, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 111, + 677, + 504, + 721 + ], + "type": "text", + "content": "[311] Ke Ji, Jiahao Xu, Tian Liang, Qiuzhi Liu, Zhiwei He, Xingyu Chen, Xiaoyuan Liu, Zhijie Wang, Junying Chen, Benyou Wang, et al. The first few tokens are all you need: An efficient and effective unsupervised prefix fine-tuning method for reasoning models. arXiv preprint arXiv:2503.02875, 2025." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "56" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 55 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[312] Tao Ji, Bin Guo, Yuanbin Wu, Qipeng Guo, Lixing Shen, Zhan Chen, Xipeng Qiu, Qi Zhang, and Tao Gui. Towards economical inference: Enabling deepseek's multi-head latent attention in any transformer-based llms. arXiv preprint arXiv:2502.14837, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 110, + 505, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 505, + 142 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 505, + 142 + ], + "type": "text", + "content": "[313] Yichao Ji. A small step towards reproducing openai o1: Progress report on the steiner open source models, October 2024. URL https://medium.com/@peakji/b9a756a00855." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 146, + 505, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 146, + 505, + 180 + ], + "spans": [ + { + "bbox": [ + 111, + 146, + 505, + 180 + ], + "type": "text", + "content": "[314] Yixin Ji, Juntao Li, Hai Ye, Kaixin Wu, Jia Xu, Linjian Mo, and Min Zhang. Test-time computing: from system-1 thinking to system-2 thinking. arXiv preprint arXiv:2501.02497, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 183, + 506, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 183, + 506, + 251 + ], + "spans": [ + { + "bbox": [ + 111, + 183, + 506, + 251 + ], + "type": "text", + "content": "[315] Ziwei Ji, Tiezheng Yu, Yan Xu, Nayeon Lee, Etsuko Ishii, and Pascale Fung. Towards mitigating LLM hallucination via self reflection. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 1827-1843, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.123. URL https://aclanthology.org/2023.findings-emnlp.123/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 254, + 504, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 254, + 504, + 287 + ], + "spans": [ + { + "bbox": [ + 111, + 254, + 504, + 287 + ], + "type": "text", + "content": "[316] Boyu Jia, Junzhe Zhang, Huixuan Zhang, and Xiaojun Wan. Exploring and evaluating multimodal knowledge reasoning consistency of multimodal large language models. arXiv preprint arXiv:2503.04801, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 291, + 504, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 291, + 504, + 314 + ], + "spans": [ + { + "bbox": [ + 111, + 291, + 504, + 314 + ], + "type": "text", + "content": "[317] Zeyu Jia, Alexander Rakhlin, and Tengyang Xie. 
Do we need to verify step by step? rethinking process supervision from a theoretical perspective. arXiv preprint arXiv:2502.10581, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 317, + 504, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 317, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 111, + 317, + 504, + 361 + ], + "type": "text", + "content": "[318] Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh, Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. Mistral 7b, October 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 365, + 504, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 365, + 504, + 399 + ], + "spans": [ + { + "bbox": [ + 111, + 365, + 504, + 399 + ], + "type": "text", + "content": "[319] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 403, + 504, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 403, + 504, + 436 + ], + "spans": [ + { + "bbox": [ + 111, + 403, + 504, + 436 + ], + "type": "text", + "content": "[320] Fengqing Jiang, Zhangchen Xu, Yuetai Li, Luyao Niu, Zhen Xiang, Bo Li, Bill Yuchen Lin, and Radha Poovendran. Safechain: Safety of language models with long chain-of-thought reasoning capabilities. arXiv preprint arXiv:2502.12025, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 439, + 504, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 439, + 504, + 473 + ], + "spans": [ + { + "bbox": [ + 111, + 439, + 504, + 473 + ], + "type": "text", + "content": "[321] Huchen Jiang, Yangyang Ma, Chaofan Ding, Kexin Luan, and Xinhan Di. Towards intrinsic self-correction enhancement in monte carlo tree search boosted reasoning via iterative preference learning. arXiv preprint arXiv:2412.17397, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 476, + 504, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 476, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 111, + 476, + 504, + 510 + ], + "type": "text", + "content": "[322] Jinhao Jiang, Jiayi Chen, Junyi Li, Ruiyang Ren, Shijie Wang, Wayne Xin Zhao, Yang Song, and Tao Zhang. Rag-star: Enhancing deliberative reasoning with retrieval augmented verification and refinement. arXiv preprint arXiv:2412.12881, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 514, + 504, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 514, + 504, + 548 + ], + "spans": [ + { + "bbox": [ + 111, + 514, + 504, + 548 + ], + "type": "text", + "content": "[323] Jinhao Jiang, Zhipeng Chen, Yingqian Min, Jie Chen, Xiaoxue Cheng, Jiapeng Wang, Yiru Tang, Haoxiang Sun, Jia Deng, Wayne Xin Zhao, et al. Technical report: Enhancing llm reasoning with reward-guided tree search. arXiv preprint arXiv:2411.11694, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 551, + 504, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 551, + 504, + 574 + ], + "spans": [ + { + "bbox": [ + 111, + 551, + 504, + 574 + ], + "type": "text", + "content": "[324] Nan Jiang, Ziming Wu, De-Chuan Zhan, Fuming Lai, and Shaobing Lian. Dart: Distilling autoregressive reasoning to silent thought. 
arXiv preprint arXiv:2506.11752, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 577, + 504, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 577, + 504, + 610 + ], + "spans": [ + { + "bbox": [ + 111, + 577, + 504, + 610 + ], + "type": "text", + "content": "[325] Shuyang Jiang, Yusheng Liao, Zhe Chen, Ya Zhang, Yanfeng Wang, and Yu Wang. Meds 3: Towards medical small language models with self-evolved slow thinking. arXiv preprint arXiv:2501.12051, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 614, + 504, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 614, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 111, + 614, + 504, + 647 + ], + "type": "text", + "content": "[326] Yuxuan Jiang, Dawei Li, and Frank Ferraro. Drp: Distilled reasoning pruning with skill-aware step decomposition for efficient large reasoning models. arXiv preprint arXiv:2505.13975, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 651, + 504, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 651, + 504, + 696 + ], + "spans": [ + { + "bbox": [ + 111, + 651, + 504, + 696 + ], + "type": "text", + "content": "[327] Carlos E Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik R Narasimhan. SWE-bench: Can language models resolve real-world github issues? In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=VTF8yNQM66." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 700, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 700, + 504, + 723 + ], + "type": "text", + "content": "[328] Di Jin, Eileen Pan, Nassim Oufattole, Wei-Hung Weng, Hanyi Fang, and Peter Szolovits. What disease does this patient have? 
a large-scale open domain question answering dataset" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "57" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 56 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 136, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 136, + 72, + 505, + 95 + ], + "type": "text", + "content": "from medical exams. Applied Sciences, 11(14), July 2021. ISSN 2076-3417. doi: 10.3390/app11146421. URL https://www.mdpi.com/2076-3417/11/14/6421." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 98, + 505, + 132 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 98, + 505, + 132 + ], + "spans": [ + { + "bbox": [ + 111, + 98, + 505, + 132 + ], + "type": "text", + "content": "[329] Mingyu Jin, Weidi Luo, Sitao Cheng, Xinyi Wang, Wenyue Hua, Ruixiang Tang, William Yang Wang, and Yongfeng Zhang. Disentangling memory and reasoning ability in large language models. arXiv preprint arXiv:2411.13504, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 134, + 505, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 134, + 505, + 201 + ], + "spans": [ + { + "bbox": [ + 111, + 134, + 505, + 201 + ], + "type": "text", + "content": "[330] Mingyu Jin, Qinkai Yu, Dong Shu, Haiyan Zhao, Wenyue Hua, Yanda Meng, Yongfeng Zhang, and Mengnan Du. The impact of reasoning step length on large language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 1830–1842, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.108. URL https://aclanthology.org/2024-findings-acl.108/." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 204, + 506, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 204, + 506, + 293 + ], + "spans": [ + { + "bbox": [ + 111, + 204, + 506, + 293 + ], + "type": "text", + "content": "[331] Mingyu Jin, Qinkai Yu, Jingyuan Huang, Qingcheng Zeng, Zhenting Wang, Wenyue Hua, Haiyan Zhao, Kai Mei, Yanda Meng, Kaize Ding, Fan Yang, Mengnan Du, and Yongfeng Zhang. Exploring concept depth: How large language models acquire knowledge and concept at different layers? In Owen Rambow, Leo Wanner, Marianna Apidianaki, Hend Al-Khalifa, Barbara Di Eugenio, and Steven Schockaert, editors, Proceedings of the 31st International Conference on Computational Linguistics, pages 558-573, Abu Dhabi, UAE, January 2025. Association for Computational Linguistics. URL https://aclanthology.org/2025.coling-main.37/." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 296, + 504, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 296, + 504, + 329 + ], + "spans": [ + { + "bbox": [ + 111, + 296, + 504, + 329 + ], + "type": "text", + "content": "[332] Zhensheng Jin, Xinze Li, Yifan Ji, Chunyi Peng, Zhenghao Liu, Qi Shi, Yukun Yan, Shuo Wang, Furong Peng, and Ge Yu. Recut: Balancing reasoning length and accuracy in llms via stepwise trails and preference optimization. arXiv preprint arXiv:2506.10822, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 332, + 505, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 332, + 505, + 354 + ], + "spans": [ + { + "bbox": [ + 111, + 332, + 505, + 354 + ], + "type": "text", + "content": "[333] Andy L Jones. Scaling scaling laws with board games. arXiv preprint arXiv:2104.03113, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 357, + 504, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 357, + 504, + 380 + ], + "spans": [ + { + "bbox": [ + 111, + 357, + 504, + 380 + ], + "type": "text", + "content": "[334] Cameron R Jones and Benjamin K Bergen. Large language models pass the Turing test. arXiv preprint arXiv:2503.23674, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 384, + 504, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 384, + 504, + 407 + ], + "spans": [ + { + "bbox": [ + 111, + 384, + 504, + 407 + ], + "type": "text", + "content": "[335] Prashank Kadam. Gpt-guided monte carlo tree search for symbolic regression in financial fraud detection. arXiv preprint arXiv:2411.04459, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 410, + 505, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 410, + 505, + 443 + ], + "spans": [ + { + "bbox": [ + 111, + 410, + 505, + 443 + ], + "type": "text", + "content": "[336] Saurav Kadavath, Tom Conerly, Amanda Askell, Tom Henighan, Dawn Drain, Ethan Perez, Nicholas Schiefer, Zac Hatfield-Dodds, Nova DasSarma, Eli Tran-Johnson, et al. Language models (mostly) know what they know. arXiv preprint arXiv:2207.05221, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 446, + 505, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 446, + 505, + 502 + ], + "spans": [ + { + "bbox": [ + 111, + 446, + 505, + 502 + ], + "type": "text", + "content": "[337] Ryo Kamoi, Sarkar Snigdha Sarathi Das, Renze Lou, Jihyun Janice Ahn, Yilun Zhao, Xiaoxin Lu, Nan Zhang, Yusen Zhang, Haoran Ranran Zhang, Sujeeth Reddy Vummanthala, Salika Dave, Shaobo Qin, Arman Cohan, Wenpeng Yin, and Rui Zhang. Evaluating LLMs at detecting errors in LLM responses. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=dnwRScljXr." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 505, + 505, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 505, + 505, + 538 + ], + "spans": [ + { + "bbox": [ + 111, + 505, + 505, + 538 + ], + "type": "text", + "content": "[338] Jikun Kang, Xin Zhe Li, Xi Chen, Amirreza Kazemi, Qianyi Sun, Boxing Chen, Dong Li, Xu He, Quan He, Feng Wen, et al. Mindstar: Enhancing math reasoning in pre-trained llms at inference time. arXiv preprint arXiv:2405.16265, 2024." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 541, + 506, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 541, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 111, + 541, + 506, + 608 + ], + "type": "text", + "content": "[339] Liwei Kang, Zirui Zhao, David Hsu, and Wee Sun Lee. On the empirical complexity of reasoning and planning in LLMs. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 2897-2936, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.164. URL https://aclanthology.org/2024-findings-emnlp.164/." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 611, + 506, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 611, + 506, + 634 + ], + "spans": [ + { + "bbox": [ + 111, + 611, + 506, + 634 + ], + "type": "text", + "content": "[340] Yu Kang, Xianghui Sun, Liangyu Chen, and Wei Zou. C3ot: Generating shorter chain-of-thought without compromising effectiveness. 39(23):24312-24320, Apr 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 637, + 504, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 637, + 504, + 660 + ], + "spans": [ + { + "bbox": [ + 111, + 637, + 504, + 660 + ], + "type": "text", + "content": "[341] Zhewei Kang, Xuandong Zhao, and Dawn Song. Scalable best-of-n selection for large language models via self-certainty. arXiv preprint arXiv:2502.18581, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 662, + 504, + 695 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 662, + 504, + 695 + ], + "spans": [ + { + "bbox": [ + 111, + 662, + 504, + 695 + ], + "type": "text", + "content": "[342] Manuj Kant, Sareh Nabi, Manav Kant, Roland Scharrer, Megan Ma, and Marzieh Nabi. 
Towards robust legal reasoning: Harnessing logical llms in law. arXiv preprint arXiv:2502.17638, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 700, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 700, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 700, + 506, + 723 + ], + "type": "text", + "content": "[343] Mehran Kazemi, Najoung Kim, Deepti Bhatia, Xin Xu, and Deepak Ramachandran. LAM-BADA: Backward chaining for automated reasoning in natural language. In Anna Rogers," + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "58" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 57 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 507, + 723 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 136, + 72, + 507, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 72, + 507, + 128 + ], + "spans": [ + { + "bbox": [ + 136, + 72, + 507, + 128 + ], + "type": "text", + "content": "Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6547-6568, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.361. URL https://aclanthology.org/2023.acl-long.361/." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 131, + 505, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 131, + 505, + 166 + ], + "spans": [ + { + "bbox": [ + 111, + 131, + 505, + 166 + ], + "type": "text", + "content": "[344] Amirhossein Kazemnejad, Milad Aghajohari, Eva Portelance, Alessandro Sordoni, Siva Reddy, Aaron Courville, and Nicolas Le Roux. Vineppo: Unlocking rl potential for llm reasoning through refined credit assignment. arXiv preprint arXiv:2410.01679, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 170, + 506, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 170, + 506, + 203 + ], + "spans": [ + { + "bbox": [ + 111, + 170, + 506, + 203 + ], + "type": "text", + "content": "[345] Muhammad Khalifa, Lajanugen Logeswaran, Moontae Lee, Honglak Lee, and Lu Wang. Grace: Discriminator-guided chain-of-thought reasoning. arXiv preprint arXiv:2305.14934, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 207, + 504, + 242 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 207, + 504, + 242 + ], + "spans": [ + { + "bbox": [ + 111, + 207, + 504, + 242 + ], + "type": "text", + "content": "[346] Artyom Kharinaev, Viktor Moskvoretskii, Egor Shvetsov, Kseniia Studenikina, Bykov Mikhail, and Evgeny Burnaev. Investigating the impact of quantization methods on the safety and reliability of large language models. arXiv preprint arXiv:2502.15799, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 245, + 506, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 245, + 506, + 280 + ], + "spans": [ + { + "bbox": [ + 111, + 245, + 506, + 280 + ], + "type": "text", + "content": "[347] Hyunwoo Kim, Melanie Sclar, Tan Zhi-Xuan, Lance Ying, Sydney Levine, Yang Liu, Joshua B Tenenbaum, and Yejin Choi. Hypothesis-driven theory-of-mind reasoning for large language models. 
arXiv preprint arXiv:2502.11881, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 283, + 506, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 283, + 506, + 317 + ], + "spans": [ + { + "bbox": [ + 111, + 283, + 506, + 317 + ], + "type": "text", + "content": "[348] Jiin Kim, Byeongjun Shin, Jinha Chung, and Minsoo Rhu. The cost of dynamic reasoning: Demystifying ai agents and test-time scaling from an ai infrastructure perspective. arXiv preprint arXiv:2506.04301, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 320, + 506, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 320, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 111, + 320, + 506, + 354 + ], + "type": "text", + "content": "[349] Juno Kim, Denny Wu, Jason Lee, and Taiji Suzuki. Metastable dynamics of chain-of-thought reasoning: Provable benefits of search, rl and distillation. arXiv preprint arXiv:2502.01694, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 358, + 506, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 358, + 506, + 382 + ], + "spans": [ + { + "bbox": [ + 111, + 358, + 506, + 382 + ], + "type": "text", + "content": "[350] Moo Jin Kim, Chelsea Finn, and Percy Liang. Fine-tuning vision-language-action models: Optimizing speed and success. arXiv preprint arXiv:2502.19645, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 385, + 506, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 385, + 506, + 418 + ], + "spans": [ + { + "bbox": [ + 111, + 385, + 506, + 418 + ], + "type": "text", + "content": "[351] Naryeong Kim, Sungmin Kang, Gabin An, and Shin Yoo. Lachesis: Predicting llm inference accuracy using structural properties of reasoning paths. arXiv preprint arXiv:2412.08281, 2024." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 423, + 507, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 423, + 507, + 500 + ], + "spans": [ + { + "bbox": [ + 111, + 423, + 507, + 500 + ], + "type": "text", + "content": "[352] Seungone Kim, Se Joo, Doyoung Kim, Joel Jang, Seonghyeon Ye, Jamin Shin, and Minjoon Seo. The CoT collection: Improving zero-shot and few-shot learning of language models via chain-of-thought fine-tuning. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12685-12708, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.782. URL https://aclanthology.org/2023.emnlp-main.782/." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 504, + 507, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 504, + 507, + 582 + ], + "spans": [ + { + "bbox": [ + 111, + 504, + 507, + 582 + ], + "type": "text", + "content": "[353] Seungone Kim, Juyoung Suk, Shayne Longpre, Bill Yuchen Lin, Jamin Shin, Sean Welleck, Graham Neubig, Moontae Lee, Kyungjae Lee, and Minjoon Seo. Prometheus 2: An open source language model specialized in evaluating other language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 4334-4353, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.248. URL https://aclanthology.org/2024.emnlp-main.248/." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 586, + 507, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 586, + 507, + 620 + ], + "spans": [ + { + "bbox": [ + 111, + 586, + 507, + 620 + ], + "type": "text", + "content": "[354] Sunnie SY Kim, Jennifer Wortman Vaughan, Q Vera Liao, Tania Lombrozo, and Olga Russakovsky. Fostering appropriate reliance on large language models: The role of explanations, sources, and inconsistencies. arXiv preprint arXiv:2502.08554, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 624, + 506, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 624, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 111, + 624, + 506, + 647 + ], + "type": "text", + "content": "[355] Jing Yu Koh, Stephen McAleer, Daniel Fried, and Ruslan Salakhutdinov. Tree search for language model agents. arXiv preprint arXiv:2407.01476, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 651, + 504, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 651, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 651, + 504, + 685 + ], + "type": "text", + "content": "[356] Deqian Kong, Minglu Zhao, Dehong Xu, Bo Pang, Shu Wang, Edouardo Honig, Zhangzhang Si, Chuan Li, Jianwen Xie, Sirui Xie, et al. Scalable language models with posterior inference of latent thought vectors. arXiv preprint arXiv:2502.01567, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 688, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 688, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 688, + 504, + 723 + ], + "type": "text", + "content": "[357] Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. Overthinking: Slowdown attacks on reasoning llms. arXiv preprint arXiv:2502.02542, 2025." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "59" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 58 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[358] Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, et al. Training language models to self-correct via reinforcement learning. arXiv preprint arXiv:2409.12917, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 108, + 506, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 108, + 506, + 152 + ], + "spans": [ + { + "bbox": [ + 111, + 108, + 506, + 152 + ], + "type": "text", + "content": "[359] Komal Kumar, Tajamul Ashraf, Omkar Thawakar, Rao Muhammad Anwer, Hisham Cholakkal, Mubarak Shah, Ming-Hsuan Yang, Phillip H. S. Torr, Salman Khan, and Fahad Shahbaz Khan. Llm post-training: A deep dive into reasoning large language models, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 156, + 506, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 156, + 506, + 201 + ], + "spans": [ + { + "bbox": [ + 111, + 156, + 506, + 201 + ], + "type": "text", + "content": "[360] Martin Kuo, Jianyi Zhang, Aolin Ding, Qinsi Wang, Louis DiValentin, Yujia Bao, Wei Wei, Da-Cheng Juan, Hai Li, and Yiran Chen. H-cot: Hijacking the chain-of-thought safety reasoning mechanism to jailbreak large reasoning models, including openai o1/o3, deepseek-r1, and gemini 2.0 flash thinking. arXiv preprint arXiv:2502.12893, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 204, + 506, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 204, + 506, + 227 + ], + "spans": [ + { + "bbox": [ + 111, + 204, + 506, + 227 + ], + "type": "text", + "content": "[361] EvolvingLMMs Lab. Open-r1-multimodal. https://github.com/EvolvingLMMs-Lab/open-r1-multimodal, February 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 230, + 506, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 230, + 506, + 264 + ], + "spans": [ + { + "bbox": [ + 111, + 230, + 506, + 264 + ], + "type": "text", + "content": "[362] Bespoke Labs. Bespoke-stratos: The unreasonable effectiveness of reasoning distillation. https://www.bespokelabs.ai/blog/bespoke-stratos-the-unreasonable-effectiveness-of-reasoning-distillation, January 2025. Accessed: 2025-01-22." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 266, + 506, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 266, + 506, + 300 + ], + "spans": [ + { + "bbox": [ + 111, + 266, + 506, + 300 + ], + "type": "text", + "content": "[363] Inception Labs, Samar Khanna, Siddhant Kharbanda, Shufan Li, Harshit Varma, Eric Wang, Sawyer Birnbaum, Ziyang Luo, Yanis Miraoui, Akash Palrecha, et al. 
Mercury: Ultra-fast language models based on diffusion. arXiv preprint arXiv:2506.17298, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 304, + 504, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 304, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 111, + 304, + 504, + 327 + ], + "type": "text", + "content": "[364] Huiyuan Lai, Xiao Zhang, and Malvina Nissim. Multidimensional consistency improves reasoning in language models. arXiv preprint arXiv:2503.02670, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 329, + 506, + 362 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 329, + 506, + 362 + ], + "spans": [ + { + "bbox": [ + 111, + 329, + 506, + 362 + ], + "type": "text", + "content": "[365] Xin Lai, Zhuotao Tian, Yukang Chen, Senqiao Yang, Xiangru Peng, and Jiaya Jia. Step-dpo: Step-wise preference optimization for long-chain reasoning of llms. arXiv preprint arXiv:2406.18629, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 365, + 504, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 365, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 111, + 365, + 504, + 422 + ], + "type": "text", + "content": "[366] Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V. Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, Yuling Gu, Saumya Malik, Victoria Graf, Jena D. Hwang, Jiangjiang Yang, Ronan Le Bras, Oyvind Tafjord, Chris Wilhelm, Luca Soldaini, Noah A. Smith, Yizhong Wang, Pradeep Dasigi, and Hannaneh Hajishirzi. Tulu 3: Pushing frontiers in open language model post-training, 2024." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 424, + 504, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 424, + 504, + 459 + ], + "spans": [ + { + "bbox": [ + 111, + 424, + 504, + 459 + ], + "type": "text", + "content": "[367] Nathan Lambert, Valentina Pyatkin, Jacob Morrison, LJ Miranda, Bill Yuchen Lin, Khyathi Chandu, Nouha Dziri, Sachin Kumar, Tom Zick, Yejin Choi, et al. Rewardbench: Evaluating reward models for language modeling. arXiv preprint arXiv:2403.13787, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 461, + 506, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 461, + 506, + 539 + ], + "spans": [ + { + "bbox": [ + 111, + 461, + 506, + 539 + ], + "type": "text", + "content": "[368] Andrew Lampinen, Ishita Dasgupta, Stephanie Chan, Kory Mathewson, Mh Tessler, Antonia Creswell, James McClelland, Jane Wang, and Felix Hill. Can language models learn from explanations in context? In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang, editors, Findings of the Association for Computational Linguistics: EMNLP 2022, pages 537-563, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022-findings-emnlp.38. URL https://aclanthology.org/2022-findings-emnlp.38." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 541, + 506, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 541, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 111, + 541, + 506, + 575 + ], + "type": "text", + "content": "[369] Jack Lanchantin, Angelica Chen, Shehzaad Dhuliawala, Ping Yu, Jason Weston, Sainbayar Sukhbaatar, and Ilia Kulikov. Diverse preference optimization. arXiv preprint arXiv:2501.18101, 2025." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 578, + 506, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 578, + 506, + 621 + ], + "spans": [ + { + "bbox": [ + 111, + 578, + 506, + 621 + ], + "type": "text", + "content": "[370] Anh Duc Le, Tu Vu, Nam Le Hai, Nguyen Thi Ngoc Diep, Linh Ngo Van, Trung Le, and Thien Huu Nguyen. Cot2align: Cross-chain of thought distillation via optimal transport alignment for language models with different tokenizers. arXiv preprint arXiv:2502.16806, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 625, + 506, + 659 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 625, + 506, + 659 + ], + "spans": [ + { + "bbox": [ + 111, + 625, + 506, + 659 + ], + "type": "text", + "content": "[371] Joshua Ong Jun Leang, Aryo Pradipta Gema, and Shay B Cohen. Comat: Chain of mathematically annotated thought improves mathematical reasoning. arXiv preprint arXiv:2410.10336, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 662, + 504, + 686 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 662, + 504, + 686 + ], + "spans": [ + { + "bbox": [ + 111, + 662, + 504, + 686 + ], + "type": "text", + "content": "[372] Joshua Ong Jun Leang, Giwon Hong, Wenda Li, and Shay B Cohen. Theorem prover as a judge for synthetic data generation. arXiv preprint arXiv:2502.13137, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 689, + 506, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 506, + 721 + ], + "type": "text", + "content": "[373] Byeongchan Lee, Jonghoon Lee, Dongyoung Kim, Jaehyung Kim, and Jinwoo Shin. Collaborative llm inference via planning for efficient reasoning. arXiv preprint arXiv:2506.11578, 2025." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "60" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 59 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "text", + "content": "[374] Hyunseok Lee, Seunghyuk Oh, Jaehyung Kim, Jinwoo Shin, and Jihoon Tack. Revise: Learning to refine at test-time via intrinsic self-verification. arXiv preprint arXiv:2502.14565, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 110, + 504, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 504, + 133 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 504, + 133 + ], + "type": "text", + "content": "[375] Jinu Lee and Julia Hockenmaier. Evaluating step-by-step reasoning traces: A survey. arXiv preprint arXiv:2502.12289, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 136, + 505, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 136, + 505, + 170 + ], + "spans": [ + { + "bbox": [ + 111, + 136, + 505, + 170 + ], + "type": "text", + "content": "[376] Jung Hyun Lee, June Yong Yang, Byeongho Heo, Dongyoon Han, and Kang Min Yoo. Token-supervised value models for enhancing mathematical reasoning capabilities of large language models. arXiv preprint arXiv:2407.12863, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 173, + 506, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 173, + 506, + 206 + ], + "spans": [ + { + "bbox": [ + 111, + 173, + 506, + 206 + ], + "type": "text", + "content": "[377] Kuang-Huei Lee, Ian Fischer, Yueh-Hua Wu, Dave Marwood, Shumeet Baluja, Dale Schuurmans, and Xinyun Chen. Evolving deeper llm thinking. arXiv preprint arXiv:2501.09891, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 211, + 504, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 211, + 504, + 255 + ], + "spans": [ + { + "bbox": [ + 111, + 211, + 504, + 255 + ], + "type": "text", + "content": "[378] Lucas Lehnert, Sainbayar Sukhbaatar, DiJia Su, Qinqing Zheng, Paul McVay, Michael Rabbat, and Yuandong Tian. Beyond a*: Better planning with transformers via search dynamics bootstrapping. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=SGoVIC0u0f." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 259, + 506, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 259, + 506, + 304 + ], + "spans": [ + { + "bbox": [ + 111, + 259, + 506, + 304 + ], + "type": "text", + "content": "[379] Bin Lei, Yi Zhang, Shan Zuo, Ali Payani, and Caiwen Ding. MACM: Utilizing a multi-agent system for condition mining in solving complex mathematical problems. 
In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=VR2RdSxtzs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 308, + 506, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 308, + 506, + 341 + ], + "spans": [ + { + "bbox": [ + 111, + 308, + 506, + 341 + ], + "type": "text", + "content": "[380] Jixuan Leng, Cassandra A Cohen, Zhixian Zhang, Chenyan Xiong, and William W Cohen. Semi-structured llm reasoners can be rigorously audited. arXiv preprint arXiv:2505.24217, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 345, + 504, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 345, + 504, + 390 + ], + "spans": [ + { + "bbox": [ + 111, + 345, + 504, + 390 + ], + "type": "text", + "content": "[381] Adam Lerer, Hengyuan Hu, Jakob Foerster, and Noam Brown. Improving policies via search in cooperative partially observable games. Proceedings of the AAAI Conference on Artificial Intelligence, 34(05):7187-7194, Apr. 2020. doi: 10.1609/aaai.v34i05.6208. URL https://ojs.aaai.org/index.php/AAAI/article/view/6208." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 393, + 504, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 393, + 504, + 417 + ], + "spans": [ + { + "bbox": [ + 111, + 393, + 504, + 417 + ], + "type": "text", + "content": "[382] Belinda Z Li, Been Kim, and Zi Wang. Questbench: Can llms ask the right question to acquire information in reasoning tasks? arXiv preprint arXiv:2503.22674, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 420, + 506, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 420, + 506, + 453 + ], + "spans": [ + { + "bbox": [ + 111, + 420, + 506, + 453 + ], + "type": "text", + "content": "[383] Bingxuan Li, Yiwei Wang, Jiuming Gu, Kai-Wei Chang, and Nanyun Peng. 
Metal: A multiagent framework for chart generation with test-time scaling. arXiv preprint arXiv:2502.17651, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 457, + 506, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 457, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 111, + 457, + 506, + 502 + ], + "type": "text", + "content": "[384] Bohan Li, Jiannan Guan, Longxu Dou, Yunlong Feng, Dingzirui Wang, Yang Xu, Enbo Wang, Qiguang Chen, Bichen Wang, Xiao Xu, et al. Can large language models understand you better? an mbti personality detection dataset aligned with population traits. arXiv preprint arXiv:2412.12510, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 506, + 506, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 506, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 111, + 506, + 506, + 540 + ], + "type": "text", + "content": "[385] Chen Li, Weiqi Wang, Jingcheng Hu, Yixuan Wei, Nanning Zheng, Han Hu, Zheng Zhang, and Houwen Peng. Common 7b language models already possess strong math capabilities. arXiv preprint arXiv:2403.04706, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 543, + 504, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 543, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 111, + 543, + 504, + 567 + ], + "type": "text", + "content": "[386] Chen Li, Nazhou Liu, and Kai Yang. Adaptive group policy optimization: Towards stable training and token-efficient reasoning. arXiv preprint arXiv:2503.15952, 2025." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 570, + 504, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 570, + 504, + 604 + ], + "spans": [ + { + "bbox": [ + 111, + 570, + 504, + 604 + ], + "type": "text", + "content": "[387] Chengpeng Li, Zhengyang Tang, Ziniu Li, Mingfeng Xue, Keqin Bao, Tian Ding, Ruoyu Sun, Benyou Wang, Xiang Wang, Junyang Lin, et al. Cort: Code-integrated reasoning within thinking. arXiv preprint arXiv:2506.09820, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 607, + 504, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 607, + 504, + 641 + ], + "spans": [ + { + "bbox": [ + 111, + 607, + 504, + 641 + ], + "type": "text", + "content": "[388] Chengpeng Li, Mingfeng Xue, Zhenru Zhang, Jiaxi Yang, Beichen Zhang, Xiang Wang, Bowen Yu, Binyuan Hui, Junyang Lin, and Dayiheng Liu. Start: Self-taught reasoner with tools. arXiv preprint arXiv:2503.04625, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 644, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 644, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 644, + 506, + 723 + ], + "type": "text", + "content": "[389] Chengshu Li, Jacky Liang, Andy Zeng, Xinyun Chen, Karol Hausman, Dorsa Sadigh, Sergey Levine, Li Fei-Fei, Fei Xia, and Brian Ichter. Chain of code: Reasoning with a language model-augmented code emulator. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 28259-28277. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/1i24ar.html." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "61" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 60 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[390] Chengzhu Li, Wenshan Wu, Huanyu Zhang, Yan Xia, Shaoguang Mao, Li Dong, Ivan Vulic, and Furu Wei. Imagine while reasoning in space: Multimodal visualization-of-thought. arXiv preprint arXiv:2501.07542, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 110, + 506, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 506, + 144 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 506, + 144 + ], + "type": "text", + "content": "[391] Cheryl Li, Tianyuan Xu, and Yiwen Guo. Reasoning-as-logic-units: Scaling test-time reasoning in large language models through logic unit alignment. arXiv preprint arXiv:2502.07803, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 148, + 506, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 148, + 506, + 182 + ], + "spans": [ + { + "bbox": [ + 111, + 148, + 506, + 182 + ], + "type": "text", + "content": "[392] Dacheng Li, Shiyi Cao, Chengkun Cao, Xiuyu Li, Shangyin Tan, Kurt Keutzer, Jiarong Xing, Joseph E Gonzalez, and Ion Stoica. S*: Test time scaling for code generation. arXiv preprint arXiv:2502.14382, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 186, + 505, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 186, + 505, + 220 + ], + "spans": [ + { + "bbox": [ + 111, + 186, + 505, + 220 + ], + "type": "text", + "content": "[393] Dacheng Li, Shiyi Cao, Tyler Griggs, Shu Liu, Xiangxi Mo, Shishir G Patil, Matei Zaharia, Joseph E Gonzalez, and Ion Stoica. Llms can easily learn to reason from demonstrations structure, not content, is what matters! arXiv preprint arXiv:2502.07374, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 224, + 506, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 224, + 506, + 269 + ], + "spans": [ + { + "bbox": [ + 111, + 224, + 506, + 269 + ], + "type": "text", + "content": "[394] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, et al. From generation to judgment: Opportunities and challenges of llm-as-a-judge. arXiv preprint arXiv:2411.16594, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 274, + 504, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 274, + 504, + 297 + ], + "spans": [ + { + "bbox": [ + 111, + 274, + 504, + 297 + ], + "type": "text", + "content": "[395] Gengxu Li, Tingyu Xia, Yi Chang, and Yuan Wu. Length-controlled margin-based preference optimization without reference model. 
arXiv preprint arXiv:2502.14643, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 301, + 504, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 301, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 111, + 301, + 504, + 335 + ], + "type": "text", + "content": "[396] Haitao Li, Qian Dong, Junjie Chen, Huixue Su, Yujia Zhou, Qingyao Ai, Ziyi Ye, and Yiqun Liu. Llms-as-judges: a comprehensive survey on llm-based evaluation methods. arXiv preprint arXiv:2412.05579, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 339, + 506, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 339, + 506, + 384 + ], + "spans": [ + { + "bbox": [ + 111, + 339, + 506, + 384 + ], + "type": "text", + "content": "[397] Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. https://huggingface.co/AI-MO/NuminaMath-CoT, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 388, + 504, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 388, + 504, + 412 + ], + "spans": [ + { + "bbox": [ + 111, + 388, + 504, + 412 + ], + "type": "text", + "content": "[398] Jia-Nan Li, Jian Guan, Wei Wu, and Rui Yan. Extended inductive reasoning for personalized preference inference from behavioral signals. arXiv preprint arXiv:2505.18071, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 415, + 506, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 415, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 111, + 415, + 506, + 449 + ], + "type": "text", + "content": "[399] Jiachun Li, Pengfei Cao, Yubo Chen, Jiexin Xu, Huajun Li, Xiaojian Jiang, Kang Liu, and Jun Zhao. 
Rewarding curse: Analyze and mitigate reward modeling issues for llm reasoning. arXiv preprint arXiv:2503.05188, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 453, + 506, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 453, + 506, + 487 + ], + "spans": [ + { + "bbox": [ + 111, + 453, + 506, + 487 + ], + "type": "text", + "content": "[400] Jierui Li, Hung Le, Yinbo Zhou, Caiming Xiong, Silvio Savarese, and Doyen Sahoo. Codetree: Agent-guided tree search for code generation with large language models. arXiv preprint arXiv:2411.04329, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 492, + 506, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 492, + 506, + 525 + ], + "spans": [ + { + "bbox": [ + 111, + 492, + 506, + 525 + ], + "type": "text", + "content": "[401] Junlong Li, Daya Guo, Dejian Yang, Runxin Xu, Yu Wu, and Junxian He. Codei/o: Condensing reasoning patterns via code input-output prediction. arXiv preprint arXiv:2502.07316, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 530, + 506, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 530, + 506, + 563 + ], + "spans": [ + { + "bbox": [ + 111, + 530, + 506, + 563 + ], + "type": "text", + "content": "[402] Kaixin Li. Verified taco problems. https://huggingface.co/datasets/likaixin/TACO-verified, 2024. URL https://huggingface.co/datasets/likaixin/TACO-verified." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 568, + 504, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 568, + 504, + 592 + ], + "spans": [ + { + "bbox": [ + 111, + 568, + 504, + 592 + ], + "type": "text", + "content": "[403] Kechen Li, Wenqi Zhu, Coralia Cartis, Tianbo Ji, and Shiwei Liu. Sos1: O1 and r1-like reasoning llms are sum-of-square solvers. arXiv preprint arXiv:2502.20545, 2025." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 596, + 506, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 596, + 506, + 630 + ], + "spans": [ + { + "bbox": [ + 111, + 596, + 506, + 630 + ], + "type": "text", + "content": "[404] Long Li, Weiwen Xu, Jiayan Guo, Ruochen Zhao, Xingxuan Li, Yuqian Yuan, Boqiang Zhang, Yuming Jiang, Yifei Xin, Ronghao Dang, et al. Chain of ideas: Revolutionizing research via novel idea development with llm agents. arXiv preprint arXiv:2410.13185, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 634, + 504, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 634, + 504, + 657 + ], + "spans": [ + { + "bbox": [ + 111, + 634, + 504, + 657 + ], + "type": "text", + "content": "[405] Margaret Li, Sneha Kudugunta, and Luke Zettlemoyer. (mis) fitting: A survey of scaling laws. arXiv preprint arXiv:2502.18969, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 661, + 504, + 694 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 661, + 504, + 694 + ], + "spans": [ + { + "bbox": [ + 111, + 661, + 504, + 694 + ], + "type": "text", + "content": "[406] Ming Li, Lichang Chen, Jiuhai Chen, Shwai He, Heng Huang, Jiuming Gu, and Tianyi Zhou. Reflection-tuning: Data recycling improves llm instruction-tuning. arXiv preprint arXiv:2310.11716, 2023." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 111, + 699, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 699, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 699, + 504, + 723 + ], + "type": "text", + "content": "[407] Ming Li, Yanhong Li, and Tianyi Zhou. What happened in llms layers when trained for fast vs. slow thinking: A gradient perspective. arXiv preprint arXiv:2410.23743, 2024." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "62" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 61 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 506, + 117 + ], + "type": "text", + "content": "[408] Minzhi Li, Zhengyuan Liu, Shumin Deng, Shafiq Joty, Nancy Chen, and Min-Yen Kan. Dna-eval: Enhancing large language model evaluation through decomposition and aggregation. In Proceedings of the 31st International Conference on Computational Linguistics, pages 2277-2290, January 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 119, + 505, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 119, + 505, + 153 + ], + "spans": [ + { + "bbox": [ + 111, + 119, + 505, + 153 + ], + "type": "text", + "content": "[409] Moxin Li, Yuantao Zhang, Wenjie Wang, Wentao Shi, Zhuo Liu, Fuli Feng, and Tat-Seng Chua. Self-improvement towards pareto optimality: Mitigating preference conflicts in multi-objective alignment. arXiv preprint arXiv:2502.14354, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 156, + 506, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 156, + 506, + 190 + ], + "spans": [ + { + "bbox": [ + 111, + 156, + 506, + 190 + ], + "type": "text", + "content": "[410] Peiji Li, Kai Lv, Yunfan Shao, Yichuan Ma, Linyang Li, Xiaqing Zheng, Xipeng Qiu, and Qipeng Guo. Fastmcts: A simple sampling strategy for data synthesis. arXiv preprint arXiv:2502.11476, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 194, + 506, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 194, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 111, + 194, + 506, + 228 + ], + "type": "text", + "content": "[411] Qingyao Li, Wei Xia, Kounianhua Du, Xinyi Dai, Ruiming Tang, Yasheng Wang, Yong Yu, and Weinan Zhang. Rethinkmcts: Refining erroneous thoughts in monte carlo tree search for code generation. arXiv preprint arXiv:2409.09584, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 230, + 504, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 230, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 111, + 230, + 504, + 275 + ], + "type": "text", + "content": "[412] Shuangtao Li, Shuaihao Dong, Kexin Luan, Xinhan Di, and Chaofan Ding. Enhancing reasoning through process supervision with monte carlo tree search. In The First Workshop on Neural Reasoning and Mathematical Discovery at AAAI'2025, January 2025. URL https://openreview.net/forum?id=OupEEi1341." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 278, + 504, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 278, + 504, + 302 + ], + "spans": [ + { + "bbox": [ + 111, + 278, + 504, + 302 + ], + "type": "text", + "content": "[413] Siheng Li, Zhanhui Zhou, Wai Lam, Chao Yang, and Chaochao Lu. Repo: Replay-enhanced policy optimization. arXiv preprint arXiv:2506.09340, 2025." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 304, + 506, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 304, + 506, + 338 + ], + "spans": [ + { + "bbox": [ + 111, + 304, + 506, + 338 + ], + "type": "text", + "content": "[414] Wen-Ding Li, Keya Hu, Carter Larsen, Yuqing Wu, Simon Alford, Caleb Woo, Spencer M Dunn, Hao Tang, Michelangelo Naim, Dat Nguyen, et al. Combining induction and transduction for abstract reasoning. arXiv preprint arXiv:2411.02272, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 341, + 504, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 341, + 504, + 364 + ], + "spans": [ + { + "bbox": [ + 111, + 341, + 504, + 364 + ], + "type": "text", + "content": "[415] Wendi Li and Yixuan Li. Process reward model with q-value rankings. arXiv preprint arXiv:2410.11287, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 367, + 506, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 367, + 506, + 400 + ], + "spans": [ + { + "bbox": [ + 111, + 367, + 506, + 400 + ], + "type": "text", + "content": "[416] Wenjun Li, Changyu Chen, and Pradeep Varakantham. Unlocking large language model's planning capabilities with maximum diversity fine-tuning. arXiv preprint arXiv:2406.10479, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 403, + 506, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 403, + 506, + 460 + ], + "spans": [ + { + "bbox": [ + 111, + 403, + 506, + 460 + ], + "type": "text", + "content": "[417] Xiaonan Li and Xipeng Qiu. MoT: Memory-of-thought enables ChatGPT to self-improve. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 6354-6374, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.392. 
URL https://aclanthology.org/2023.emnlp-main.392/." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 462, + 506, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 462, + 506, + 497 + ], + "spans": [ + { + "bbox": [ + 111, + 462, + 506, + 497 + ], + "type": "text", + "content": "[418] Xiaoxi Li, Guanting Dong, Jiajie Jin, Yuyao Zhang, Yujia Zhou, Yutao Zhu, Peitian Zhang, and Zhicheng Dou. Search-o1: Agentic search-enhanced large reasoning models. arXiv preprint arXiv:2501.05366, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 499, + 504, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 499, + 504, + 523 + ], + "spans": [ + { + "bbox": [ + 111, + 499, + 504, + 523 + ], + "type": "text", + "content": "[419] Xinzhe Li. A survey on llm test-time compute via search: Tasks, llm profiling, search algorithms, and relevant frameworks. arXiv preprint arXiv:2501.10069, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 525, + 504, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 525, + 504, + 548 + ], + "spans": [ + { + "bbox": [ + 111, + 525, + 504, + 548 + ], + "type": "text", + "content": "[420] Xuefeng Li, Haoyang Zou, and Pengfei Liu. Limr: Less is more for rl scaling. arXiv preprint arXiv:2502.11886, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 552, + 506, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 552, + 506, + 585 + ], + "spans": [ + { + "bbox": [ + 111, + 552, + 506, + 585 + ], + "type": "text", + "content": "[421] Yafu Li, Zhilin Wang, Tingchen Fu, Ganqu Cui, Sen Yang, and Yu Cheng. From drafts to answers: Unlocking lIm potential via aggregation fine-tuning. arXiv preprint arXiv:2501.11877, 2025." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 588, + 504, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 588, + 504, + 611 + ], + "spans": [ + { + "bbox": [ + 111, + 588, + 504, + 611 + ], + "type": "text", + "content": "[422] Yang Li. Policy guided tree search for enhanced ltm reasoning. arXiv preprint arXiv:2502.06813, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 614, + 506, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 614, + 506, + 649 + ], + "spans": [ + { + "bbox": [ + 111, + 614, + 506, + 649 + ], + "type": "text", + "content": "[423] Yang Li, Dong Du, Linfeng Song, Chen Li, Weikang Wang, Tao Yang, and Haitao Mi. Hunyuanprover: A scalable data synthesis framework and guided tree search for automated theorem proving. arXiv preprint arXiv:2412.20735, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 651, + 506, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 651, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 111, + 651, + 506, + 696 + ], + "type": "text", + "content": "[424] Yang Li, Youssef Emad, Karthik Padthe, Jack Lanchantin, Weizhe Yuan, Thao Nguyen, Jason Weston, Shang-Wen Li, Dong Wang, Ilia Kulikov, et al. Naturalthoughts: Selecting and distilling reasoning traces for general reasoning tasks. arXiv preprint arXiv:2507.01921, 2025." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 111, + 699, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 699, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 699, + 506, + 723 + ], + "type": "text", + "content": "[425] Yifei Li, Zeqi Lin, Shizhuo Zhang, Qiang Fu, Bei Chen, Jian-Guang Lou, and Weizhu Chen. Making language models better reasoners with step-aware verifier. 
In Anna Rogers, Jordan" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "63" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 62 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 135, + 72, + 506, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 72, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 135, + 72, + 506, + 118 + ], + "type": "text", + "content": "Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5315-5333, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.291. URL https://aclanthology.org/2023.acl-long.291/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 121, + 506, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 121, + 506, + 166 + ], + "spans": [ + { + "bbox": [ + 111, + 121, + 506, + 166 + ], + "type": "text", + "content": "[426] Yiwei Li, Ji Zhang, Shaoxiong Feng, Peiwen Yuan, Xinglin Wang, Jiayi Shi, Yueqi Zhang, Chuyi Tan, Boyuan Pan, Yao Hu, et al. Revisiting self-consistency from dynamic distributional alignment perspective on answer aggregation. 
arXiv preprint arXiv:2502.19830, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 171, + 506, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 171, + 506, + 239 + ], + "spans": [ + { + "bbox": [ + 111, + 171, + 506, + 239 + ], + "type": "text", + "content": "[427] Yujia Li, David Choi, Junyoung Chung, Nate Kushman, Julian Schrittwieser, Rémi Leblond, Tom Eccles, James Keeling, Felix Gimeno, Agustin Dal Lago, Thomas Hubert, Peter Choy, Cyprien de Masson d'Autume, Igor Babuschkin, Xinyun Chen, Po-Sen Huang, Johannes Welbl, Sven Gowal, Alexey Cherepanov, James Molloy, Daniel Mankowitz, Esme Sutherland Robson, Pushmeet Kohli, Nando de Freitas, Koray Kavukcuoglu, and Oriol Vinyals. Competition-level code generation with alphabet. arXiv preprint arXiv:2203.07814, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 243, + 506, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 243, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 111, + 243, + 506, + 277 + ], + "type": "text", + "content": "[428] Yunxin Li, Zhenyu Liu, Zitao Li, Xuanyu Zhang, Zhenran Xu, Xinyu Chen, Haoyuan Shi, Shenyuan Jiang, Xintong Wang, Jifang Wang, et al. Perception, reason, think, and plan: A survey on large multimodal reasoning models. arXiv preprint arXiv:2505.04921, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 281, + 504, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 281, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 111, + 281, + 504, + 304 + ], + "type": "text", + "content": "[429] Zheng Li, Qingxiu Dong, Jingyuan Ma, Di Zhang, and Zhifang Sui. Selfbudgeter: Adaptive token allocation for efficient llm reasoning. arXiv preprint arXiv:2505.11274, 2025." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 308, + 506, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 308, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 111, + 308, + 506, + 354 + ], + "type": "text", + "content": "[430] Zhiyuan Li, Hong Liu, Denny Zhou, and Tengyu Ma. Chain of thought empowers transformers to solve inherently serial problems. In The Twelfth International Conference on Learning Representations, January 2023. URL https://openreview.net/pdf?id=3EWTEy9MTM." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 358, + 506, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 358, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 111, + 358, + 506, + 425 + ], + "type": "text", + "content": "[431] Zhiyuan Li, Dongnan Liu, Chaoyi Zhang, Heng Wang, Tengfei Xue, and Weidong Cai. Enhancing advanced visual reasoning ability of large language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 1915-1929, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.114. URL https://aclanthology.org/2024.emnlp-main.114/." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 430, + 506, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 430, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 111, + 430, + 506, + 464 + ], + "type": "text", + "content": "[432] Zhong-Zhi Li, Duzhen Zhang, Ming-Liang Zhang, Jiaxin Zhang, Zengyan Liu, Yuxuan Yao, Haotian Xu, Junhao Zheng, Pei-Jie Wang, Xiuyi Chen, et al. From system 1 to system 2: A survey of reasoning large language models. arXiv preprint arXiv:2502.17419, 2025." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 468, + 506, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 468, + 506, + 545 + ], + "spans": [ + { + "bbox": [ + 111, + 468, + 506, + 545 + ], + "type": "text", + "content": "[433] Zhongzhi Li, Ming-Liang Zhang, Pei-Jie Wang, Jian Xu, Rui-Song Zhang, Yin Fei, Zhi-Long Ji, Jin-Feng Bai, Zhen-Ru Pan, Jiaxin Zhang, and Cheng-Lin Liu. CMMaTH: A Chinese multi-modal math skill evaluation benchmark for foundation models. In Owen Rambow, Leo Wanner, Marianna Apidianaki, Hend Al-Khalifa, Barbara Di Eugenio, and Steven Schockaert, editors, Proceedings of the 31st International Conference on Computational Linguistics, pages 2690–2726, Abu Dhabi, UAE, January 2025. Association for Computational Linguistics. URL https://aclanthology.org/2025.coling-main.184/." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 551, + 506, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 551, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 111, + 551, + 506, + 586 + ], + "type": "text", + "content": "[434] Zhuoqun Li, Haiyang Yu, Xuanang Chen, Hongyu Lin, Yaojie Lu, Fei Huang, Xianpei Han, Yongbin Li, and Le Sun. Deepsolution: Boosting complex engineering solution design via tree-based exploration and bi-point thinking. arXiv preprint arXiv:2502.20730, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 589, + 506, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 589, + 506, + 634 + ], + "spans": [ + { + "bbox": [ + 111, + 589, + 506, + 634 + ], + "type": "text", + "content": "[435] Zichao Li, Xueru Wen, Jie Lou, Yuqiu Ji, Yaojie Lu, Xianpei Han, Debing Zhang, and Le Sun. The devil is in the details: Tackling unimodal spurious correlations for generalizable multimodal reward models. In *Forty-second International Conference on Machine Learning*, 2025. URL https://openreview.net/forum?id=b0qRSUcQP7." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 639, + 506, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 639, + 506, + 684 + ], + "spans": [ + { + "bbox": [ + 111, + 639, + 506, + 684 + ], + "type": "text", + "content": "[436] Ziniu Li, Tian Xu, Yushun Zhang, Zhihang Lin, Yang Yu, Ruoyu Sun, and Zhi-Quan Luo. Remax: A simple, effective, and efficient reinforcement learning method for aligning large language models. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=Stn8hXkpe6." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 688, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 688, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 688, + 504, + 723 + ], + "type": "text", + "content": "[437] Jing Liang, Hongyao Tang, Yi Ma, Jinyi Liu, Yan Zheng, Shuyue Hu, Lei Bai, and Jianye Hao. Squeeze the soaked sponge: Efficient off-policy reinforcement finetuning for large language model. arXiv preprint arXiv:2507.06892, 2025." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "64" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 63 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[438] Jintao Liang, Gang Su, Huifeng Lin, You Wu, Rui Zhao, and Ziyue Li. Reasoning rag via system 1 or system 2: A survey on reasoning agentic retrieval-augmented generation for industry challenges. arXiv preprint arXiv:2506.10408, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 110, + 505, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 505, + 144 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 505, + 144 + ], + "type": "text", + "content": "[439] Xiao Liang, Zhong-Zhi Li, Yeyun Gong, Yang Wang, Hengyuan Zhang, Yelong Shen, Ying Nian Wu, and Weizhu Chen. Sws: Self-aware weakness-driven problem synthesis in reinforcement learning for llm reasoning. arXiv preprint arXiv:2506.08989, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 148, + 505, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 148, + 505, + 182 + ], + "spans": [ + { + "bbox": [ + 111, + 148, + 505, + 182 + ], + "type": "text", + "content": "[440] Xun Liang, Shichao Song, Zifan Zheng, Hanyu Wang, Qingchen Yu, Xunkai Li, Rong-Hua Li, Yi Wang, Zhonghao Wang, Feiyu Xiong, et al. Internal consistency and self-feedback in large language models: A survey. arXiv preprint arXiv:2407.14507, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 186, + 505, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 186, + 505, + 219 + ], + "spans": [ + { + "bbox": [ + 111, + 186, + 505, + 219 + ], + "type": "text", + "content": "[441] Baohao Liao, Xinyi Chen, Sara Rajaee, Yuhui Xu, Christian Herold, Anders Søgaard, Maarten de Rijke, and Christof Monz. Lost at the beginning of reasoning. arXiv preprint arXiv:2506.22058, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 223, + 505, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 223, + 505, + 257 + ], + "spans": [ + { + "bbox": [ + 111, + 223, + 505, + 257 + ], + "type": "text", + "content": "[442] Baohao Liao, Yuhui Xu, Hanze Dong, Junnan Li, Christof Monz, Silvio Savarese, Doyen Sahoo, and Caiming Xiong. Reward-guided speculative decoding for efficient ltm reasoning. arXiv preprint arXiv:2501.19324, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 261, + 505, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 261, + 505, + 317 + ], + "spans": [ + { + "bbox": [ + 111, + 261, + 505, + 317 + ], + "type": "text", + "content": "[443] Huanxuan Liao, Shizhu He, Yupu Hao, Xiang Li, Yanzhe Zhang, Jun Zhao, and Kang Liu. Skintern: Internalizing symbolic knowledge for distilling better cot capabilities into small language models. 
In Proceedings of the 31st International Conference on Computational Linguistics, pages 3203-3221, January 2025. URL https://aclanthology.org/2025.coling-main.215.pdf." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 321, + 505, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 321, + 505, + 355 + ], + "spans": [ + { + "bbox": [ + 111, + 321, + 505, + 355 + ], + "type": "text", + "content": "[444] Mengqi Liao, Xiangyu Xi, Ruinian Chen, Jia Leng, Yangen Hu, Ke Zeng, Shuai Liu, and Huaiyu Wan. Enhancing efficiency and exploration in reinforcement learning for llms. arXiv preprint arXiv:2505.18573, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 358, + 505, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 358, + 505, + 382 + ], + "spans": [ + { + "bbox": [ + 111, + 358, + 505, + 382 + ], + "type": "text", + "content": "[445] Minpeng Liao, Wei Luo, Chengxi Li, Jing Wu, and Kai Fan. Mario: Math reasoning with code interpreter output-a reproducible pipeline. arXiv preprint arXiv:2401.08190, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 386, + 505, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 386, + 505, + 409 + ], + "spans": [ + { + "bbox": [ + 111, + 386, + 505, + 409 + ], + "type": "text", + "content": "[446] Weibin Liao, Xu Chu, and Yasha Wang. Tpo: Aligning large language models with multi-branch & multi-step preference trees. arXiv preprint arXiv:2410.12854, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 412, + 505, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 412, + 505, + 458 + ], + "spans": [ + { + "bbox": [ + 111, + 412, + 505, + 458 + ], + "type": "text", + "content": "[447] Jonathan Light, Min Cai, Weiqin Chen, Guanzhi Wang, Xiusi Chen, Wei Cheng, Yisong Yue, and Ziniu Hu. 
Strategist: Learning strategic skills by LLMs via bi-level tree search. In Automated Reinforcement Learning: Exploring Meta-Learning, AutoML, and LLMs, June 2024. URL https://openreview.net/forum?id=UHWbmZuJPF." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 461, + 505, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 461, + 505, + 495 + ], + "spans": [ + { + "bbox": [ + 111, + 461, + 505, + 495 + ], + "type": "text", + "content": "[448] Jonathan Light, Yue Wu, Yiyou Sun, Wenchao Yu, Xujiang Zhao, Ziniu Hu, Haifeng Chen, Wei Cheng, et al. Scattered forest search: Smarter code space exploration with llms. arXiv preprint arXiv:2411.05010, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 498, + 505, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 498, + 505, + 544 + ], + "spans": [ + { + "bbox": [ + 111, + 498, + 505, + 544 + ], + "type": "text", + "content": "[449] Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=v8L0pN6EOi." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 548, + 505, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 548, + 505, + 582 + ], + "spans": [ + { + "bbox": [ + 111, + 548, + 505, + 582 + ], + "type": "text", + "content": "[450] Bill Yuchen Lin, Ronan Le Bras, Kyle Richardson, Ashish Sabharwal, Radha Poovendran, Peter Clark, and Yejin Choi. Zebralogic: On the scaling limits of lms for logical reasoning. arXiv preprint arXiv:2502.01100, 2025." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 586, + 505, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 586, + 505, + 609 + ], + "spans": [ + { + "bbox": [ + 111, + 586, + 505, + 609 + ], + "type": "text", + "content": "[451] Haohan Lin, Zhiqing Sun, Yiming Yang, and Sean Welleck. Lean-star: Learning to interleave thinking and proving. arXiv preprint arXiv:2407.10040, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 613, + 505, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 613, + 505, + 647 + ], + "spans": [ + { + "bbox": [ + 111, + 613, + 505, + 647 + ], + "type": "text", + "content": "[452] Qingwen Lin, Boyan Xu, Guimin Hu, Zijian Li, Zhifeng Hao, Keli Zhang, and Ruichu Cai. Cmcts: A constrained monte carlo tree search framework for mathematical reasoning in large language model. arXiv preprint arXiv:2502.11169, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 651, + 505, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 651, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 651, + 505, + 685 + ], + "type": "text", + "content": "[453] Qingwen Lin, Boyan Xu, Zijian Li, Zhifeng Hao, Keli Zhang, and Ruichu Cai. Leveraging constrained monte carlo tree search to generate reliable long chain-of-thought for mathematical reasoning. arXiv preprint arXiv:2502.11169, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 689, + 505, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 505, + 723 + ], + "type": "text", + "content": "[454] Yen-Ting Lin, Di Jin, Tengyu Xu, Tianhao Wu, Sainbayar Sukhbaatar, Chen Zhu, Yun He, Yun-Nung Chen, Jason Weston, Yuandong Tian, et al. Step-kto: Optimizing mathematical reasoning through stepwise binary feedback. arXiv preprint arXiv:2501.10799, 2025." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "65" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 64 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 507, + 722 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 111, + 72, + 507, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 507, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 507, + 106 + ], + "type": "text", + "content": "[455] Yujie Lin, Ante Wang, Moye Chen, Jingyao Liu, Hao Liu, Jinsong Su, and Xinyan Xiao. Investigating inference-time scaling for chain of multi-modal thought: A preliminary study. arXiv preprint arXiv:2502.11514, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 110, + 507, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 507, + 177 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 507, + 177 + ], + "type": "text", + "content": "[456] Zicheng Lin, Zhibin Gou, Tian Liang, Ruilin Luo, Haowei Liu, and Yujiu Yang. CriticBench: Benchmarking LLMs for critique-correct reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 1552–1587, Bangkok, Thailand, August 2024. Association for Computational Linguistics. 
doi: 10.18653/v1/2024-findings-acl.91. URL https://aclanthology.org/2024-findings-acl.91/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 180, + 505, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 180, + 505, + 217 + ], + "spans": [ + { + "bbox": [ + 111, + 180, + 505, + 217 + ], + "type": "text", + "content": "[457] Zicheng Lin, Tian Liang, Jiahao Xu, Xing Wang, Ruilin Luo, Chufan Shi, Siheng Li, Yujiu Yang, and Zhaopeng Tu. Critical tokens matter: Token-level contrastive estimation enhance llm's reasoning capability. arXiv preprint arXiv:2411.19943, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 219, + 506, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 219, + 506, + 253 + ], + "spans": [ + { + "bbox": [ + 111, + 219, + 506, + 253 + ], + "type": "text", + "content": "[458] Zongyu Lin, Yao Tang, Xingcheng Yao, Da Yin, Ziniu Hu, Yizhou Sun, and Kai-Wei Chang. Qlass: Boosting language agent inference via q-guided stepwise search. arXiv preprint arXiv:2502.02584, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 257, + 504, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 257, + 504, + 291 + ], + "spans": [ + { + "bbox": [ + 111, + 257, + 504, + 291 + ], + "type": "text", + "content": "[459] Zehui Ling, Deshu Chen, Hongwei Zhang, Yifeng Jiao, Xin Guo, and Yuan Cheng. Fast on the easy, deep on the hard: Efficient reasoning via powered length penalty. arXiv preprint arXiv:2506.10446, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 295, + 506, + 362 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 295, + 506, + 362 + ], + "spans": [ + { + "bbox": [ + 111, + 295, + 506, + 362 + ], + "type": "text", + "content": "[460] Zhan Ling, Yunhao Fang, Xuanlin Li, Zhiao Huang, Mingu Lee, Roland Memisevic, and Hao Su. Deductive verification of chain-of-thought reasoning. 
In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 36407-36433. Curran Associates, Inc., September 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/72393bd47a35f5b3bee4c609e7bba733-Paper-Conference.pdf." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 366, + 504, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 366, + 504, + 390 + ], + "spans": [ + { + "bbox": [ + 111, + 366, + 504, + 390 + ], + "type": "text", + "content": "[461] Philip Lippmann and Jie Yang. Style over substance: Distilled language models reason via stylistic replication. arXiv preprint arXiv:2504.01738, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 393, + 506, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 393, + 506, + 472 + ], + "spans": [ + { + "bbox": [ + 111, + 393, + 506, + 472 + ], + "type": "text", + "content": "[462] Aiwei Liu, Haoping Bai, Zhiyun Lu, Xiang Kong, Xiaoming Wang, Jiulong Shan, Meng Cao, and Lijie Wen. Direct large language model alignment through self-rewarding contrastive prompt distillation. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9688–9712, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.523. URL https://aclanthology.org/2024.acl-long.523/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 475, + 506, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 475, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 111, + 475, + 506, + 510 + ], + "type": "text", + "content": "[463] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. 
Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 514, + 506, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 514, + 506, + 548 + ], + "spans": [ + { + "bbox": [ + 111, + 514, + 506, + 548 + ], + "type": "text", + "content": "[464] Bingbin Liu, Sebastien Bubeck, Ronen Eldan, Janardhan Kulkarni, Yanzhi Li, Anh Nguyen, Rachel Ward, and Yi Zhang. Tinygsm: achieving " + }, + { + "bbox": [ + 111, + 514, + 506, + 548 + ], + "type": "inline_equation", + "content": ">80\\%" + }, + { + "bbox": [ + 111, + 514, + 506, + 548 + ], + "type": "text", + "content": " on gsm8k with small language models. arXiv preprint arXiv:2312.09241, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 552, + 507, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 552, + 507, + 596 + ], + "spans": [ + { + "bbox": [ + 111, + 552, + 507, + 596 + ], + "type": "text", + "content": "[465] Bo Liu, Leon Guertler, Simon Yu, Zichen Liu, Penghui Qi, Daniel Balcells, Mickel Liu, Cheston Tan, Weiyan Shi, Min Lin, et al. Spiral: Self-play on zero-sum games incentivizes reasoning via multi-agent multi-turn reinforcement learning. arXiv preprint arXiv:2506.24119, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 601, + 506, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 601, + 506, + 635 + ], + "spans": [ + { + "bbox": [ + 111, + 601, + 506, + 635 + ], + "type": "text", + "content": "[466] Chris Yuhao Liu, Liang Zeng, Jiacai Liu, Rui Yan, Jujie He, Chaojie Wang, Shuicheng Yan, Yang Liu, and Yahui Zhou. Skywork-reward: Bag of tricks for reward modeling in llms. arXiv preprint arXiv:2410.18451, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 639, + 506, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 639, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 111, + 639, + 506, + 673 + ], + "type": "text", + "content": "[467] Chris Yuhao Liu, Liang Zeng, Yuzhen Xiao, Jujie He, Jiacai Liu, Chaojie Wang, Rui Yan, Wei Shen, Fuxiang Zhang, Jiacheng Xu, et al. Skywork-reward-v2: Scaling preference data curation via human-ai synergy. arXiv preprint arXiv:2507.01352, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 677, + 507, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 677, + 507, + 722 + ], + "spans": [ + { + "bbox": [ + 111, + 677, + 507, + 722 + ], + "type": "text", + "content": "[468] Cong Liu, Zhong Wang, ShengYu Shen, Jialiang Peng, Xiaoli Zhang, Zhen-Dong Du, and YaFang Wang. The chinese dataset distilled from deepseek-r1-671b. https://huggingface.co/datasets/Congliu/Chinese-DeepSeek-R1-Distill-data-110k, 2025." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 742, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 742, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 742, + 311, + 750 + ], + "type": "text", + "content": "66" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 65 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[469] Dancheng Liu, Amir Nassereldine, Ziming Yang, Chenhui Xu, Yuting Hu, Jiajie Li, Utkarsh Kumar, Changjae Lee, Ruiyang Qin, Yiyu Shi, et al. Large language models have intrinsic self-correction ability. arXiv preprint arXiv:2406.15673, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 110, + 504, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 504, + 133 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 504, + 133 + ], + "type": "text", + "content": "[470] Fan Liu, Wenshuo Chao, Naiqiang Tan, and Hao Liu. Bag of tricks for inference-time computation of lIm reasoning. arXiv preprint arXiv:2502.07191, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 137, + 504, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 137, + 504, + 171 + ], + "spans": [ + { + "bbox": [ + 111, + 137, + 504, + 171 + ], + "type": "text", + "content": "[471] Guanlin Liu, Kaixuan Ji, Renjie Zheng, Zheng Wu, Chen Dun, Quanquan Gu, and Lin Yan. Enhancing multi-step reasoning abilities of language models through direct q-function optimization. arXiv preprint arXiv:2410.09302, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 175, + 506, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 175, + 506, + 209 + ], + "spans": [ + { + "bbox": [ + 111, + 175, + 506, + 209 + ], + "type": "text", + "content": "[472] Hanbing Liu, Lang Cao, Yuanyi Ren, Mengyu Zhou, Haoyu Dong, Xiaojun Ma, Shi Han, and Dongmei Zhang. Bingo: Boosting efficient reasoning of llms via dynamic and significance-based reinforcement learning. arXiv preprint arXiv:2506.08125, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 213, + 506, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 213, + 506, + 246 + ], + "spans": [ + { + "bbox": [ + 111, + 213, + 506, + 246 + ], + "type": "text", + "content": "[473] Hanmeng Liu, Zhizhang Fu, Mengru Ding, Ruoxi Ning, Chaoli Zhang, Xiaozhang Liu, and Yue Zhang. Logical reasoning in large language models: A survey. arXiv preprint arXiv:2502.09100, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 251, + 506, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 251, + 506, + 283 + ], + "spans": [ + { + "bbox": [ + 111, + 251, + 506, + 283 + ], + "type": "text", + "content": "[474] Hao Liu, Zhengren Wang, Xi Chen, Zhiyu Li, Feiyu Xiong, Qinhan Yu, and Wentao Zhang. Hoprag: Multi-hop reasoning for logic-aware retrieval-augmented generation. arXiv preprint arXiv:2502.12442, 2025." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 289, + 506, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 289, + 506, + 334 + ], + "spans": [ + { + "bbox": [ + 111, + 289, + 506, + 334 + ], + "type": "text", + "content": "[475] Hongxuan Liu, Zhiyao Luo, and Tingting Zhu. Best of both worlds: Harmonizing LLM capabilities in decision-making and question-answering for treatment regimes. In Advances In Medical Foundation Models: Explainability, Robustness, Security, and Beyond, 2024. URL https://openreview.net/forum?id=afu9qhp7md." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 337, + 504, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 337, + 504, + 371 + ], + "spans": [ + { + "bbox": [ + 111, + 337, + 504, + 371 + ], + "type": "text", + "content": "[476] Jiacai Liu, Chaojie Wang, Chris Yuhao Liu, Liang Zeng, Rui Yan, Yiwen Sun, Yang Liu, and Yahui Zhou. Improving multi-step reasoning abilities of large language models with direct advantage policy optimization. arXiv preprint arXiv:2412.18279, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 375, + 504, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 375, + 504, + 420 + ], + "spans": [ + { + "bbox": [ + 111, + 375, + 504, + 420 + ], + "type": "text", + "content": "[477] Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. Don't throw away your value model! generating more preferable text with value-guided monte-carlo tree search decoding. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=kh9Zt2Ldmn." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 424, + 506, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 424, + 506, + 458 + ], + "spans": [ + { + "bbox": [ + 111, + 424, + 506, + 458 + ], + "type": "text", + "content": "[478] Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. Making PPO even better: Value-guided monte-carlo tree search decoding, September 2024. URL https://openreview.net/forum?id=QaODpeRaOK." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 461, + 506, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 461, + 506, + 495 + ], + "spans": [ + { + "bbox": [ + 111, + 461, + 506, + 495 + ], + "type": "text", + "content": "[479] Junnan Liu, Hongwei Liu, Linchen Xiao, Shudong Liu, Taolin Zhang, Zihan Ma, Songyang Zhang, and Kai Chen. Deciphering trajectory-aided lIm reasoning: An optimization perspective. arXiv preprint arXiv:2505.19815, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 499, + 506, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 499, + 506, + 533 + ], + "spans": [ + { + "bbox": [ + 111, + 499, + 506, + 533 + ], + "type": "text", + "content": "[480] Junnan Liu, Linhao Luo, Thuy-Trang Vu, and Gholamreza Haffari. Situatedthinker: Grounding llm reasoning with real-world through situated thinking. arXiv preprint arXiv:2505.19300, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 537, + 504, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 537, + 504, + 572 + ], + "spans": [ + { + "bbox": [ + 111, + 537, + 504, + 572 + ], + "type": "text", + "content": "[481] Junteng Liu, Yuanxiang Fan, Zhuo Jiang, Han Ding, Yongyi Hu, Chi Zhang, Yiqi Shi, Shitong Weng, Aili Chen, Shiqi Chen, et al. Synlogic: Synthesizing verifiable reasoning data at scale for learning logical reasoning and beyond. 
arXiv preprint arXiv:2505.19641, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 575, + 504, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 575, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 111, + 575, + 504, + 609 + ], + "type": "text", + "content": "[482] Liping Liu, Chunhong Zhang, Likang Wu, Chuang Zhao, Zheng Hu, Ming He, and Jianping Fan. Instruct-of-reflection: Enhancing large language models iterative reflection capabilities via dynamic-meta instruction. arXiv preprint arXiv:2503.00902, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 613, + 504, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 613, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 111, + 613, + 504, + 647 + ], + "type": "text", + "content": "[483] Mingjie Liu, Shizhe Diao, Ximing Lu, Jian Hu, Xin Dong, Yejin Choi, Jan Kautz, and Yi Dong. Prorl: Prolonged reinforcement learning expands reasoning boundaries in large language models. arXiv preprint arXiv:2505.24864, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 651, + 506, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 651, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 651, + 506, + 685 + ], + "type": "text", + "content": "[484] Qiang Liu, Xinlong Chen, Yue Ding, Shizhen Xu, Shu Wu, and Liang Wang. Attention-guided self-reflection for zero-shot hallucination detection in large language models. arXiv preprint arXiv:2501.09997, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "text", + "content": "[485] Qin Liu, Wenxuan Zhou, Nan Xu, James Y Huang, Fei Wang, Sheng Zhang, Hoifung Poon, and Muhao Chen. 
Metascale: Test-time scaling with evolving meta-thoughts. arXiv preprint arXiv:2503.13447, 2025." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "67" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 66 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 507, + 722 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[486] Runze Liu, Junqi Gao, Jian Zhao, Kaiyan Zhang, Xiu Li, Biqing Qi, Wanli Ouyang, and Bowen Zhou. Can 1b llm surpass 405b llm? rethinking compute-optimal test-time scaling. arXiv preprint arXiv:2502.06703, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 108, + 504, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 108, + 504, + 142 + ], + "spans": [ + { + "bbox": [ + 111, + 108, + 504, + 142 + ], + "type": "text", + "content": "[487] Tengxuan Liu, Shiyao Li, Jiayi Yang, Tianchen Zhao, Feng Zhou, Xiaohui Song, Guohao Dai, Shengen Yan, Huazhong Yang, and Yu Wang. Pm-kvq: Progressive mixed-precision kv cache quantization for long-cot llms. arXiv preprint arXiv:2505.18610, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 144, + 506, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 144, + 506, + 178 + ], + "spans": [ + { + "bbox": [ + 111, + 144, + 506, + 178 + ], + "type": "text", + "content": "[488] Wanlong Liu, Junxiao Xu, Fei Yu, Yukang Lin, Ke Ji, Wenyu Chen, Yan Xu, Yasheng Wang, Lifeng Shang, and Benyou Wang. Qfft, question-free fine-tuning for adaptive reasoning. arXiv preprint arXiv:2506.12860, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 180, + 504, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 180, + 504, + 204 + ], + "spans": [ + { + "bbox": [ + 111, + 180, + 504, + 204 + ], + "type": "text", + "content": "[489] Wei Liu, Junlong Li, Xiwen Zhang, Fan Zhou, Yu Cheng, and Junxian He. Diving into self-evolving training for multimodal reasoning. arXiv preprint arXiv:2412.17451, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 206, + 506, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 206, + 506, + 239 + ], + "spans": [ + { + "bbox": [ + 111, + 206, + 506, + 239 + ], + "type": "text", + "content": "[490] Wei Liu, Ruochen Zhou, Yiyun Deng, Yuzhen Huang, Junteng Liu, Yuntian Deng, Yizhe Zhang, and Junxian He. Learn to reason efficiently with adaptive length-based reward shaping. arXiv preprint arXiv:2505.15612, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 242, + 504, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 242, + 504, + 265 + ], + "spans": [ + { + "bbox": [ + 111, + 242, + 504, + 265 + ], + "type": "text", + "content": "[491] Ye Liu, Kevin Qinghong Lin, Chang Wen Chen, and Mike Zheng Shou. Videomind: A chain-of-lora agent for long video reasoning. arXiv preprint arXiv:2503.13444, 2025." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 267, + 506, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 267, + 506, + 300 + ], + "spans": [ + { + "bbox": [ + 111, + 267, + 506, + 300 + ], + "type": "text", + "content": "[492] Yongjiang Liu, Haoxi Li, Xiaosong Ma, Jie Zhang, and Song Guo. Think how to think: Mitigating overthinking with autonomous difficulty cognition in large reasoning models. arXiv preprint arXiv:2507.02663, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 303, + 506, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 303, + 506, + 336 + ], + "spans": [ + { + "bbox": [ + 111, + 303, + 506, + 336 + ], + "type": "text", + "content": "[493] Yue Liu, Hongcheng Gao, Shengfang Zhai, Jun Xia, Tianyi Wu, Zhiwei Xue, Yulin Chen, Kenji Kawaguchi, Jiaheng Zhang, and Bryan Hooi. Guardreasoner: Towards reasoning-based llm safeguards. arXiv preprint arXiv:2501.18492, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 339, + 506, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 339, + 506, + 372 + ], + "spans": [ + { + "bbox": [ + 111, + 339, + 506, + 372 + ], + "type": "text", + "content": "[494] Yue Liu, Jiaying Wu, Yufei He, Hongcheng Gao, Hongyu Chen, Baolong Bi, Ruihan Gong, Jiaheng Zhang, Zhiqi Huang, and Bryan Hooi. Efficient inference for large reasoning models: A survey. arXiv preprint arXiv:2503.23077, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 375, + 504, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 375, + 504, + 408 + ], + "spans": [ + { + "bbox": [ + 111, + 375, + 504, + 408 + ], + "type": "text", + "content": "[495] Yuliang Liu, Junjie Lu, Zhaoling Chen, Chaofeng Qu, Jason Klein Liu, Chonghan Liu, Zefan Cai, Yunhui Xia, Li Zhao, Jiang Bian, et al. Adaptivestep: Automatically dividing reasoning step through model confidence. 
arXiv preprint arXiv:2502.13943, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 411, + 506, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 411, + 506, + 445 + ], + "spans": [ + { + "bbox": [ + 111, + 411, + 506, + 445 + ], + "type": "text", + "content": "[496] Zhaowei Liu, Xin Guo, Fangqi Lou, Lingfeng Zeng, Jinyi Niu, Zixuan Wang, Jiajie Xu, Weige Cai, Ziwei Yang, Xueqian Zhao, et al. Fin-r1: A large language model for financial reasoning through reinforcement learning. arXiv preprint arXiv:2503.16252, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 447, + 506, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 447, + 506, + 480 + ], + "spans": [ + { + "bbox": [ + 111, + 447, + 506, + 480 + ], + "type": "text", + "content": "[497] Zhiyuan Liu, Yuting Zhang, Feng Liu, Changwang Zhang, Ying Sun, and Jun Wang. Othinkmr1: Stimulating multimodal generalized reasoning capabilities through dynamic reinforcement learning. arXiv preprint arXiv:2503.16081, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 483, + 507, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 483, + 507, + 517 + ], + "spans": [ + { + "bbox": [ + 111, + 483, + 507, + 517 + ], + "type": "text", + "content": "[498] Zichen Liu, Changyu Chen, Wenjun Li, Tianyu Pang, Chao Du, and Min Lin. There may not be aha moment in r1-zero-like training — a pilot study. https://oatllm.notion.site/oat-zero, 2025. Notion Blog." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 519, + 506, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 519, + 506, + 552 + ], + "spans": [ + { + "bbox": [ + 111, + 519, + 506, + 552 + ], + "type": "text", + "content": "[499] Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective. 
arXiv preprint arXiv:2503.20783, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 555, + 506, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 555, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 111, + 555, + 506, + 588 + ], + "type": "text", + "content": "[500] Zihan Liu, Yang Chen, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. Acemath: Advancing frontier math reasoning with post-training and reward modeling. arXiv preprint arXiv:2412.15084, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 591, + 506, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 591, + 506, + 624 + ], + "spans": [ + { + "bbox": [ + 111, + 591, + 506, + 624 + ], + "type": "text", + "content": "[501] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 628, + 504, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 628, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 111, + 628, + 504, + 651 + ], + "type": "text", + "content": "[502] Elita Lobo, Chirag Agarwal, and Himabindu Lakkaraju. On the impact of fine-tuning on chain-of-thought reasoning. arXiv preprint arXiv:2411.15382, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 111, + 653, + 506, + 686 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 653, + 506, + 686 + ], + "spans": [ + { + "bbox": [ + 111, + 653, + 506, + 686 + ], + "type": "text", + "content": "[503] Chenwei Lou, Zewei Sun, Xinnian Liang, Meng Qu, Wei Shen, Wenqi Wang, Yuntao Li, Qingping Yang, and Shuangzhi Wu. Adacot: Pareto-optimal adaptive chain-of-thought triggering via reinforcement learning. arXiv preprint arXiv:2505.11896, 2025." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 111, + 689, + 506, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 506, + 722 + ], + "type": "text", + "content": "[504] Dakuan Lu, Xiaoyu Tan, Rui Xu, Tianchu Yao, Chao Qu, Wei Chu, Yinghui Xu, and Yuan Qi. Scp-116k: A high-quality problem-solution dataset and a generalized pipeline for automated extraction in the higher education science domain, 2025." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "68" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 67 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[505] Haolang Lu, Yilian Liu, Jingxin Xu, Guoshun Nan, Yuanlong Yu, Zhican Chen, and Kun Wang. Auditing meta-cognitive hallucinations in reasoning large language models. arXiv preprint arXiv:2505.13143, 2025." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 109, + 506, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 109, + 506, + 187 + ], + "spans": [ + { + "bbox": [ + 111, + 109, + 506, + 187 + ], + "type": "text", + "content": "[506] Jianqiao Lu, Zhiyang Dou, Hongru WANG, Zeyu Cao, Jianbo Dai, Yunlong Feng, and Zhijiang Guo. Autopsy: Automated process-supervised verifier. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 79935-79962. Curran Associates, Inc., December 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/9246aa822579d9b29a140ecdac36ad60-Paper-Conference.pdf." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 190, + 506, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 190, + 506, + 246 + ], + "spans": [ + { + "bbox": [ + 111, + 190, + 506, + 246 + ], + "type": "text", + "content": "[507] Pan Lu, Swaroop Mishra, Tony Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho, editors, Advances in Neural Information Processing Systems, November 2022. URL https://openreview.net/forum?id=HjwK-Tc_Bc." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 248, + 506, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 248, + 506, + 304 + ], + "spans": [ + { + "bbox": [ + 111, + 248, + 506, + 304 + ], + "type": "text", + "content": "[508] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. 
In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=KUNzEQMWU7." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 308, + 506, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 308, + 506, + 341 + ], + "spans": [ + { + "bbox": [ + 111, + 308, + 506, + 341 + ], + "type": "text", + "content": "[509] Pan Lu, Bowen Chen, Sheng Liu, Rahul Thapa, Joseph Boen, and James Zou. Octo tools: An agentic framework with extensible tools for complex reasoning. arXiv preprint arXiv:2502.11271, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 344, + 505, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 344, + 505, + 379 + ], + "spans": [ + { + "bbox": [ + 111, + 344, + 505, + 379 + ], + "type": "text", + "content": "[510] Rubing Lu, João Sedoc, and Arun Sundararajan. Reasoning and the trusting behavior of deepseek and gpt: An experiment revealing hidden fault lines in large language models. arXiv preprint arXiv:2502.12825, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 381, + 505, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 381, + 505, + 404 + ], + "spans": [ + { + "bbox": [ + 111, + 381, + 505, + 404 + ], + "type": "text", + "content": "[511] Wenquan Lu, Yuechuan Yang, Kyle Lee, Yanshu Li, and Enqi Liu. Latent chain-of-thought? decoding the depth-recurrent transformer. arXiv preprint arXiv:2507.02199, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 407, + 506, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 407, + 506, + 441 + ], + "spans": [ + { + "bbox": [ + 111, + 407, + 506, + 441 + ], + "type": "text", + "content": "[512] Zhengxi Lu, Yuxiang Chai, Yaxuan Guo, Xi Yin, Liang Liu, Hao Wang, Guanjing Xiong, and Hongsheng Li. Ui-r1: Enhancing action prediction of gui agents by reinforcement learning. 
arXiv preprint arXiv:2503.21620, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 445, + 506, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 445, + 506, + 479 + ], + "spans": [ + { + "bbox": [ + 111, + 445, + 506, + 479 + ], + "type": "text", + "content": "[513] Zimu Lu, Aojun Zhou, Houxing Ren, Ke Wang, Weikang Shi, Junting Pan, Mingjie Zhan, and Hongsheng Li. Mathgenie: Generating synthetic data with question back-translation for enhancing mathematical reasoning of llms. arXiv preprint arXiv:2402.16352, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 482, + 506, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 482, + 506, + 525 + ], + "spans": [ + { + "bbox": [ + 111, + 482, + 506, + 525 + ], + "type": "text", + "content": "[514] Haipeng Luo, Qingfeng Sun, Can Xu, Pu Zhao, Jianguang Lou, Chongyang Tao, Xiubo Geng, Qingwei Lin, Shifeng Chen, and Dongmei Zhang. Wizardmath: Empowering mathematical reasoning for large language models via reinforced evol-instruct. arXiv preprint arXiv:2308.09583, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 529, + 506, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 529, + 506, + 563 + ], + "spans": [ + { + "bbox": [ + 111, + 529, + 506, + 563 + ], + "type": "text", + "content": "[515] Hanjun Luo, Shenyu Dai, Chiming Ni, Xinfeng Li, Guibin Zhang, Kun Wang, Tongliang Liu, and Hanan Salam. Agent auditor: Human-level safety and security evaluation for lIm agents. arXiv preprint arXiv:2506.00641, 2025." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 567, + 504, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 567, + 504, + 601 + ], + "spans": [ + { + "bbox": [ + 111, + 567, + 504, + 601 + ], + "type": "text", + "content": "[516] Haotian Luo, Li Shen, Haiying He, Yibo Wang, Shiwei Liu, Wei Li, Naiqiang Tan, Xiaochun Cao, and Dacheng Tao. O1-pruner: Length-harmonizing fine-tuning for o1-like reasoning pruning. arXiv preprint arXiv:2501.12570, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 604, + 506, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 604, + 506, + 637 + ], + "spans": [ + { + "bbox": [ + 111, + 604, + 506, + 637 + ], + "type": "text", + "content": "[517] Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 640, + 506, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 640, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 640, + 506, + 685 + ], + "type": "text", + "content": "[518] Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. Deepscaler: Surpassing o1-preview with a 1.5b model by scaling rl, February 2025. URL https://github.com/agentica-project/rllm. Notion Blog." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 689, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 504, + 723 + ], + "type": "text", + "content": "[519] Ruilin Luo, Zhuofan Zheng, Yifan Wang, Yiyao Yu, Xinzhe Ni, Zicheng Lin, Jin Zeng, and Yujiu Yang. Ursa: Understanding and verifying chain-of-thought reasoning in multimodal mathematics. arXiv preprint arXiv:2501.04686, 2025." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "69" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 68 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 507, + 723 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 111, + 72, + 507, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 507, + 149 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 507, + 149 + ], + "type": "text", + "content": "[520] Xianzhen Luo, Qingfu Zhu, Zhiming Zhang, Libo Qin, Xuanyu Zhang, Qing Yang, Dongliang Xu, and Wanxiang Che. Python is not always the best choice: Embracing multilingual program of thoughts. 
In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 7185-7212, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.408. URL https://aclanthology.org/2024.emnlp-main.408/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 153, + 507, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 153, + 507, + 187 + ], + "spans": [ + { + "bbox": [ + 111, + 153, + 507, + 187 + ], + "type": "text", + "content": "[521] Yijia Luo, Yulin Song, Xingyao Zhang, Jiaheng Liu, Weixun Wang, GengRu Chen, Wenbo Su, and Bo Zheng. Deconstructing long chain-of-thought: A structured reasoning optimization framework for long cot distillation. arXiv preprint arXiv:2503.16385, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 190, + 505, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 190, + 505, + 224 + ], + "spans": [ + { + "bbox": [ + 111, + 190, + 505, + 224 + ], + "type": "text", + "content": "[522] Chengqi Lyu, Songyang Gao, Yuzhe Gu, Wenwei Zhang, Jianfei Gao, Kuikun Liu, Ziyi Wang, Shuaibin Li, Qian Zhao, Haian Huang, et al. Exploring the limit of outcome reward for learning mathematical reasoning. arXiv preprint arXiv:2502.06781, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 227, + 507, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 227, + 507, + 316 + ], + "spans": [ + { + "bbox": [ + 111, + 227, + 507, + 316 + ], + "type": "text", + "content": "[523] Qing Lyu, Shreya Havaldar, Adam Stein, Li Zhang, Delip Rao, Eric Wong, Marianna Apidianaki, and Chris Callison-Burch. Faithful chain-of-thought reasoning. In Jong C. 
Park, Yuki Arase, Baotian Hu, Wei Lu, Derry Wijaya, Ayu Purwarianti, and Adila Alfa Krisnadhi, editors, Proceedings of the 13th International Joint Conference on Natural Language Processing and the 3rd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pages 305-329, Nusa Dua, Bali, November 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.ijcnlp-main.20. URL https://aclanthology.org/2023.ijcnlp-main.20/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 319, + 505, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 319, + 505, + 385 + ], + "spans": [ + { + "bbox": [ + 111, + 319, + 505, + 385 + ], + "type": "text", + "content": "[524] Alexander Lyzhov, Yuliya Molchanova, Armenii Ashukha, Dmitry Molchanov, and Dmitry Vetrov. Greedy policy search: A simple baseline for learnable test-time augmentation. In Jonas Peters and David Sontag, editors, Proceedings of the 36th Conference on Uncertainty in Artificial Intelligence (UAI), volume 124 of Proceedings of Machine Learning Research, pages 1308-1317. PMLR, 03-06 Aug 2020. URL https://proceedings.mlr.press/v124/lyzhov20a.html." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 388, + 504, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 388, + 504, + 412 + ], + "spans": [ + { + "bbox": [ + 111, + 388, + 504, + 412 + ], + "type": "text", + "content": "[525] Jingyuan Ma, Rui Li, Zheng Li, Junfeng Liu, Lei Sha, and Zhifang Sui. Hauntattack: When attack follows reasoning as a shadow. arXiv preprint arXiv:2506.07031, 2025." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 415, + 507, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 415, + 507, + 449 + ], + "spans": [ + { + "bbox": [ + 111, + 415, + 507, + 449 + ], + "type": "text", + "content": "[526] Lu Ma, Hao Liang, Meiyi Qiang, Lexiang Tang, Xiaochen Ma, Zhen Hao Wong, Junbo Niu, Chengyu Shen, Running He, Bin Cui, et al. Learning what reinforcement learning can't: Interleaved online fine-tuning for hardest questions. arXiv preprint arXiv:2506.07527, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 451, + 505, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 451, + 505, + 485 + ], + "spans": [ + { + "bbox": [ + 111, + 451, + 505, + 485 + ], + "type": "text", + "content": "[527] Nanye Ma, Shangyuan Tong, Haolin Jia, Hexiang Hu, Yu-Chuan Su, Mingda Zhang, Xuan Yang, Yandong Li, Tommi Jaakkola, Xuhui Jia, et al. Inference-time scaling for diffusion models beyond scaling denoising steps. arXiv preprint arXiv:2501.09732, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 488, + 507, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 488, + 507, + 521 + ], + "spans": [ + { + "bbox": [ + 111, + 488, + 507, + 521 + ], + "type": "text", + "content": "[528] Qianli Ma, Haotian Zhou, Tingkai Liu, Jianbo Yuan, Pengfei Liu, Yang You, and Hongxia Yang. Let's reward step by step: Step-level reward model as the navigators for reasoning. arXiv preprint arXiv:2310.10080, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 525, + 507, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 525, + 507, + 559 + ], + "spans": [ + { + "bbox": [ + 111, + 525, + 507, + 559 + ], + "type": "text", + "content": "[529] Ruotian Ma, Peisong Wang, Cheng Liu, Xingyan Liu, Jiaqi Chen, Bang Zhang, Xin Zhou, Nan Du, and Jia Li. 
" + }, + { + "bbox": [ + 111, + 525, + 507, + 559 + ], + "type": "inline_equation", + "content": "S^2 r" + }, + { + "bbox": [ + 111, + 525, + 507, + 559 + ], + "type": "text", + "content": ": Teaching llms to self-verify and self-correct via reinforcement learning. arXiv preprint arXiv:2502.12853, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 563, + 507, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 563, + 507, + 586 + ], + "spans": [ + { + "bbox": [ + 111, + 563, + 507, + 586 + ], + "type": "text", + "content": "[530] Xinyin Ma, Guangnian Wan, Runpeng Yu, Gongfan Fang, and Xinchao Wang. Cot-valve: Length-compressible chain-of-thought tuning. arXiv preprint arXiv:2502.09601, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 589, + 507, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 589, + 507, + 621 + ], + "spans": [ + { + "bbox": [ + 111, + 589, + 507, + 621 + ], + "type": "text", + "content": "[531] Xueguang Ma, Qian Liu, Dongfu Jiang, Ge Zhang, Zejun Ma, and Wenhu Chen. Generalreasoner: Advancing llm reasoning across all domains. arXiv preprint arXiv:2505.14652, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 625, + 505, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 625, + 505, + 649 + ], + "spans": [ + { + "bbox": [ + 111, + 625, + 505, + 649 + ], + "type": "text", + "content": "[532] Xuetao Ma, Wenbin Jiang, and Hua Huang. Problem-solving logic guided curriculum in-context learning for llms complex reasoning. arXiv preprint arXiv:2502.15401, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 652, + 505, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 652, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 652, + 505, + 685 + ], + "type": "text", + "content": "[533] Yan Ma, Steffi Chern, Xuyang Shen, Yiran Zhong, and Pengfei Liu. 
Rethinking rl scaling for vision language models: A transparent, from-scratch framework and comprehensive evaluation scheme. arXiv preprint arXiv:2504.02587, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 689, + 507, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 507, + 723 + ], + "type": "text", + "content": "[534] Yiran Ma, Zui Chen, Tianqiao Liu, Mi Tian, Zhuo Liu, Zitao Liu, and Weiqi Luo. What are step-level reward models rewarding? counterintuitive findings from mcts-boosted mathematical reasoning. arXiv preprint arXiv:2412.15904, 2024." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "70" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 69 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 507, + 722 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 111, + 72, + 507, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 507, + 105 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 507, + 105 + ], + "type": "text", + "content": "[535] Zexiong Ma, Chao Peng, Pengfei Gao, Xiangxin Meng, Yanzhen Zou, and Bing Xie. Sortf: Issue resolving with subtask-oriented reinforced fine-tuning. arXiv preprint arXiv:2502.20127, 2025." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 109, + 505, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 109, + 505, + 133 + ], + "spans": [ + { + "bbox": [ + 111, + 109, + 505, + 133 + ], + "type": "text", + "content": "[536] Zeyao Ma, Xiaokang Zhang, Jing Zhang, Jifan Yu, Sijia Luo, and Jie Tang. Dynamic scaling of unit tests for code reward modeling. arXiv preprint arXiv:2501.01054, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 135, + 506, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 135, + 506, + 168 + ], + "spans": [ + { + "bbox": [ + 111, + 135, + 506, + 168 + ], + "type": "text", + "content": "[537] Ziyang Ma, Zhuo Chen, Yuping Wang, Eng Siong Chng, and Xie Chen. Audio-cot: Exploring chain-of-thought reasoning in large audio language model. arXiv preprint arXiv:2501.07246, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 172, + 506, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 172, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 111, + 172, + 506, + 228 + ], + "type": "text", + "content": "[538] Aman Madaan, Katherine Hermann, and Amir Yazdanbakhsh. What makes chain-of-thought prompting effective? a counterfactual study. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 1448-1535, Singapore, December 2023. URL https://aclanthology.org/2023.findings-emnlp.101.pdf." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 231, + 506, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 231, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 111, + 231, + 506, + 319 + ], + "type": "text", + "content": "[539] Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, Shashank Gupta, Bodhisattwa Prasad Majumder, Katherine Hermann, Sean Welleck, Amir Yazdanbakhsh, and Peter Clark. Self-refine: Iterative refinement with self-feedback. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 46534-46594. Curran Associates, Inc., March 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/91edff07232fb1b55a505a9e9f6c0ff3-Paper-Conference.pdf." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 322, + 506, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 322, + 506, + 356 + ], + "spans": [ + { + "bbox": [ + 111, + 322, + 506, + 356 + ], + "type": "text", + "content": "[540] Sathwik Tejaswi Madhusudhan, Shruthan Radhakrishna, Jash Mehta, and Toby Liang. Millions scale dataset distilled from r1-32b. https://huggingface.co/datasets/ServiceNow-AI/R1-Distill-SFT, February 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 359, + 506, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 359, + 506, + 393 + ], + "spans": [ + { + "bbox": [ + 111, + 359, + 506, + 393 + ], + "type": "text", + "content": "[541] Sadegh Mahdavi, Muchen Li, Kaiwen Liu, Christos Thrampoulidis, Leonid Sigal, and Renjie Liao. Leveraging online olympiad-level math problems for llms training and contamination-resistant evaluation. arXiv preprint arXiv:2501.14275, 2025." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 396, + 504, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 396, + 504, + 419 + ], + "spans": [ + { + "bbox": [ + 111, + 396, + 504, + 419 + ], + "type": "text", + "content": "[542] Tobias Materzok. Cos (m+ o) s: Curiosity and rl-enhanced mcts for exploring story space via language models. arXiv preprint arXiv:2501.17104, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 422, + 506, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 422, + 506, + 468 + ], + "spans": [ + { + "bbox": [ + 111, + 422, + 506, + 468 + ], + "type": "text", + "content": "[543] Justus Mattern, Sami Jaghourar, Manveer Basra, Jannik Straube, Matthew Di Ferrante, Felix Gabriel, Jack Min Ong, Vincent Weisser, and Johannes Hagemann. Synthetic-1: Two million collaboratively generated reasoning traces from deepseek-r1, 2025. URL https://www.primeintellect.ai/blog/synthetic-1-release." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 471, + 506, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 471, + 506, + 503 + ], + "spans": [ + { + "bbox": [ + 111, + 471, + 506, + 503 + ], + "type": "text", + "content": "[544] Nat McAleese, Rai Michael Pokorny, Juan Felipe Ceron Uribe, Evgenia Nitishinskaya, Maja Trebacz, and Jan Leike. Llm critics help catch llm bugs. arXiv preprint arXiv:2407.00215, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 507, + 506, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 507, + 506, + 541 + ], + "spans": [ + { + "bbox": [ + 111, + 507, + 506, + 541 + ], + "type": "text", + "content": "[545] R Thomas McCoy, Shunyu Yao, Dan Friedman, Mathew D Hardy, and Thomas L Grifths. When a language model is optimized for reasoning, does it still show embers of autoregression? an analysis of openai o1. arXiv preprint arXiv:2410.01792, 2024." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 544, + 504, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 544, + 504, + 588 + ], + "spans": [ + { + "bbox": [ + 111, + 544, + 504, + 588 + ], + "type": "text", + "content": "[546] Lingrui Mei, Jiayu Yao, Yuyao Ge, Yiwei Wang, Baolong Bi, Yujun Cai, Jiazhi Liu, Mingyu Li, Zhong-Zhi Li, Duzhen Zhang, Chenlin Zhou, Jiayi Mao, Tianze Xia, Jiafeng Guo, and Shenghua Liu. A survey of context engineering for large language models. arXiv preprint arXiv:2507.13334, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 592, + 506, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 592, + 506, + 637 + ], + "spans": [ + { + "bbox": [ + 111, + 592, + 506, + 637 + ], + "type": "text", + "content": "[547] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, Ping Luo, Yu Qiao, Qiaosheng Zhang, and Wenqi Shao. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 640, + 504, + 674 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 640, + 504, + 674 + ], + "spans": [ + { + "bbox": [ + 111, + 640, + 504, + 674 + ], + "type": "text", + "content": "[548] William Merrill and Ashish Sabharwal. The expressive power of transformers with chain of thought. In *The Twelfth International Conference on Learning Representations*, January 2023. URL https://openreview.net/pdf?id=CDmerQ37Zs." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 677, + 506, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 677, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 111, + 677, + 506, + 722 + ], + "type": "text", + "content": "[549] Ning Miao, Yee Whye Teh, and Tom Rainforth. 
Selfcheck: Using LLMs to zero-shot check their own step-by-step reasoning. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id= pTHfApDakA." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "71" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 70 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 721 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 116 + ], + "type": "text", + "content": "[550] Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, et al. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 120, + 505, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 120, + 505, + 176 + ], + "spans": [ + { + "bbox": [ + 111, + 120, + 505, + 176 + ], + "type": "text", + "content": "[551] Seyed Iman Mirzadeh, Keivan Alizadeh, Hooman Shahrokhi, Oncel Tuzel, Samy Bengio, and Mehrdad Farajtabar. 
GSM-symbolic: Understanding the limitations of mathematical reasoning in large language models. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=AjXkRZIvjb." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 178, + 505, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 178, + 505, + 213 + ], + "spans": [ + { + "bbox": [ + 111, + 178, + 505, + 213 + ], + "type": "text", + "content": "[552] Prakamya Mishra, Jiang Liu, Jialian Wu, Xiaodong Yu, Zicheng Liu, and Emad Barsoum. Tttbench: A benchmark for evaluating reasoning ability with simple and novel tic-tac-toe-style games. arXiv preprint arXiv:2506.10209, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 216, + 505, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 216, + 505, + 249 + ], + "spans": [ + { + "bbox": [ + 111, + 216, + 505, + 249 + ], + "type": "text", + "content": "[553] Arindam Mitra, Hamed Khanpour, Corby Rosset, and Ahmed Awadallah. Orca-math: Unlocking the potential of slms in grade school math. arXiv preprint arXiv:2402.14830, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 253, + 505, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 253, + 505, + 287 + ], + "spans": [ + { + "bbox": [ + 111, + 253, + 505, + 287 + ], + "type": "text", + "content": "[554] Chancharik Mitra, Brandon Huang, Trevor Darrell, and Roei Herzig. Compositional chain-of-thought prompting for large multimodal models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14420-14431, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 290, + 505, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 290, + 505, + 312 + ], + "spans": [ + { + "bbox": [ + 111, + 290, + 505, + 312 + ], + "type": "text", + "content": "[555] Purbesh Mitra and Sennur Ulukus. Motif: Modular thinking via reinforcement fine-tuning in llms. arXiv preprint arXiv:2507.02851, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 316, + 505, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 316, + 505, + 361 + ], + "spans": [ + { + "bbox": [ + 111, + 316, + 505, + 361 + ], + "type": "text", + "content": "[556] Shentong Mo and Miao Xin. Tree of uncertain thoughts reasoning for large language models. In ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 12742-12746, April 2024. doi: 10.1109/ICASSP48485.2024.10448355. URL https://ieeexplore.ieee.org/document/10448355." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 364, + 505, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 364, + 505, + 387 + ], + "spans": [ + { + "bbox": [ + 111, + 364, + 505, + 387 + ], + "type": "text", + "content": "[557] Philipp Mondorf and Barbara Plank. Beyond accuracy: Evaluating the reasoning behavior of large language models—a survey. arXiv preprint arXiv:2404.01869, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 390, + 505, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 390, + 505, + 468 + ], + "spans": [ + { + "bbox": [ + 111, + 390, + 505, + 468 + ], + "type": "text", + "content": "[558] Terufumi Morishita, Gaku Morio, Atsuki Yamaguchi, and Yasuhiro Sogawa. Enhancing reasoning capabilities of llms via principled synthetic logic corpus. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. 
Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 73572-73604. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/8678da90126aa58326b2fc0254b33a8c-Paper-Conference.pdf." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 471, + 505, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 471, + 505, + 504 + ], + "spans": [ + { + "bbox": [ + 111, + 471, + 505, + 504 + ], + "type": "text", + "content": "[559] Yongyu Mu, Jiali Zeng, Bei Li, Xinyan Guan, Fandong Meng, Jie Zhou, Tong Xiao, and Jingbo Zhu. Dissecting long reasoning models: An empirical study. arXiv preprint arXiv:2506.04913, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 507, + 505, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 507, + 505, + 541 + ], + "spans": [ + { + "bbox": [ + 111, + 507, + 505, + 541 + ], + "type": "text", + "content": "[560] Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 544, + 505, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 544, + 505, + 578 + ], + "spans": [ + { + "bbox": [ + 111, + 544, + 505, + 578 + ], + "type": "text", + "content": "[561] Tergel Munkhbat, Namgyu Ho, Seohyun Kim, Yongjin Yang, Yujin Kim, and Se-Young Yun. Self-training elicits concise reasoning in large language models. arXiv preprint arXiv:2502.20122, 2025." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 582, + 505, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 582, + 505, + 604 + ], + "spans": [ + { + "bbox": [ + 111, + 582, + 505, + 604 + ], + "type": "text", + "content": "[562] Vaskar Nath, Pranav Raja, Claire Yoon, and Sean Hendryx. Toolcomp: A multi-tool reasoning & process supervision benchmark. arXiv preprint arXiv:2501.01290, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 608, + 505, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 608, + 505, + 642 + ], + "spans": [ + { + "bbox": [ + 111, + 608, + 505, + 642 + ], + "type": "text", + "content": "[563] Sania Nayab, Giulio Rossolini, Marco Simoni, Andrea Saracino, Giorgio Buttazzo, Nicola Maria Manes, and Fabrizio Giacomelli. Concise thoughts: Impact of output length on llm reasoning and cost. arXiv preprint arXiv:2407.19825, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 644, + 505, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 644, + 505, + 721 + ], + "spans": [ + { + "bbox": [ + 111, + 644, + 505, + 721 + ], + "type": "text", + "content": "[564] Ansong Ni, Srini Iyer, Dragomir Radev, Veselin Stoyanov, Wen-Tau Yih, Sida Wang, and Xi Victoria Lin. LEVER: Learning to verify language-to-code generation with execution. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett, editors, Proceedings of the 40th International Conference on Machine Learning, volume 202 of Proceedings of Machine Learning Research, pages 26106-26128. PMLR, 23-29 Jul 2023. URL https://proceedings.mlr.press/v202/ni23b.html." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "72" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 71 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 507, + 721 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[565] Ziyi Ni, Yifan Li, Ning Yang, Dou Shen, Pin Lv, and Daxiang Dong. Tree-of-code: A tree-structured exploring framework for end-to-end code generation and execution in complex task handling. arXiv preprint arXiv:2412.15305, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 109, + 506, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 109, + 506, + 142 + ], + "spans": [ + { + "bbox": [ + 111, + 109, + 506, + 142 + ], + "type": "text", + "content": "[566] Allen Nie, Yi Su, Bo Chang, Jonathan N Lee, Ed H Chi, Quoc V Le, and Minmin Chen. Evolve: Evaluating and optimizing llms for exploration. arXiv preprint arXiv:2410.06238, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 146, + 506, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 146, + 506, + 180 + ], + "spans": [ + { + "bbox": [ + 111, + 146, + 506, + 180 + ], + "type": "text", + "content": "[567] Yansong Ning, Wei Li, Jun Fang, Naiqiang Tan, and Hao Liu. Not all thoughts are generated equal: Efficient lIm reasoning via multi-turn reinforcement learning. arXiv preprint arXiv:2505.11827, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 183, + 506, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 183, + 506, + 218 + ], + "spans": [ + { + "bbox": [ + 111, + 183, + 506, + 218 + ], + "type": "text", + "content": "[568] Harsha Nori, Naoto Usuyama, Nicholas King, Scott Mayer McKinney, Xavier Fernandes, Sheng Zhang, and Eric Horvitz. From medprompt to o1: Exploration of run-time strategies for medical challenge problems and beyond. arXiv preprint arXiv:2411.03590, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 220, + 506, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 220, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 111, + 220, + 506, + 277 + ], + "type": "text", + "content": "[569] Maxwell Nye, Anders Johan Andreassen, Guy Gur-Ari, Henryk Michalewski, Jacob Austin, David Bieber, David Dohan, Aitor Lewkowycz, Maarten Bosma, David Luan, Charles Sutton, and Augustus Odena. Show your work: Scratchpads for intermediate computation with language models. In Deep Learning for Code Workshop, March 2022. URL https://openreview.net/forum?id=HB1x2idbkbq." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 280, + 506, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 280, + 506, + 302 + ], + "spans": [ + { + "bbox": [ + 111, + 280, + 506, + 302 + ], + "type": "text", + "content": "[570] Skywork o1 Team. Skywork-o1 open series. 
https://huggingface.co/Skywork, November 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 305, + 506, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 305, + 506, + 328 + ], + "spans": [ + { + "bbox": [ + 111, + 305, + 506, + 328 + ], + "type": "text", + "content": "[571] OpenCompass. Aime 2025. https://huggingface.co/datasets/opencompass/AIME2025, February 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 332, + 506, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 332, + 506, + 366 + ], + "spans": [ + { + "bbox": [ + 111, + 332, + 506, + 366 + ], + "type": "text", + "content": "[572] Yixin Ou, Yunzhi Yao, Ningyu Zhang, Hui Jin, Jiacheng Sun, Shumin Deng, Zhenguo Li, and Huajun Chen. How do llms acquire new knowledge? a knowledge circuits perspective on continual pre-training. arXiv preprint arXiv:2502.11196, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 369, + 506, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 369, + 506, + 393 + ], + "spans": [ + { + "bbox": [ + 111, + 369, + 506, + 393 + ], + "type": "text", + "content": "[573] Alexander Pan, Kush Bhatia, and Jacob Steinhardt. The effects of reward misspecification: Mapping and mitigating misaligned models. arXiv preprint arXiv:2201.03544, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 396, + 507, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 396, + 507, + 473 + ], + "spans": [ + { + "bbox": [ + 111, + 396, + 507, + 473 + ], + "type": "text", + "content": "[574] Jiabao Pan, Yan Zhang, Chen Zhang, Zuozhu Liu, Hongwei Wang, and Haizhou Li. DynaThink: Fast or slow? a dynamic decision-making framework for large language models. 
In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 14686-14695, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.814. URL https://aclanthology.org/2024.emnlp-main.814/." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 476, + 506, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 476, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 111, + 476, + 506, + 510 + ], + "type": "text", + "content": "[575] Jianfeng Pan, Senyou Deng, and Shaomang Huang. Coat: Chain-of-associated-thoughts framework for enhancing large language models reasoning. arXiv preprint arXiv:2502.02390, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 514, + 506, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 514, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 111, + 514, + 506, + 536 + ], + "type": "text", + "content": "[576] Jiayi Pan, Junjie Zhang, Xingyao Wang, Lifan Yuan, Hao Peng, and Alane Suhr. Tinyzero. https://github.com/Jiayi-Pan/TinyZero, 2025. Accessed: 2025-01-24." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 540, + 506, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 540, + 506, + 584 + ], + "spans": [ + { + "bbox": [ + 111, + 540, + 506, + 584 + ], + "type": "text", + "content": "[577] Jiazhen Pan, Che Liu, Junde Wu, Fenglin Liu, Jiayuan Zhu, Hongwei Bran Li, Chen Chen, Cheng Ouyang, and Daniel Rueckert. Medvlm-r1: Incentivizing medical reasoning capability of vision-language models (vlms) via reinforcement learning. arXiv preprint arXiv:2502.19634, 2025." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 588, + 504, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 588, + 504, + 622 + ], + "spans": [ + { + "bbox": [ + 111, + 588, + 504, + 622 + ], + "type": "text", + "content": "[578] Liangming Pan, Michael Saxon, Wenda Xu, Deepak Nathani, Xinyi Wang, and William Yang Wang. Automatically correcting large language models: Surveying the landscape of diverse self-correction strategies. arXiv preprint arXiv:2308.03188, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 624, + 506, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 624, + 506, + 658 + ], + "spans": [ + { + "bbox": [ + 111, + 624, + 506, + 658 + ], + "type": "text", + "content": "[579] Wenbo Pan, Zhichao Liu, Qiguang Chen, Xiangyang Zhou, Haining Yu, and Xiaohua Jia. The hidden dimensions of llm alignment: A multi-dimensional safety analysis. arXiv preprint arXiv:2502.09674, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 662, + 504, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 662, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 662, + 504, + 685 + ], + "type": "text", + "content": "[580] Zhihong Pan, Kai Zhang, Yuze Zhao, and Yupeng Han. Route to reason: Adaptive routing for lIm and reasoning strategy selection. arXiv preprint arXiv:2505.19435, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 689, + 506, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 506, + 721 + ], + "type": "text", + "content": "[581] Bo Pang, Hanze Dong, Jiacheng Xu, Silvio Savarese, Yingbo Zhou, and Caiming Xiong. Bolt: Bootstrap long chain-of-thought in language models without distillation. arXiv preprint arXiv:2502.03860, 2025." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "73" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 72 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 149 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 149 + ], + "type": "text", + "content": "[582] Richard Yuanzhe Pang, Weizhe Yuan, He He, Kyunghyun Cho, Sainbayar Sukhbaatar, and Jason Weston. Iterative reasoning preference optimization. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 116617-116637. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/d37c9ad425fe5b65304d500c6edcba00-Paper-Conference.pdf." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 153, + 505, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 153, + 505, + 187 + ], + "spans": [ + { + "bbox": [ + 111, + 153, + 505, + 187 + ], + "type": "text", + "content": "[583] Shubham Parashar, Blake Olson, Sambhav Khurana, Eric Li, Hongyi Ling, James Caverlee, and Shuiwang Ji. 
Inference-time computations for llm reasoning and planning: A benchmark and insights. arXiv preprint arXiv:2502.12521, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 191, + 506, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 191, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 111, + 191, + 506, + 224 + ], + "type": "text", + "content": "[584] Chanwoo Park, Seungju Han, Xingzhi Guo, Asuman Ozdaglar, Kaiqing Zhang, and Joo-Kyung Kim. Maporl: Multi-agent post-co-training for collaborative large language models with reinforcement learning. arXiv preprint arXiv:2502.18439, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 228, + 504, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 228, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 111, + 228, + 504, + 251 + ], + "type": "text", + "content": "[585] Junsoo Park, Seungyeon Jwa, Meiying Ren, Daeyoung Kim, and Sanghyuk Choi. Offsetbias: Leveraging debiased data for tuning evaluators, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 255, + 505, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 255, + 505, + 288 + ], + "spans": [ + { + "bbox": [ + 111, + 255, + 505, + 288 + ], + "type": "text", + "content": "[586] Sungjin Park, Xiao Liu, Yeyun Gong, and Edward Choi. Ensembling large language models with process reward-guided tree search for better complex reasoning. arXiv preprint arXiv:2412.15797, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 293, + 504, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 293, + 504, + 325 + ], + "spans": [ + { + "bbox": [ + 111, + 293, + 504, + 325 + ], + "type": "text", + "content": "[587] Manojkumar Parmar and Yuvaraj Govindarajulu. Challenges in ensuring ai safety in deepseek-r1 models: The shortcomings of reinforcement learning strategies. 
arXiv preprint arXiv:2501.17030, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 330, + 504, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 330, + 504, + 353 + ], + "spans": [ + { + "bbox": [ + 111, + 330, + 504, + 353 + ], + "type": "text", + "content": "[588] Avinash Patil. Advancing reasoning in large language models: Promising methods and approaches. arXiv preprint arXiv:2502.03671, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 357, + 504, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 357, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 111, + 357, + 504, + 388 + ], + "type": "text", + "content": "[589] Avinash Patil and Amardeep Kour Gedhu. Cognitive-mental-llm: Leveraging reasoning in large language models for mental health prediction via online text. arXiv preprint arXiv:2503.10095, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 393, + 506, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 393, + 506, + 460 + ], + "spans": [ + { + "bbox": [ + 111, + 393, + 506, + 460 + ], + "type": "text", + "content": "[590] Debjit Paul, Mete Ismayilzada, Maxime Peyrard, Beatrix Borges, Antoine Bosselut, Robert West, and Boi Faltings. REFINER: Reasoning feedback on intermediate representations. In Yvette Graham and Matthew Purver, editors, Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1100–1126, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.67/." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 464, + 505, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 464, + 505, + 508 + ], + "spans": [ + { + "bbox": [ + 111, + 464, + 505, + 508 + ], + "type": "text", + "content": "[591] Patomporn Payoungkhamdee, Pume Tuchinda, Jinheon Baek, Samuel Cahyawijaya, Can Udomcharoenchaikit, Potsawee Manakul, Peerat Limkonchotiwat, Ekapol Chuangsuwanich, and Sarana Nutanong. Towards better understanding of program-of-thought reasoning in cross-lingual and multilingual environments. arXiv preprint arXiv:2502.17956, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 512, + 504, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 512, + 504, + 546 + ], + "spans": [ + { + "bbox": [ + 111, + 512, + 504, + 546 + ], + "type": "text", + "content": "[592] Chunyi Peng, Zhipeng Xu, Zhenghao Liu, Yishan Li, Yukun Yan, Shuo Wang, Zhiyuan Liu, Yu Gu, Minghe Yu, Ge Yu, et al. Learning to route queries across knowledge bases for step-wise retrieval-augmented reasoning. arXiv preprint arXiv:2505.22095, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 550, + 505, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 550, + 505, + 583 + ], + "spans": [ + { + "bbox": [ + 111, + 550, + 505, + 583 + ], + "type": "text", + "content": "[593] Dengyun Peng, Yuhang Zhou, Qiguang Chen, Jinhao Liu, Jingjing Chen, and Libo Qin. Dlpo: Towards a robust, efficient, and generalizable prompt optimization framework from a deep-learning perspective. arXiv preprint arXiv:2503.13413, 2025." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 587, + 504, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 587, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 111, + 587, + 504, + 620 + ], + "type": "text", + "content": "[594] Hao Peng, Yunjia Qi, Xiaozhi Wang, Zijun Yao, Bin Xu, Lei Hou, and Juanzi Li. Agentic reward modeling: Integrating human preferences with verifiable correctness signals for reliable reward systems. arXiv preprint arXiv:2502.19328, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 624, + 504, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 624, + 504, + 658 + ], + "spans": [ + { + "bbox": [ + 111, + 624, + 504, + 658 + ], + "type": "text", + "content": "[595] Keqin Peng, Liang Ding, Yuanxin Ouyang, Meng Fang, and Dacheng Tao. Revisiting overthinking in long chain-of-thought from the perspective of self-doubt. arXiv preprint arXiv:2505.23480, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 662, + 504, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 662, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 662, + 504, + 685 + ], + "type": "text", + "content": "[596] Miao Peng, Nuo Chen, Zongrui Suo, and Jia Li. Rewarding graph reasoning process makes llms more generalized reasoners. arXiv preprint arXiv:2503.00845, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 689, + 504, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 504, + 722 + ], + "type": "text", + "content": "[597] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "74" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 73 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[598] Ivo Petrov, Jasper Dekoninck, Lyuben Baltadzhiev, Maria Drencheva, Kristian Minchev, Mislav Balunovic, Nikola Jovanovic, and Martin Vechev. Proof or bluff? evaluating llms on 2025 usa math olympiad. arXiv preprint arXiv:2503.21934, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 110, + 506, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 506, + 133 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 506, + 133 + ], + "type": "text", + "content": "[599] Rolf Pfister and Hansueli Jud. Understanding and benchmarking artificial intelligence: Openai's o3 is not agi. arXiv preprint arXiv:2501.07458, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 136, + 504, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 136, + 504, + 170 + ], + "spans": [ + { + "bbox": [ + 111, + 136, + 504, + 170 + ], + "type": "text", + "content": "[600] Quang Hieu Pham, Thuy Duong Nguyen, Tung Pham, Anh Tuan Luu, and Dat Quoc Nguyen. Clozemath: Improving mathematical reasoning in language models by learning to fill equations. arXiv preprint arXiv:2506.03763, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 173, + 505, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 173, + 505, + 206 + ], + "spans": [ + { + "bbox": [ + 111, + 173, + 505, + 206 + ], + "type": "text", + "content": "[601] Thinh Pham, Nguyen Nguyen, Pratibha Zunjare, Weiyuan Chen, Yu-Min Tseng, and Tu Vu. Sealqa: Raising the bar for reasoning in search-augmented language models. arXiv preprint arXiv:2506.01062, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 211, + 506, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 211, + 506, + 244 + ], + "spans": [ + { + "bbox": [ + 111, + 211, + 506, + 244 + ], + "type": "text", + "content": "[602] Long Phan, Alice Gatti, Ziwen Han, Nathaniel Li, Josephina Hu, Hugh Zhang, Sean Shi, Michael Choi, Anish Agrawal, Arnav Chopra, et al. Humanity's last exam. arXiv preprint arXiv:2501.14249, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 247, + 506, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 247, + 506, + 272 + ], + "spans": [ + { + "bbox": [ + 111, + 247, + 506, + 272 + ], + "type": "text", + "content": "[603] Aske Plaat, Annie Wong, Suzan Verberne, Joost Broekens, Niki van Stein, and Thomas Back. Reasoning with large language models, a survey. arXiv preprint arXiv:2407.11511, 2024." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 275, + 506, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 275, + 506, + 309 + ], + "spans": [ + { + "bbox": [ + 111, + 275, + 506, + 309 + ], + "type": "text", + "content": "[604] Gabriel Poesia, Kanishk Gandhi, Eric Zelikman, and Noah Goodman. Certified deductive reasoning with language models. Transactions on Machine Learning Research, May 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=yXnwrS2T16." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 312, + 504, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 312, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 111, + 312, + 504, + 335 + ], + "type": "text", + "content": "[605] Stanislas Polu and Ilya Sutskever. Generative language modeling for automated theorem proving. arXiv preprint arXiv:2009.03393, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 339, + 506, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 339, + 506, + 406 + ], + "spans": [ + { + "bbox": [ + 111, + 339, + 506, + 406 + ], + "type": "text", + "content": "[606] Archiki Prasad, Swarnadeep Saha, Xiang Zhou, and Mohit Bansal. ReCEval: Evaluating reasoning chains via correctness and informativeness. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 10066-10086, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.622. URL https://aclanthology.org/2023.emnlp-main.622/." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 409, + 506, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 409, + 506, + 476 + ], + "spans": [ + { + "bbox": [ + 111, + 409, + 506, + 476 + ], + "type": "text", + "content": "[607] Archiki Prasad, Alexander Koller, Mareike Hartmann, Peter Clark, Ashish Sabharwal, Mohit Bansal, and Tushar Khot. ADaPT: As-needed decomposition and planning with language models. In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Findings of the Association for Computational Linguistics: NAACL 2024, pages 4226-4252, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-naacl.264. URL https://aclanthology.org/2024-findings-naacl.264/." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 479, + 504, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 479, + 504, + 503 + ], + "spans": [ + { + "bbox": [ + 111, + 479, + 504, + 503 + ], + "type": "text", + "content": "[608] Tidor-Vlad Pricope. Hardml: A benchmark for evaluating data science and machine learning knowledge and reasoning in ai. arXiv preprint arXiv:2501.15627, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 506, + 506, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 506, + 506, + 573 + ], + "spans": [ + { + "bbox": [ + 111, + 506, + 506, + 573 + ], + "type": "text", + "content": "[609] Ben Prystawski, Michael Li, and Noah Goodman. Why think step by step? reasoning emerges from the locality of experience. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 70926-70947. Curran Associates, Inc., September 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/e0af79ad53a336b4c4b4f7e2a68eb609-Paper-Conference.pdf." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 576, + 506, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 576, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 111, + 576, + 506, + 609 + ], + "type": "text", + "content": "[610] Israel Puerta-Merino, Carlos Núñez-Molina, Pablo Mesejo, and Juan Fernández-Olivares. A roadmap to guide the integration of llms in hierarchical planning. arXiv preprint arXiv:2501.08068, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 613, + 506, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 613, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 111, + 613, + 506, + 647 + ], + "type": "text", + "content": "[611] Haritz Puerto, Tilek Chubakov, Xiaodan Zhu, Harish Tayyar Madabushi, and Iryna Gurevych. Fine-tuning with divergent chains of thought boosts reasoning through self-correction in language models. arXiv preprint arXiv:2407.03181, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 651, + 506, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 651, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 651, + 506, + 685 + ], + "type": "text", + "content": "[612] Isha Puri, Shivchander Sudalairaj, Guangxuan Xu, Kai Xu, and Akash Srivastava. A probabilistic inference approach to inference-time scaling of llms using particle-based monte carlo methods. arXiv preprint arXiv:2502.01618, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "text", + "content": "[613] Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199, 2024." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "75" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 74 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "text", + "content": "[614] Penghui Qi, Zichen Liu, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Optimizing anytime reasoning via budget relative policy optimization. arXiv preprint arXiv:2505.13438, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 108, + 505, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 108, + 505, + 140 + ], + "spans": [ + { + "bbox": [ + 111, + 108, + 505, + 140 + ], + "type": "text", + "content": "[615] Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual reasoning makes smaller llms stronger problem-solvers. arXiv preprint arXiv:2408.06195, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 144, + 504, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 144, + 504, + 167 + ], + "spans": [ + { + "bbox": [ + 111, + 144, + 504, + 167 + ], + "type": "text", + "content": "[616] Hongjin Qian and Zheng Liu. Scent of knowledge: Optimizing search-enhanced reasoning with information foraging. arXiv preprint arXiv:2505.09316, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 170, + 506, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 170, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 111, + 170, + 506, + 236 + ], + "type": "text", + "content": "[617] Libo Qin, Qiguang Chen, Fuxuan Wei, Shijue Huang, and Wanxiang Che. Cross-lingual prompting: Improving zero-shot chain-of-thought reasoning across languages. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 2695–2709, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.163. URL https://aclanthology.org/2023.emnlp-main.163/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 238, + 506, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 238, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 111, + 238, + 506, + 270 + ], + "type": "text", + "content": "[618] Libo Qin, Qiguang Chen, Hao Fei, Zhi Chen, Min Li, and Wanxiang Che. What factors affect multi-modal in-context learning? an in-depth exploration. arXiv preprint arXiv:2410.20482, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 274, + 506, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 274, + 506, + 307 + ], + "spans": [ + { + "bbox": [ + 111, + 274, + 506, + 307 + ], + "type": "text", + "content": "[619] Libo Qin, Qiguang Chen, Xiachong Feng, Yang Wu, Yongheng Zhang, Yinghui Li, Min Li, Wanxiang Che, and Philip S Yu. Large language models meet nlp: A survey. arXiv preprint arXiv:2405.12819, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 310, + 505, + 344 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 310, + 505, + 344 + ], + "spans": [ + { + "bbox": [ + 111, + 310, + 505, + 344 + ], + "type": "text", + "content": "[620] Libo Qin, Qiguang Chen, Yuhang Zhou, Zhi Chen, Yinghui Li, Lizi Liao, Min Li, Wanxiang Che, and Philip S Yu. Multilingual large language model: A survey of resources, taxonomy and frontiers. arXiv preprint arXiv:2404.04925, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 346, + 506, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 346, + 506, + 390 + ], + "spans": [ + { + "bbox": [ + 111, + 346, + 506, + 390 + ], + "type": "text", + "content": "[621] Libo Qin, Qiguang Chen, Jingxuan Zhou, Jin Wang, Hao Fei, Wanxiang Che, and Min Li. Divide-solve-combine: An interpretable and accurate prompting framework for zero-shot multi-intent detection. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 39, pages 25038-25046, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 393, + 505, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 393, + 505, + 437 + ], + "spans": [ + { + "bbox": [ + 111, + 393, + 505, + 437 + ], + "type": "text", + "content": "[622] Libo Qin, Qiguang Chen, Yuhang Zhou, Zhi Chen, Yinghui Li, Lizi Liao, Min Li, Wanxiang Che, and S Yu Philip. A survey of multilingual large language models. 
Patterns, 6(1), January 2025. URL https://www.cell.com/patterns/fulltext/S2666-3899(24)00290-3." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 440, + 505, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 440, + 505, + 473 + ], + "spans": [ + { + "bbox": [ + 111, + 440, + 505, + 473 + ], + "type": "text", + "content": "[623] Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, et al. O1 replication journey: A strategic progress report-part 1. arXiv preprint arXiv:2410.18982, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 476, + 506, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 476, + 506, + 509 + ], + "spans": [ + { + "bbox": [ + 111, + 476, + 506, + 509 + ], + "type": "text", + "content": "[624] Yulei Qin, Gang Li, Zongyi Li, Zihan Xu, Yuchen Shi, Zhekai Lin, Xiao Cui, Ke Li, and Xing Sun. Incentivizing reasoning for advanced instruction-following of large language models. arXiv preprint arXiv:2506.01413, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 512, + 505, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 512, + 505, + 546 + ], + "spans": [ + { + "bbox": [ + 111, + 512, + 505, + 546 + ], + "type": "text", + "content": "[625] Jiahao Qiu, Yifu Lu, Yifan Zeng, Jiacheng Guo, Jiayi Geng, Huazheng Wang, Kaixuan Huang, Yue Wu, and Mengdi Wang. Treebon: Enhancing inference-time alignment with speculative tree-search and best-of-n sampling. arXiv preprint arXiv:2410.16033, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 548, + 506, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 548, + 506, + 591 + ], + "spans": [ + { + "bbox": [ + 111, + 548, + 506, + 591 + ], + "type": "text", + "content": "[626] Xiaoye Qu, Yafu Li, Zhaochen Su, Weigao Sun, Jianhao Yan, Dongrui Liu, Ganqu Cui, Daizong Liu, Shuxian Liang, Junxian He, et al. A survey of efficient reasoning for large reasoning models: Language, multimodality, and beyond. arXiv preprint arXiv:2503.21614, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 595, + 506, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 595, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 111, + 595, + 506, + 639 + ], + "type": "text", + "content": "[627] Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=DRC9pZwBwR." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 642, + 506, + 686 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 642, + 506, + 686 + ], + "spans": [ + { + "bbox": [ + 111, + 642, + 506, + 686 + ], + "type": "text", + "content": "[628] Yuxiao Qu, Matthew Y. R. Yang, Amrith Setlur, Lewis Tunstall, Edward Emanuel Beeching, Ruslan Salakhutdinov, and Aviral Kumar. Optimizing test-time compute via meta reinforcement finetuning. In Workshop on Reasoning and Planning for Large Language Models, March 2025. URL https://openreview.net/forum?id=WGz4ytjolh." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "text", + "content": "[629] Gollam Rabby, Farhana Keya, Parvez Zamil, and Soren Auer. Mc-nest-enhancing mathematical reasoning in large language models with a monte carlo nash equilibrium self-refine tree. arXiv preprint arXiv:2411.15645, 2024." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "76" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 75 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 722 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "type": "text", + "content": "[630] Santosh Kumar Radha and Oktay Goktas. On the reasoning capacity of ai models and how to quantify it. arXiv preprint arXiv:2501.13833, 2025." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 99, + 505, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 99, + 505, + 144 + ], + "spans": [ + { + "bbox": [ + 111, + 99, + 505, + 144 + ], + "type": "text", + "content": "[631] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. URL https://openreview.net/pdf?id=HPuSIXJaa9." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 148, + 505, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 148, + 505, + 215 + ], + "spans": [ + { + "bbox": [ + 111, + 148, + 505, + 215 + ], + "type": "text", + "content": "[632] Daking Rai and Ziyu Yao. An investigation of neuron activation as a unified lens to explain chain-of-thought eliciting arithmetic reasoning of LLMs. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7174–7193, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.387. URL https://aclanthology.org/2024.acl-long.387/." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 219, + 505, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 219, + 505, + 286 + ], + "spans": [ + { + "bbox": [ + 111, + 219, + 505, + 286 + ], + "type": "text", + "content": "[633] Leonardo Ranaldi, Giulia Pucci, Federico Ranaldi, Elena Sofia Ruzzetti, and Fabio Massimo Zanzotto. A tree-of-thoughts to broaden multi-step reasoning across languages. In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Findings of the Association for Computational Linguistics: NAACL 2024, pages 1229-1241, Mexico City, Mexico, June 2024. 
Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-naacl.78. URL https://aclanthology.org/2024 findings-naacl.78/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 290, + 505, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 290, + 505, + 323 + ], + "spans": [ + { + "bbox": [ + 111, + 290, + 505, + 323 + ], + "type": "text", + "content": "[634] Leonardo Ranaldi, Marco Valentino, Alexander Polonsky, and André Freitas. Improving chain-of-thought reasoning via quasi-symbolic abstractions. arXiv preprint arXiv:2502.12616, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 328, + 505, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 328, + 505, + 361 + ], + "spans": [ + { + "bbox": [ + 111, + 328, + 505, + 361 + ], + "type": "text", + "content": "[635] Mohammad Raza and Natasha Milic-Frayling. Instantiation-based formalization of logical reasoning tasks using language models and logical solvers. arXiv preprint arXiv:2501.16961, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 366, + 505, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 366, + 505, + 390 + ], + "spans": [ + { + "bbox": [ + 111, + 366, + 505, + 390 + ], + "type": "text", + "content": "[636] Ali Razghandi, Seyed Mohammad Hadi Hosseini, and Mahdieh Soleymani Baghshah. Cer: Confidence enhanced reasoning in llms. arXiv preprint arXiv:2502.14634, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 393, + 505, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 393, + 505, + 438 + ], + "spans": [ + { + "bbox": [ + 111, + 393, + 505, + 438 + ], + "type": "text", + "content": "[637] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. GPQA: A graduate-level google-proof q&a benchmark. 
In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=Ti67584b98." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 443, + 505, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 443, + 505, + 466 + ], + "spans": [ + { + "bbox": [ + 111, + 443, + 505, + 466 + ], + "type": "text", + "content": "[638] Matthew Renze and Erhan Guven. Self-reflection in llm agents: Effects on problem-solving performance. arXiv preprint arXiv:2405.06682, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 470, + 505, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 470, + 505, + 504 + ], + "spans": [ + { + "bbox": [ + 111, + 470, + 505, + 504 + ], + "type": "text", + "content": "[639] Baptiste Roziere, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaqing Ellen Tan, Yossi Adi, Jingyu Liu, Romain Sauvestre, Tal Remez, et al. Code llama: Open foundation models for code. arXiv preprint arXiv:2308.12950, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 508, + 505, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 508, + 505, + 532 + ], + "spans": [ + { + "bbox": [ + 111, + 508, + 505, + 532 + ], + "type": "text", + "content": "[640] Yangjun Ruan, Neil Band, Chris J Maddison, and Tatsunori Hashimoto. Reasoning to learn from latent thoughts. arXiv preprint arXiv:2503.18866, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 536, + 505, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 536, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 111, + 536, + 505, + 570 + ], + "type": "text", + "content": "[641] Jon Saad-Falcon, Rajan Vivek, William Berrios, Nandita Shankar Naik, Matija Franklin, Bertie Vidgen, Amanpreet Singh, Douwe Kiela, and Shikib Mehri. Lmunit: Fine-grained evaluation with natural language unit tests. 
arXiv preprint arXiv:2412.13091, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 574, + 505, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 574, + 505, + 597 + ], + "spans": [ + { + "bbox": [ + 111, + 574, + 505, + 597 + ], + "type": "text", + "content": "[642] Nikta Gohari Sadr, Sangmitra Madhusudan, and Ali Emami. Think or step-by-step? unzipping the black box in zero-shot prompts. arXiv preprint arXiv:2502.03418, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 601, + 505, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 601, + 505, + 635 + ], + "spans": [ + { + "bbox": [ + 111, + 601, + 505, + 635 + ], + "type": "text", + "content": "[643] Swarnadeep Saha, Xian Li, Marjan Ghazvininejad, Jason Weston, and Tianlu Wang. Learning to plan & reason for evaluation with thinking-llm-as-a-judge. arXiv preprint arXiv:2501.18099, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 639, + 505, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 639, + 505, + 684 + ], + "spans": [ + { + "bbox": [ + 111, + 639, + 505, + 684 + ], + "type": "text", + "content": "[644] S Sauhandikaa, R Bhagavath Narethranath, and R Sathya Bama Krishna. Explainable ai in large language models: A review. In 2024 International Conference on Emerging Research in Computational Science (ICERCS), pages 1-6. IEEE, 2024. URL http://ieeexplore.ieee.org/abstract/document/10895578." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 688, + 505, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 688, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 111, + 688, + 505, + 722 + ], + "type": "text", + "content": "[645] William Saunders, Catherine Yeh, Jeff Wu, Steven Bills, Long Ouyang, Jonathan Ward, and Jan Leike. Self-critiquing models for assisting human evaluators. arXiv preprint arXiv:2206.05802, 2022." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "77" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 76 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "text", + "content": "[646] Nikunj Saunshi, Nishanth Dikkala, Zhiyuan Li, Sanjiv Kumar, and Sashank J Reddi. Reasoning with latent thoughts: On the power of looped transformers. arXiv preprint arXiv:2502.17416, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 109, + 506, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 109, + 506, + 153 + ], + "spans": [ + { + "bbox": [ + 111, + 109, + 506, + 153 + ], + "type": "text", + "content": "[647] Mark Schöne, Babak Rahmani, Heiner Kremer, Fabian Falck, Hitesh Ballani, and Jannes Gladrow. Implicit language models are RNNs: Balancing parallelization and expressivity. In *Forty-second International Conference on Machine Learning*, May 2025. URL https://openreview.net/forum?id=5EbiopWH6e." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 156, + 504, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 156, + 504, + 179 + ], + "spans": [ + { + "bbox": [ + 111, + 156, + 504, + 179 + ], + "type": "text", + "content": "[648] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 182, + 504, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 182, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 111, + 182, + 504, + 217 + ], + "type": "text", + "content": "[649] ByteDance Seed, Jiaze Chen, Tiantian Fan, Xin Liu, Lingjun Liu, Zhiqi Lin, Mingxuan Wang, Chengyi Wang, Xiangpeng Wei, Wenyuan Xu, et al. Seed1. 5-thinking: Advancing superb reasoning models with reinforcement learning. arXiv preprint arXiv:2504.13914, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 219, + 506, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 219, + 506, + 297 + ], + "spans": [ + { + "bbox": [ + 111, + 219, + 506, + 297 + ], + "type": "text", + "content": "[650] Amrith Setlur, Saurabh Garg, Xinyang Geng, Naman Garg, Virginia Smith, and Aviral Kumar. Rl on incorrect synthetic data scales the efficiency of lIm math reasoning by eight-fold. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 43000-43031. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/4b77d5b896c321a29277524a98a50215-Paper-Conference.pdf." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 300, + 506, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 300, + 506, + 355 + ], + "spans": [ + { + "bbox": [ + 111, + 300, + 506, + 355 + ], + "type": "text", + "content": "[651] Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for LLM reasoning. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=A6Y7Aq1zLW." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 357, + 504, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 357, + 504, + 381 + ], + "spans": [ + { + "bbox": [ + 111, + 357, + 504, + 381 + ], + "type": "text", + "content": "[652] Amrith Setlur, Nived Rajaraman, Sergey Levine, and Aviral Kumar. Scaling test-time compute without verification or r1 is suboptimal. arXiv preprint arXiv:2502.12118, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 384, + 504, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 384, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 111, + 384, + 504, + 418 + ], + "type": "text", + "content": "[653] Amrith Setlur, Matthew YR Yang, Charlie Snell, Jeremy Greer, Ian Wu, Virginia Smith, Max Simchowitz, and Aviral Kumar. e3: Learning to explore enables extrapolation of test-time compute for llms. arXiv preprint arXiv:2506.09026, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 420, + 504, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 420, + 504, + 443 + ], + "spans": [ + { + "bbox": [ + 111, + 420, + 504, + 443 + ], + "type": "text", + "content": "[654] Yu Shang, Yu Li, Fengli Xu, and Yong Li. Synergy-of-thoughts: Eliciting efficient reasoning in hybrid language models. 
arXiv preprint arXiv:2402.02563, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 446, + 506, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 446, + 506, + 480 + ], + "spans": [ + { + "bbox": [ + 111, + 446, + 506, + 480 + ], + "type": "text", + "content": "[655] Rulin Shao, Shuyue Stella Li, Rui Xin, Scott Geng, Yiping Wang, Sewoong Oh, Simon Shaolei Du, Nathan Lambert, Sewon Min, Ranjay Krishna, et al. Spurious rewards: Rethinking training signals in rlvr. arXiv preprint arXiv:2506.10947, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 483, + 506, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 483, + 506, + 517 + ], + "spans": [ + { + "bbox": [ + 111, + 483, + 506, + 517 + ], + "type": "text", + "content": "[656] Wenqi Shao, Qiaosheng Zhang, Lingxiao Du, Xiangyan Liu, and Fanqing Meng. R1-multimodal-journey. https://github.com/FanqingM/R1-Multimodal-Journey, February 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 520, + 506, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 520, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 111, + 520, + 506, + 597 + ], + "type": "text", + "content": "[657] Zhihong Shao, Yeyun Gong, Yelong Shen, Minlie Huang, Nan Duan, and Weizhu Chen. Synthetic prompting: Generating chain-of-thought demonstrations for large language models. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett, editors, Proceedings of the 40th International Conference on Machine Learning, volume 202 of Proceedings of Machine Learning Research, pages 30706-30775. PMLR, 23-29 Jul 2023. URL https://proceedings.mlr.press/v202/shao23a.html." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 600, + 504, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 600, + 504, + 634 + ], + "spans": [ + { + "bbox": [ + 111, + 600, + 504, + 634 + ], + "type": "text", + "content": "[658] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 637, + 504, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 637, + 504, + 660 + ], + "spans": [ + { + "bbox": [ + 111, + 637, + 504, + 660 + ], + "type": "text", + "content": "[659] Shuaijie She, Junxiao Liu, Yifeng Liu, Jiajun Chen, Xin Huang, and Shujian Huang. R-prm: Reasoning-driven process reward modeling. arXiv preprint arXiv:2503.21295, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 662, + 504, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 662, + 504, + 696 + ], + "spans": [ + { + "bbox": [ + 111, + 662, + 504, + 696 + ], + "type": "text", + "content": "[660] Haozhan Shen, Zilun Zhang, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. https://github.com/om-ai-lab/VLM-R1, February 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 700, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 700, + 504, + 723 + ], + "type": "text", + "content": "[661] Maohao Shen, Guangtao Zeng, Zhenting Qi, Zhang-Wei Hong, Zhenfang Chen, Wei Lu, Gregory Wornell, Subhro Das, David Cox, and Chuang Gan. 
Satori: Reinforcement learning" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "78" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 77 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 136, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 136, + 72, + 505, + 95 + ], + "type": "text", + "content": "with chain-of-action-thought enhances llm reasoning via autoregressive search. arXiv preprint arXiv:2502.02508, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 99, + 505, + 122 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 99, + 505, + 122 + ], + "spans": [ + { + "bbox": [ + 111, + 99, + 505, + 122 + ], + "type": "text", + "content": "[662] Xuan Shen, Yizhou Wang, Xiangxi Shi, Yanzhi Wang, Pu Zhao, and Jiuming Gu. Efficient reasoning with hidden thinking. arXiv preprint arXiv:2501.19201, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 126, + 506, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 126, + 506, + 160 + ], + "spans": [ + { + "bbox": [ + 111, + 126, + 506, + 160 + ], + "type": "text", + "content": "[663] Yi Shen, Jian Zhang, Jieyun Huang, Shuming Shi, Wenjing Zhang, Jiangze Yan, Ning Wang, Kai Wang, and Shiguo Lian. Dast: Difficulty-adaptive slow-thinking for large reasoning models. arXiv preprint arXiv:2503.04472, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 164, + 504, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 164, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 111, + 164, + 504, + 198 + ], + "type": "text", + "content": "[664] Yifan Shen, Yuanzhe Liu, Jingyuan Zhu, Xu Cao, Xiaofeng Zhang, Yixiao He, Wenming Ye, James Matthew Rehg, and Ismini Lourentzou. Fine-grained preference optimization improves spatial reasoning in vlms. arXiv preprint arXiv:2506.21656, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 202, + 504, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 202, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 111, + 202, + 504, + 236 + ], + "type": "text", + "content": "[665] Leheng Sheng, An Zhang, Zijian Wu, Weixiang Zhao, Changshuo Shen, Yi Zhang, Xiang Wang, and Tat-Seng Chua. On reasoning strength planning in large reasoning models. arXiv preprint arXiv:2506.08390, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 240, + 506, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 240, + 506, + 274 + ], + "spans": [ + { + "bbox": [ + 111, + 240, + 506, + 274 + ], + "type": "text", + "content": "[666] Hengyu Shi, Junhao Su, Huansheng Ning, Xiaoming Wei, and Jialin Gao. Layoutcot: Unleashing the deep reasoning potential of large language models for layout generation. arXiv preprint arXiv:2504.10829, 2025." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 279, + 506, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 279, + 506, + 312 + ], + "spans": [ + { + "bbox": [ + 111, + 279, + 506, + 312 + ], + "type": "text", + "content": "[667] Junhao Shi, Zhaoye Fei, Siyin Wang, Qipeng Guo, Jingjing Gong, and Xipeng Qiu. World-aware planning narratives enhance large vision-language model planner. arXiv preprint arXiv:2506.21230, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 317, + 506, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 317, + 506, + 350 + ], + "spans": [ + { + "bbox": [ + 111, + 317, + 506, + 350 + ], + "type": "text", + "content": "[668] Wenhao Shi, Zhiqiang Hu, Yi Bin, Yang Yang, See-Kiong Ng, and Heng Tao Shen. Multimodal mathematical reasoning with diverse solving perspective. arXiv preprint arXiv:2507.02804, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 355, + 506, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 355, + 506, + 422 + ], + "spans": [ + { + "bbox": [ + 111, + 355, + 506, + 422 + ], + "type": "text", + "content": "[669] Noah Shinn, Federico Cassano, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. Reflexion: language agents with verbal reinforcement learning. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 8634-8652. Curran Associates, Inc., December 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/1b44b878bb782e6954cd888628510e90-Paper-Conference.pdf." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 426, + 504, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 426, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 111, + 426, + 504, + 460 + ], + "type": "text", + "content": "[670] Safal Shrestha, Minwu Kim, and Keith Ross. Mathematical reasoning in large language models: Assessing logical and arithmetic errors across wide numerical ranges. arXiv preprint arXiv:2502.08680, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 464, + 506, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 464, + 506, + 531 + ], + "spans": [ + { + "bbox": [ + 111, + 464, + 506, + 531 + ], + "type": "text", + "content": "[671] Kashun Shum, Shizhe Diao, and Tong Zhang. Automatic prompt augmentation and selection with chain-of-thought from labeled data. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 12113-12139, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.811. URL https://aclanthology.org/2023.findings-emnlp.811/." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 536, + 506, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 536, + 506, + 559 + ], + "spans": [ + { + "bbox": [ + 111, + 536, + 506, + 559 + ], + "type": "text", + "content": "[672] Chenglei Si, Diyi Yang, and Tatsunori Hashimoto. Can llms generate novel research ideas? a large-scale human study with " + }, + { + "bbox": [ + 111, + 536, + 506, + 559 + ], + "type": "inline_equation", + "content": "100+" + }, + { + "bbox": [ + 111, + 536, + 506, + 559 + ], + "type": "text", + "content": " nlp researchers. arXiv preprint arXiv:2409.04109, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 563, + 506, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 563, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 111, + 563, + 506, + 596 + ], + "type": "text", + "content": "[673] Sam Silver, Jimin Sun, Ivan Zhang, Sara Hooker, and Eddie Kim. Language models can perform single-utterance self-correction of perturbed reasoning. arXiv preprint arXiv:2506.15894, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 601, + 504, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 601, + 504, + 646 + ], + "spans": [ + { + "bbox": [ + 111, + 601, + 504, + 646 + ], + "type": "text", + "content": "[674] Avi Singh, John D Co-Reyes, Rishabh Agarwal, Ankesh Anand, Piyush Patil, Xavier Garcia, Peter J Liu, James Harrison, Jaehoon Lee, Kelvin Xu, et al. Beyond human data: Scaling self-training for problem-solving with language models. Transactions on Machine Learning Research, April 2024. URL https://openreview.net/pdf?id=lnAyUngGFK." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 650, + 504, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 650, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 111, + 650, + 504, + 684 + ], + "type": "text", + "content": "[675] Oscar Skean, Md Rifat Arefin, Dan Zhao, Niket Patel, Jalal Naghiyev, Yann LeCun, and Ravid Shwartz-Ziv. Layer by layer: Uncovering hidden representations in language models. arXiv preprint arXiv:2502.02013, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 688, + 506, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 688, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 111, + 688, + 506, + 721 + ], + "type": "text", + "content": "[676] Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. 
Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314, 2024." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "79" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 78 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[677] Huatong Song, Jinhao Jiang, Yingqian Min, Jie Chen, Zhipeng Chen, Wayne Xin Zhao, Lei Fang, and Ji-Rong Wen. R1-searcher: Incentivizing the search capability in llms via reinforcement learning. arXiv preprint arXiv:2503.05592, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 110, + 506, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 506, + 144 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 506, + 144 + ], + "type": "text", + "content": "[678] Jiwon Song, Dongwon Jo, Yulhwa Kim, and Jae-Joon Kim. Reasoning path compression: Compressing generation trajectories for efficient ltm reasoning. arXiv preprint arXiv:2505.13866, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 148, + 505, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 148, + 505, + 182 + ], + "spans": [ + { + "bbox": [ + 111, + 148, + 505, + 182 + ], + "type": "text", + "content": "[679] Mingyang Song, Zhaochen Su, Xiaoye Qu, Jiawei Zhou, and Yu Cheng. Prmbench: A fine-grained and challenging benchmark for process-level reward models. arXiv preprint arXiv:2501.03124, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 185, + 505, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 185, + 505, + 220 + ], + "spans": [ + { + "bbox": [ + 111, + 185, + 505, + 220 + ], + "type": "text", + "content": "[680] Mingyang Song, Mao Zheng, Zheng Li, Wenjie Yang, Xuan Luo, Yue Pan, and Feng Zhang. Fastcurl: Curriculum reinforcement learning with stage-wise context scaling for efficient training r1-like reasoning models. arXiv preprint arXiv:2503.17287, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 223, + 505, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 223, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 111, + 223, + 505, + 258 + ], + "type": "text", + "content": "[681] Woomin Song, Saket Dingliwal, Sai Muralidhar Jayanthi, Bhavana Ganesh, Jinwoo Shin, Aram Galstyan, and Sravan Babu Bodapati. Accelerated test-time scaling with model-free speculative sampling. arXiv preprint arXiv:2506.04708, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 261, + 505, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 261, + 505, + 295 + ], + "spans": [ + { + "bbox": [ + 111, + 261, + 505, + 295 + ], + "type": "text", + "content": "[682] Xiaoshuai Song, Yanan Wu, Weixun Wang, Jiaheng Liu, Wenbo Su, and Bo Zheng. Progco: Program helps self-correction of large language models. arXiv preprint arXiv:2501.01264, 2025." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 299, + 505, + 344 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 299, + 505, + 344 + ], + "spans": [ + { + "bbox": [ + 111, + 299, + 505, + 344 + ], + "type": "text", + "content": "[683] Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. arXiv preprint arXiv:2409.12183, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 348, + 506, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 348, + 506, + 393 + ], + "spans": [ + { + "bbox": [ + 111, + 348, + 506, + 393 + ], + "type": "text", + "content": "[684] Zayne Rea Sprague, Xi Ye, Kaj Bostrom, Swarat Chaudhuri, and Greg Durrett. MuSR: Testing the limits of chain-of-thought with multistep soft reasoning. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=jenyYQzuel." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 396, + 504, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 396, + 504, + 421 + ], + "spans": [ + { + "bbox": [ + 111, + 396, + 504, + 421 + ], + "type": "text", + "content": "[685] Gaurav Srivastava, Shuxiang Cao, and Xuan Wang. Towards reasoning ability of small language models. arXiv preprint arXiv:2502.11569, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 423, + 504, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 423, + 504, + 448 + ], + "spans": [ + { + "bbox": [ + 111, + 423, + 504, + 448 + ], + "type": "text", + "content": "[686] Saksham Sahai Srivastava and Vaneet Aggarwal. A technical survey of reinforcement learning techniques for large language models. arXiv preprint arXiv:2507.04136, 2025." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 450, + 504, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 450, + 504, + 474 + ], + "spans": [ + { + "bbox": [ + 111, + 450, + 504, + 474 + ], + "type": "text", + "content": "[687] Saksham Sahai Srivastava and Ashutosh Gandhi. Mathdivide: Improved mathematical reasoning by large language models. arXiv preprint arXiv:2405.13004, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 477, + 505, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 477, + 505, + 521 + ], + "spans": [ + { + "bbox": [ + 111, + 477, + 505, + 521 + ], + "type": "text", + "content": "[688] Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. Chain of thoughtlessness? an analysis of cot in planning. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id= kPBEAZU5Nm." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 526, + 506, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 526, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 111, + 526, + 506, + 594 + ], + "type": "text", + "content": "[689] Nisan Stiennon, Long Ouyang, Jeffrey Wu, Daniel Ziegler, Ryan Lowe, Chelsea Voss, Alec Radford, Dario Amodei, and Paul F Christiano. Learning to summarize with human feedback. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 3008-3021. Curran Associates, Inc., December 2020. URL https://proceedings.neurips.cc/paper_files/paper/2020/file/1f89885d556929e98d3ef9b86448f951-Paper.pdf." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 597, + 504, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 597, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 111, + 597, + 504, + 620 + ], + "type": "text", + "content": "[690] Josefa Lia Stoisser, Marc Boubnovski Martell, and Julien Fauqueur. Sparks of tabular reasoning via text2sql reinforcement learning. arXiv preprint arXiv:2505.00016, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 624, + 506, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 624, + 506, + 658 + ], + "spans": [ + { + "bbox": [ + 111, + 624, + 506, + 658 + ], + "type": "text", + "content": "[691] DiJia Su, Sainbayar Sukhbaatar, Michael Rabbat, Yuandong Tian, and Qinqing Zheng. Dualformer: Controllable fast and slow thinking by learning with randomized reasoning traces. arXiv preprint arXiv:2410.09918, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 662, + 504, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 662, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 662, + 504, + 685 + ], + "type": "text", + "content": "[692] Jinyan Su and Claire Cardie. Thinking fast and right: Balancing accuracy and reasoning length with adaptive rewards. arXiv preprint arXiv:2505.18298, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 688, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 688, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 688, + 504, + 723 + ], + "type": "text", + "content": "[693] Yi Su, Dian Yu, Linfeng Song, Juntao Li, Haitao Mi, Zhaopeng Tu, Min Zhang, and Dong Yu. Expanding rl with verifiable rewards across diverse domains. arXiv preprint arXiv:2503.23829, 2025." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "80" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 79 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[694] Zhaochen Su, Peng Xia, Hangyu Guo, Zhenhua Liu, Yan Ma, Xiaoye Qu, Jiaqi Liu, Yanshu Li, Kaide Zeng, Zhengyuan Yang, et al. Thinking with images for multimodal reasoning: Foundations, methods, and future frontiers. arXiv preprint arXiv:2506.23918, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 108, + 504, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 108, + 504, + 152 + ], + "spans": [ + { + "bbox": [ + 111, + 108, + 504, + 152 + ], + "type": "text", + "content": "[695] Guangyan Sun, Mingyu Jin, Zhenting Wang, Cheng-Long Wang, Siqi Ma, Qifan Wang, Tong Geng, Ying Nian Wu, Yongfeng Zhang, and Dongfang Liu. Visual agents as fast and slow thinkers. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=ncCuiD3KJQ." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 156, + 504, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 156, + 504, + 190 + ], + "spans": [ + { + "bbox": [ + 111, + 156, + 504, + 190 + ], + "type": "text", + "content": "[696] Jiankai Sun, Chuanyang Zheng, Enze Xie, Zhengying Liu, Ruihang Chu, Jianing Qiu, Jiaqi Xu, Mingyu Ding, Hongyang Li, Mengzhe Geng, et al. A survey of reasoning with foundation models. arXiv preprint arXiv:2312.11562, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 193, + 505, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 193, + 505, + 226 + ], + "spans": [ + { + "bbox": [ + 111, + 193, + 505, + 226 + ], + "type": "text", + "content": "[697] Linzhuang Sun, Hao Liang, Jingxuan Wei, Bihui Yu, Tianpeng Li, Fan Yang, Zenan Zhou, and Wentao Zhang. Mm-verify: Enhancing multimodal reasoning with chain-of-thought verification. arXiv preprint arXiv:2502.13383, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 228, + 505, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 228, + 505, + 296 + ], + "spans": [ + { + "bbox": [ + 111, + 228, + 505, + 296 + ], + "type": "text", + "content": "[698] Qiushi Sun, Zhoumianze Liu, Chang Ma, Zichen Ding, Fangzhi Xu, Zhangyue Yin, Haiteng Zhao, Zhenyu Wu, Kanzhi Cheng, Zhaoyang Liu, Jianing Wang, Qintong Li, Robert Tang, Tianbao Xie, Xiachong Feng, Xiang Li, Ben Kao, Wenhai Wang, Biqing Qi, Lingpeng Kong, and Zhiyong Wu. Scienceboard: Evaluating multimodal autonomous agents in realistic scientific workflows. In ICML 2025 Workshop on Computer Use Agents, June 2025. URL https://openreview.net/forum?id=CTtuHMeU5e." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 298, + 504, + 343 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 298, + 504, + 343 + ], + "spans": [ + { + "bbox": [ + 111, + 298, + 504, + 343 + ], + "type": "text", + "content": "[699] Shengyang Sun, Yian Zhang, Alexander Bukharin, David Mosallanezhad, Jiaqi Zeng, Soumye Singhal, Gerald Shen, Adi Renduchintala, Tugrul Konuk, Yi Dong, et al. Reward-aware preference optimization: A unified mathematical framework for model alignment. arXiv preprint arXiv:2502.00203, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 346, + 505, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 346, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 111, + 346, + 505, + 380 + ], + "type": "text", + "content": "[700] Wei Sun, Qianlong Du, Fuwei Cui, and Jiajun Zhang. An efficient and precise training data construction framework for process-supervised reward model in mathematical reasoning. arXiv preprint arXiv:2503.02382, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 382, + 505, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 382, + 505, + 426 + ], + "spans": [ + { + "bbox": [ + 111, + 382, + 505, + 426 + ], + "type": "text", + "content": "[701] Yifan Sun, Jingyan Shen, Yibin Wang, Tianyu Chen, Zhendong Wang, Mingyuan Zhou, and Huan Zhang. Improving data efficiency for ltm reinforcement fine-tuning through difficulty-targeted online data selection and rollout replay. arXiv preprint arXiv:2506.05316, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 429, + 504, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 429, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 111, + 429, + 504, + 464 + ], + "type": "text", + "content": "[702] Yuhong Sun, Zhangyue Yin, Xuanjing Huang, Xipeng Qiu, and Hui Zhao. 
Error classification of large language models on math word problems: A dynamically adaptive framework. arXiv preprint arXiv:2501.15581, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 466, + 505, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 466, + 505, + 499 + ], + "spans": [ + { + "bbox": [ + 111, + 466, + 505, + 499 + ], + "type": "text", + "content": "[703] Zhongxiang Sun, Qipeng Wang, Weijie Yu, Xiaoxue Zang, Kai Zheng, Jun Xu, Xiao Zhang, Song Yang, and Han Li. Rearter: Retrieval-augmented reasoning with trustworthy process rewarding. arXiv preprint arXiv:2501.07861, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 502, + 505, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 502, + 505, + 569 + ], + "spans": [ + { + "bbox": [ + 111, + 502, + 505, + 569 + ], + "type": "text", + "content": "[704] Richard S Sutton, David McAllester, Satinder Singh, and Yishay Mansour. Policy gradient methods for reinforcement learning with function approximation. In S. Solla, T. Leen, and K. Müller, editors, Advances in Neural Information Processing Systems, volume 12. MIT Press, November 1999. URL https://proceedings.neurips.cc/paper_files/paper/1999/file/464d828b85b0bed98e80ade0a5c43b0f-Paper.pdf." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 571, + 505, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 571, + 505, + 649 + ], + "spans": [ + { + "bbox": [ + 111, + 571, + 505, + 649 + ], + "type": "text", + "content": "[705] Mirac Suzgun, Nathan Scales, Nathanael Schärli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc Le, Ed Chi, Denny Zhou, and Jason Wei. Challenging BIG-bench tasks and whether chain-of-thought can solve them. 
In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Findings of the Association for Computational Linguistics: ACL 2023, pages 13003-13051, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.824. URL https://aclanthology.org/2023-findings-acl.824/." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 652, + 505, + 686 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 652, + 505, + 686 + ], + "spans": [ + { + "bbox": [ + 111, + 652, + 505, + 686 + ], + "type": "text", + "content": "[706] Jihoon Tack, Jack Lanchantin, Jane Yu, Andrew Cohen, Ilia Kulikov, Janice Lan, Shibo Hao, Yuandong Tian, Jason Weston, and Xian Li. Llm pretraining with continuous concepts. arXiv preprint arXiv:2502.08524, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 689, + 505, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 505, + 723 + ], + "type": "text", + "content": "[707] Huajie Tan, Yuheng Ji, Xiaoshuai Hao, Minglan Lin, Pengwei Wang, Zhongyuan Wang, and Shanghang Zhang. Reason-rft: Reinforcement fine-tuning for visual reasoning. arXiv preprint arXiv:2503.20752, 2025." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "81" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 80 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 508, + 723 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 506, + 140 + ], + "type": "text", + "content": "[708] Juanhe (TJ) Tan. Causal abstraction for chain-of-thought reasoning in arithmetic word problems. In Yonatan Belinkov, Sophie Hao, Jaap Jumelet, Najoung Kim, Arya McCarthy, and Hosein Mohebbi, editors, Proceedings of the 6th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP, pages 155–168, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.blackboxnlp-1.12. URL https://aclanthology.org/2023.blackboxnlp-1.12." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 142, + 506, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 142, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 111, + 142, + 506, + 176 + ], + "type": "text", + "content": "[709] Sijun Tan, Siyuan Zhuang, Kyle Montgomery, William Y Tang, Alejandro Cuadron, Chenguang Wang, Raluca Ada Popa, and Ion Stoica. 
Judgebench: A benchmark for evaluating llm-based judges. arXiv preprint arXiv:2410.12784, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 178, + 506, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 178, + 506, + 223 + ], + "spans": [ + { + "bbox": [ + 111, + 178, + 506, + 223 + ], + "type": "text", + "content": "[710] Xiaoyu Tan, Tianchu Yao, Chao Qu, Bin Li, Minghao Yang, Dakuan Lu, Haozhe Wang, Xihe Qiu, Wei Chu, Yinghui Xu, et al. Aurora: Automated training framework of universal process reward models via ensemble prompting and reverse verification. arXiv preprint arXiv:2502.11520, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 226, + 504, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 226, + 504, + 261 + ], + "spans": [ + { + "bbox": [ + 111, + 226, + 504, + 261 + ], + "type": "text", + "content": "[711] Kexian Tang, Junyao Gao, Yanhong Zeng, Haodong Duan, Yanan Sun, Zhening Xing, Wenran Liu, Kaifeng Lyu, and Kai Chen. Lego-puzzles: How good are mllms at multi-step spatial reasoning? arXiv preprint arXiv:2503.19990, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 263, + 506, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 263, + 506, + 297 + ], + "spans": [ + { + "bbox": [ + 111, + 263, + 506, + 297 + ], + "type": "text", + "content": "[712] Yihong Tang, Kehai Chen, Muyun Yang, Zhengyu Niu, Jing Li, Tiejun Zhao, and Min Zhang. Thinking in character: Advancing role-playing agents with role-aware reasoning. arXiv preprint arXiv:2506.01748, 2025." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 300, + 504, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 300, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 111, + 300, + 504, + 335 + ], + "type": "text", + "content": "[713] Zhengyang Tang, Ziniu Li, Zhenyang Xiao, Tian Ding, Ruoyu Sun, Benyou Wang, Dayiheng Liu, Fei Huang, Tianyu Liu, Bowen Yu, et al. Enabling scalable oversight via self-evolving critic. arXiv preprint arXiv:2501.05727, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 337, + 504, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 337, + 504, + 371 + ], + "spans": [ + { + "bbox": [ + 111, + 337, + 504, + 371 + ], + "type": "text", + "content": "[714] Zhengyang Tang, Ziniu Li, Zhenyang Xiao, Tian Ding, Ruoyu Sun, Benyou Wang, Dayiheng Liu, Fei Huang, Tianyu Liu, Bowen Yu, et al. Realcritic: Towards effectiveness-driven evaluation of language model critiques. arXiv preprint arXiv:2501.14492, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 374, + 504, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 374, + 504, + 407 + ], + "spans": [ + { + "bbox": [ + 111, + 374, + 504, + 407 + ], + "type": "text", + "content": "[715] Sree Harsha Tanneru, Dan Ley, Chirag Agarwal, and Himabindu Lakkaraju. On the hardness of faithful chain-of-thought reasoning in large language models. arXiv preprint arXiv:2406.10625, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 411, + 506, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 411, + 506, + 444 + ], + "spans": [ + { + "bbox": [ + 111, + 411, + 506, + 444 + ], + "type": "text", + "content": "[716] Amir Taubenfeld, Tom Sheffer, Eran Ofek, Amir Feder, Ariel Goldstein, Zorik Gekhman, and Gal Yona. Confidence improves self-consistency in llms. arXiv preprint arXiv:2502.06233, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 447, + 508, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 447, + 508, + 471 + ], + "spans": [ + { + "bbox": [ + 111, + 447, + 508, + 471 + ], + "type": "text", + "content": "[717] DolphinR1 Team. Dolphin R1. https://huggingface.co/datasets/cognitivecomputations/dolphin-r1, February 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 474, + 506, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 474, + 506, + 497 + ], + "spans": [ + { + "bbox": [ + 111, + 474, + 506, + 497 + ], + "type": "text", + "content": "[718] Fancy-MLLM Team. R1 Onevision. https://huggingface.co/datasets/Fancy-MLLM/R1-Onevision, February 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 499, + 506, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 499, + 506, + 534 + ], + "spans": [ + { + "bbox": [ + 111, + 499, + 506, + 534 + ], + "type": "text", + "content": "[719] Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 536, + 506, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 536, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 111, + 536, + 506, + 581 + ], + "type": "text", + "content": "[720] Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, et al. Gemma 2: Improving open language models at a practical size. arXiv preprint arXiv:2408.00118, 2024." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 584, + 479, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 584, + 479, + 597 + ], + "spans": [ + { + "bbox": [ + 111, + 584, + 479, + 597 + ], + "type": "text", + "content": "[721] Huggingface Team. Open r1. https://github.com/huggingface/open-r1, January 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 600, + 506, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 600, + 506, + 633 + ], + "spans": [ + { + "bbox": [ + 111, + 600, + 506, + 633 + ], + "type": "text", + "content": "[722] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 636, + 506, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 636, + 506, + 670 + ], + "spans": [ + { + "bbox": [ + 111, + 636, + 506, + 670 + ], + "type": "text", + "content": "[723] NovaSky Team. Think less, achieve more: Cut reasoning costs by " + }, + { + "bbox": [ + 111, + 636, + 506, + 670 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 111, + 636, + 506, + 670 + ], + "type": "text", + "content": " without sacrificing accuracy. https://novasky-ai.github.io/posts/reduce-overthinking, January 2025. Accessed: 2025-01-23." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 673, + 506, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 673, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 111, + 673, + 506, + 696 + ], + "type": "text", + "content": "[724] NovaSky Team. Sky-t1: Train your own o1 preview model within $ 450. https://novaskyai.github.io/posts/sky-t1, January 2025. Accessed: 2025-01-09." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 111, + 700, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 700, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 700, + 506, + 723 + ], + "type": "text", + "content": "[725] NVIDIA Team. Mistral-nemo-12b-instruct. https://huggingface.co/nvidia/Mistral-NeMo-12B-Instruct, July 2024." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "82" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 81 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "type": "text", + "content": "[726] OpenDeepResearch Team. Open deep research. https://github.com/nickscamara/open-deepresearch, February 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 98, + 493, + 111 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 98, + 493, + 111 + ], + "spans": [ + { + "bbox": [ + 111, + 98, + 493, + 111 + ], + "type": "text", + "content": "[727] OpenO1 Team. Open o1. https://github.com/Open-Source-O1/Open-O1, February 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 113, + 506, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 113, + 506, + 136 + ], + "spans": [ + { + "bbox": [ + 111, + 113, + 506, + 136 + ], + "type": "text", + "content": "[728] OpenR1 Team. Open r1 math 200k. https://huggingface.co/datasets/open-r1/OpenR1-Math-220k, February 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 138, + 452, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 138, + 452, + 152 + ], + "spans": [ + { + "bbox": [ + 111, + 138, + 452, + 152 + ], + "type": "text", + "content": "[729] OpenThoughts Team. Open Thoughts. https://open-thoughts.ai, January 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 153, + 506, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 153, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 111, + 153, + 506, + 176 + ], + "type": "text", + "content": "[730] PowerInfer Team. QwQ LongCoT 500k. https://huggingface.co/datasets/PowerInfer/QWQ-LONGCOT-500K, January 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 178, + 506, + 202 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 178, + 506, + 202 + ], + "spans": [ + { + "bbox": [ + 111, + 178, + 506, + 202 + ], + "type": "text", + "content": "[731] QwQ Team. Qwq: Reflect deeply on the boundaries of the unknown. https://qwenlm.github.io/blog/qwq-32b-preview/, November 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 205, + 429, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 205, + 429, + 217 + ], + "spans": [ + { + "bbox": [ + 111, + 205, + 429, + 217 + ], + "type": "text", + "content": "[732] X-R1 Team. X-r1. https://github.com/dhcode-cpp/X-R1, February 2025." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 220, + 505, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 220, + 505, + 243 + ], + "spans": [ + { + "bbox": [ + 111, + 220, + 505, + 243 + ], + "type": "text", + "content": "[733] Fengwei Teng, Zhaoyang Yu, Quan Shi, Jiayi Zhang, Chenglin Wu, and Yuyu Luo. Atom of thoughts for markov ltm test-time scaling. arXiv preprint arXiv:2502.12018, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 245, + 505, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 245, + 505, + 280 + ], + "spans": [ + { + "bbox": [ + 111, + 245, + 505, + 280 + ], + "type": "text", + "content": "[734] Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, et al. Llamav-o1: Rethinking step-by-step visual reasoning in llms. arXiv preprint arXiv:2501.06186, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 282, + 505, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 282, + 505, + 316 + ], + "spans": [ + { + "bbox": [ + 111, + 282, + 505, + 316 + ], + "type": "text", + "content": "[735] George Thomas, Alex J Chan, Jikun Kang, Wenqi Wu, Filippos Christianos, Fraser Greenlee, Andy Toulis, and Marvin Purtorab. Webgames: Challenging general-purpose web-browsing ai agents. arXiv preprint arXiv:2502.18356, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 319, + 505, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 319, + 505, + 352 + ], + "spans": [ + { + "bbox": [ + 111, + 319, + 505, + 352 + ], + "type": "text", + "content": "[736] Xiaoyu Tian, Sitong Zhao, Haotian Wang, Shuaiang Chen, Yunjie Ji, Yiping Peng, Han Zhao, and Xiangang Li. Think twice: Enhancing lIm reasoning by scaling multi-round test-time thinking. arXiv preprint arXiv:2503.19855, 2025." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 355, + 506, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 355, + 506, + 433 + ], + "spans": [ + { + "bbox": [ + 111, + 355, + 506, + 433 + ], + "type": "text", + "content": "[737] Ye Tian, Baolin Peng, Linfeng Song, Lifeng Jin, Dian Yu, Lei Han, Haitao Mi, and Dong Yu. Toward self-improvement of llms via imagination, searching, and criticizing. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 52723-52748. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/5e5853f35164e434015716a8c2a66543-Paper-Conference.pdf." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 436, + 506, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 436, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 111, + 436, + 506, + 502 + ], + "type": "text", + "content": "[738] Yuxuan Tong, Xiwen Zhang, Rui Wang, Ruidong Wu, and Junxian He. Dart-math: Difficulty-aware rejection tuning for mathematical problem-solving. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 7821-7846. Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/0ef1afa0daa888d695dcd5e9513bafa3-Paper-Conference.pdf." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 505, + 505, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 505, + 505, + 540 + ], + "spans": [ + { + "bbox": [ + 111, + 505, + 505, + 540 + ], + "type": "text", + "content": "[739] Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. 
Openmathinstruct-2: Accelerating ai for math with massive open-source instruction data. arXiv preprint arXiv:2410.01560, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 542, + 505, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 542, + 505, + 576 + ], + "spans": [ + { + "bbox": [ + 111, + 542, + 505, + 576 + ], + "type": "text", + "content": "[740] Shubham Toshniwal, Wei Du, Ivan Moshkov, Branislav Kisacanin, Alexan Ayrapetyan, and Igor Gitman. Openmathinstruct-2: Accelerating ai for math with massive open-source instruction data. arXiv preprint arXiv:2410.01560, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 578, + 505, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 578, + 505, + 612 + ], + "spans": [ + { + "bbox": [ + 111, + 578, + 505, + 612 + ], + "type": "text", + "content": "[741] Shubham Toshniwal, Ivan Moshkov, Sean Naresthiran, Daria Gitman, Fei Jia, and Igor Gitman. Openmathinstruct-1: A 1.8 million math instruction tuning dataset. arXiv preprint arXiv: Arxiv-2402.10176, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 615, + 505, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 615, + 505, + 649 + ], + "spans": [ + { + "bbox": [ + 111, + 615, + 505, + 649 + ], + "type": "text", + "content": "[742] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 111, + 652, + 505, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 652, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 652, + 505, + 685 + ], + "type": "text", + "content": "[743] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 111, + 689, + 505, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 505, + 721 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 505, + 721 + ], + "type": "text", + "content": "[744] Christoph Treude and Raula Gaikovina Kula. Interacting with ai reasoning models: Harnessing \"thoughts\" for ai-driven software engineering. arXiv preprint arXiv:2503.00483, 2025." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "83" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 82 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 
506, + 139 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 506, + 139 + ], + "type": "text", + "content": "[745] Luong Trung, Xinbo Zhang, Zhanming Jie, Peng Sun, Xiaoran Jin, and Hang Li. ReFT: Reasoning with reinforced fine-tuning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7601–7614, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.410. URL https://aclanthology.org/2024.acl-long.410/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 141, + 505, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 141, + 505, + 176 + ], + "spans": [ + { + "bbox": [ + 111, + 141, + 505, + 176 + ], + "type": "text", + "content": "[746] Songjun Tu, Jiahao Lin, Qichao Zhang, Xiangyu Tian, Linjing Li, Xiangyuan Lan, and Dongbin Zhao. Learning when to think: Shaping adaptive reasoning in r1-style models via multi-stage rl. arXiv preprint arXiv:2505.10832, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 178, + 504, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 178, + 504, + 201 + ], + "spans": [ + { + "bbox": [ + 111, + 178, + 504, + 201 + ], + "type": "text", + "content": "[747] Benjamin Turtel, Danny Franklin, and Philipp Schoenegger. Llms can teach themselves to better predict the future. arXiv preprint arXiv:2502.05253, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 205, + 506, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 205, + 506, + 239 + ], + "spans": [ + { + "bbox": [ + 111, + 205, + 506, + 239 + ], + "type": "text", + "content": "[748] Martin Tutek, Fateme Hashemi Chaleshtori, Ana Marasović, and Yonatan Belinkov. Measuring faithfulness of chains of thought by unlearning reasoning steps. arXiv preprint arXiv:2502.14829, 2025." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 241, + 505, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 241, + 505, + 277 + ], + "spans": [ + { + "bbox": [ + 111, + 241, + 505, + 277 + ], + "type": "text", + "content": "[749] Jonathan Uesato, Nate Kushner, Ramana Kumar, Francis Song, Noah Siegel, Lisa Wang, Antonia Creswell, Geoffrey Irving, and Irina Higgins. Solving math word problems with process- and outcome-based feedback. arXiv preprint arXiv:2211.14275, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 278, + 505, + 313 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 278, + 505, + 313 + ], + "spans": [ + { + "bbox": [ + 111, + 278, + 505, + 313 + ], + "type": "text", + "content": "[750] Robert Vacareanu, Anurag Pratik, Evangelia Spiliopoulou, Zheng Qi, Giovanni Paolini, Neha Anna John, Jie Ma, Yassine Benajiba, and Miguel Ballesteros. General purpose verification for chain of thought prompting. arXiv preprint arXiv:2405.00204, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 316, + 505, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 316, + 505, + 360 + ], + "spans": [ + { + "bbox": [ + 111, + 316, + 505, + 360 + ], + "type": "text", + "content": "[751] Karthik Valmeekam, Kaya Stechly, and Subbarao Kambhampati. LLMs still can't plan; can LRMs? a preliminary evaluation of openAI's o1 on planbench. In NeurIPS 2024 Workshop on Open-World Agents, October 2024. URL https://openreview.net/forum?id=Gcr1Lx4Koz." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 363, + 506, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 363, + 506, + 397 + ], + "spans": [ + { + "bbox": [ + 111, + 363, + 506, + 397 + ], + "type": "text", + "content": "[752] Jean Vassoyan, Nathanaël Beau, and Roman Plaud. Ignore the kl penalty! 
boosting exploration on critical tokens to enhance rl fine-tuning. arXiv preprint arXiv:2502.06533, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 400, + 506, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 400, + 506, + 479 + ], + "spans": [ + { + "bbox": [ + 111, + 400, + 506, + 479 + ], + "type": "text", + "content": "[753] Tu Vu, Kalpesh Krishna, Salaheddin Alzubi, Chris Tar, Manaal Faruqui, and Yun-Hsuan Sung. Foundational autorators: Taming large language models for better automatic evaluation. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 17086-17105, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10. 18653/v1/2024.emnlp-main.949. URL https://aclanthology.org/2024.emnlp-main.949/." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 481, + 506, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 481, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 111, + 481, + 506, + 515 + ], + "type": "text", + "content": "[754] Guangya Wan, Yuqi Wu, Jie Chen, and Sheng Li. Cot rerailer: Enhancing the reliability of large language models in complex reasoning tasks through error detection and correction. arXiv preprint arXiv:2408.13940, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 518, + 506, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 518, + 506, + 564 + ], + "spans": [ + { + "bbox": [ + 111, + 518, + 506, + 564 + ], + "type": "text", + "content": "[755] Ziyu Wan, Xidong Feng, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. Alphazero-like tree-search can guide large language model decoding and training. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=C4OpREezgj." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 567, + 506, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 567, + 506, + 601 + ], + "spans": [ + { + "bbox": [ + 111, + 567, + 506, + 601 + ], + "type": "text", + "content": "[756] Ziyu Wan, Yunxiang Li, Yan Song, Hanjing Wang, Linyi Yang, Mark Schmidt, Jun Wang, Weinan Zhang, Shuyue Hu, and Ying Wen. Rema: Learning to meta-think for llms with multi-agent reinforcement learning. arXiv preprint arXiv:2503.09501, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 603, + 504, + 627 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 603, + 504, + 627 + ], + "spans": [ + { + "bbox": [ + 111, + 603, + 504, + 627 + ], + "type": "text", + "content": "[757] Ante Wang, Linfeng Song, Ye Tian, Baolin Peng, Dian Yu, Haitao Mi, Jinsong Su, and Dong Yu. Litesearch: Efficacious tree search for lIm. arXiv preprint arXiv:2407.00320, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 629, + 506, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 629, + 506, + 664 + ], + "spans": [ + { + "bbox": [ + 111, + 629, + 506, + 664 + ], + "type": "text", + "content": "[758] Ante Wang, Linfeng Song, Ye Tian, Dian Yu, Haitao Mi, Xiangyu Duan, Zhaopeng Tu, Jinsong Su, and Dong Yu. Don't get lost in the trees: Streamlining llm reasoning by overcoming tree search exploration pitfalls. arXiv preprint arXiv:2502.11183, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 666, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 666, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 666, + 506, + 723 + ], + "type": "text", + "content": "[759] Boshi Wang, Sewon Min, Xiang Deng, Jiaming Shen, You Wu, Luke Zettlemoyer, and Huan Sun. Towards understanding chain-of-thought prompting: An empirical study of what matters. 
In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2717–2739, Toronto, Canada, July 2023. Association for Computational" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "84" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 83 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 136, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 136, + 72, + 505, + 95 + ], + "type": "text", + "content": "Linguistics. doi: 10.18653/v1/2023.acl-long.153. URL https://aclanthology.org/2023.acl-long.153/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 99, + 506, + 132 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 99, + 506, + 132 + ], + "spans": [ + { + "bbox": [ + 111, + 99, + 506, + 132 + ], + "type": "text", + "content": "[760] Chao Wang, Luning Zhang, Zheng Wang, and Yang Zhou. Can large language models unveil the mysteries? an exploration of their ability to unlock information in complex scenarios. arXiv preprint arXiv:2502.19973, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 135, + 506, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 135, + 506, + 168 + ], + "spans": [ + { + "bbox": [ + 111, + 135, + 506, + 168 + ], + "type": "text", + "content": "[761] Chaojie Wang, Yanchen Deng, Zhiyi Lyu, Liang Zeng, Jujie He, Shuicheng Yan, and Bo An. Q*: Improving multi-step reasoning for llms with deliberative planning. arXiv preprint arXiv:2406.14283, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 172, + 504, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 172, + 504, + 206 + ], + "spans": [ + { + "bbox": [ + 111, + 172, + 504, + 206 + ], + "type": "text", + "content": "[762] Chenlong Wang, Yuanning Feng, Dongping Chen, Zhaoyang Chu, Ranjay Krishna, and Tianyi Zhou. Wait, we don't need to\" wait!! removing thinking tokens improves reasoning efficiency. arXiv preprint arXiv:2506.08343, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 209, + 504, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 209, + 504, + 244 + ], + "spans": [ + { + "bbox": [ + 111, + 209, + 504, + 244 + ], + "type": "text", + "content": "[763] Clinton J Wang, Dean Lee, Cristina Menghini, Johannes Mols, Jack Doughty, Adam Khoja, Jayson Lynch, Sean Hendryx, Summer Yue, and Dan Hendrycks. Enigmaeval: A benchmark of long multimodal reasoning challenges. arXiv preprint arXiv:2502.08859, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 247, + 504, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 247, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 111, + 247, + 504, + 270 + ], + "type": "text", + "content": "[764] Danqing Wang, Zhuorui Ye, Fei Fang, and Lei Li. Cooperative strategic planning enhances reasoning capabilities in large language models. arXiv preprint arXiv:2410.20007, 2024." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 273, + 506, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 273, + 506, + 328 + ], + "spans": [ + { + "bbox": [ + 111, + 273, + 506, + 328 + ], + "type": "text", + "content": "[765] Evan Z Wang, Federico Cassano, Catherine Wu, Yunfeng Bai, William Song, Vaskar Nath, Ziwen Han, Sean M. Hendryx, Summer Yue, and Hugh Zhang. Planning in natural language improves LLM search for code generation. In The First Workshop on System-2 Reasoning at Scale, NeurIPS'24, October 2024. URL https://openreview.net/forum?id=B2iSfPNj49." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 331, + 506, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 331, + 506, + 376 + ], + "spans": [ + { + "bbox": [ + 111, + 331, + 506, + 376 + ], + "type": "text", + "content": "[766] Guoxin Wang, Minyu Gao, Shuai Yang, Ya Zhang, Lizhi He, Liang Huang, Hanlin Xiao, Yexuan Zhang, Wanyue Li, Lu Chen, et al. Citrus: Leveraging expert cognitive pathways in a medical language model for advanced medical decision support. arXiv preprint arXiv:2502.18274, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 380, + 504, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 380, + 504, + 423 + ], + "spans": [ + { + "bbox": [ + 111, + 380, + 504, + 423 + ], + "type": "text", + "content": "[767] Hanbin Wang, Xiaoxuan Zhou, Zhipeng Xu, Keyuan Cheng, Yuxin Zuo, Kai Tian, Jingwei Song, Junting Lu, Wenhui Hu, and Xueyang Liu. Code-vision: Evaluating multimodal llms logic understanding and code generation capabilities. arXiv preprint arXiv:2502.11829, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 427, + 504, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 427, + 504, + 452 + ], + "spans": [ + { + "bbox": [ + 111, + 427, + 504, + 452 + ], + "type": "text", + "content": "[768] Hanlin Wang, Jian Wang, Chak Tou Leong, and Wenjie Li. Steca: Step-level trajectory calibration for lIm agent learning. arXiv preprint arXiv:2502.14276, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 454, + 504, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 454, + 504, + 489 + ], + "spans": [ + { + "bbox": [ + 111, + 454, + 504, + 489 + ], + "type": "text", + "content": "[769] Hanyin Wang, Zhenbang Wu, Gururaj Kolar, Hariprasad Korsapati, Brian Bartlett, Bryan Hull, and Jimeng Sun. Reinforcement learning for out-of-distribution reasoning in llms: An empirical study on diagnosis-related group coding. arXiv preprint arXiv:2505.21908, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 491, + 504, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 491, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 111, + 491, + 504, + 525 + ], + "type": "text", + "content": "[770] Hao Wang, Boyi Liu, Yufeng Zhang, and Jie Chen. Seed-cts: Unleashing the power of tree search for superior performance in competitive coding tasks. arXiv preprint arXiv:2412.12544, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 529, + 506, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 529, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 111, + 529, + 506, + 596 + ], + "type": "text", + "content": "[771] Haoxiang Wang, Wei Xiong, Tengyang Xie, Han Zhao, and Tong Zhang. Interpretable preferences via multi-objective reward modeling and mixture-of-experts. 
In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 10582-10592, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.620. URL https://aclanthology.org/2024/findings-emnlp.620/." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 599, + 506, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 599, + 506, + 632 + ], + "spans": [ + { + "bbox": [ + 111, + 599, + 506, + 632 + ], + "type": "text", + "content": "[772] Haoyu Wang, Zeyu Qin, Li Shen, Xueqian Wang, Minhao Cheng, and Dacheng Tao. Leveraging reasoning with guidelines to elicit and utilize knowledge for enhancing safety alignment. arXiv preprint arXiv:2502.04040, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 635, + 506, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 635, + 506, + 669 + ], + "spans": [ + { + "bbox": [ + 111, + 635, + 506, + 669 + ], + "type": "text", + "content": "[773] Huaijie Wang, Shibo Hao, Hanze Dong, Shenao Zhang, Yilin Bao, Ziran Yang, and Yi Wu. Offline reinforcement learning for llm multi-step reasoning. arXiv preprint arXiv:2412.16145, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 673, + 504, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 673, + 504, + 696 + ], + "spans": [ + { + "bbox": [ + 111, + 673, + 504, + 696 + ], + "type": "text", + "content": "[774] Jiaan Wang, Fandong Meng, Yunlong Liang, and Jie Zhou. Drt-o1: Optimized deep reasoning translation via long chain-of-thought. arXiv preprint arXiv:2412.17498, 2024." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 699, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 699, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 699, + 504, + 723 + ], + "type": "text", + "content": "[775] Jiaan Wang, Fandong Meng, and Jie Zhou. Extrans: Multilingual deep reasoning translation via exemplar-enhanced reinforcement learning. arXiv preprint arXiv:2505.12996, 2025." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "85" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 84 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 117 + ], + "type": "text", + "content": "[776] Jiaqi WANG, Yuhang Zhou, Zhixiong Zhang, Qiguang Chen, Yongqiang Chen, and James Cheng. DivIL: Unveiling and addressing over-invariance for out-of-distribution generalization. Transactions on Machine Learning Research, February 2025. ISSN 2835-8856. URL https://openreview.net/forum?id=2Zan4ATYsh." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 119, + 504, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 119, + 504, + 153 + ], + "spans": [ + { + "bbox": [ + 111, + 119, + 504, + 153 + ], + "type": "text", + "content": "[777] Jun Wang, Meng Fang, Ziyu Wan, Muning Wen, Jiachen Zhu, Anjie Liu, Ziqin Gong, Yan Song, Lei Chen, Lionel M Ni, et al. Openr: An open source framework for advanced reasoning with large language models. arXiv preprint arXiv:2410.09671, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 156, + 504, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 156, + 504, + 178 + ], + "spans": [ + { + "bbox": [ + 111, + 156, + 504, + 178 + ], + "type": "text", + "content": "[778] Junlin Wang, Jue Wang, Ben Athiwaratkun, Ce Zhang, and James Zou. Mixture-of-agents enhances large language model capabilities. arXiv preprint arXiv:2406.04692, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 182, + 504, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 182, + 504, + 214 + ], + "spans": [ + { + "bbox": [ + 111, + 182, + 504, + 214 + ], + "type": "text", + "content": "[779] Junxiong Wang, Wen-Ding Li, Daniele Paliotta, Daniel Ritter, Alexander M Rush, and Tri Dao. M1: Towards scalable test-time compute with mamba reasoning models. arXiv preprint arXiv:2504.10449, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 217, + 505, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 217, + 505, + 251 + ], + "spans": [ + { + "bbox": [ + 111, + 217, + 505, + 251 + ], + "type": "text", + "content": "[780] Junyang Wang, Haiyang Xu, Xi Zhang, Ming Yan, Ji Zhang, Fei Huang, and Jitao Sang. Mobile-agent-v: Learning mobile device operation through video-guided multi-agent collaboration. arXiv preprint arXiv:2502.17110, 2025." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 255, + 504, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 255, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 111, + 255, + 504, + 289 + ], + "type": "text", + "content": "[781] Ke Wang, Houxing Ren, Aojun Zhou, Zimu Lu, Sichun Luo, Weikang Shi, Renrui Zhang, Linqi Song, Mingjie Zhan, and Hongsheng Li. Mathcoder: Seamless code integration in llms for enhanced mathematical reasoning. arXiv preprint arXiv:2310.03731, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 291, + 505, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 291, + 505, + 346 + ], + "spans": [ + { + "bbox": [ + 111, + 291, + 505, + 346 + ], + "type": "text", + "content": "[782] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Houxing Ren, Aojun Zhou, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with MATH-vision dataset. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=QWTCcxMpPA." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 350, + 505, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 350, + 505, + 405 + ], + "spans": [ + { + "bbox": [ + 111, + 350, + 505, + 405 + ], + "type": "text", + "content": "[783] Ke Wang, Houxing Ren, Aojun Zhou, Zimu Lu, Sichun Luo, Weikang Shi, Renrui Zhang, Linqi Song, Mingjie Zhan, and Hongsheng Li. Mathcoder: Seamless code integration in LLMs for enhanced mathematical reasoning. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=z8TW0ttBPp." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 407, + 505, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 407, + 505, + 441 + ], + "spans": [ + { + "bbox": [ + 111, + 407, + 505, + 441 + ], + "type": "text", + "content": "[784] Kevin Wang, Junbo Li, Neel P Bhatt, Yihan Xi, Qiang Liu, Ufuk Topcu, and Zhangyang Wang. On the planning abilities of openai's o1 models: Feasibility, optimality, and generalizability. arXiv preprint arXiv:2409.19924, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 445, + 504, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 445, + 504, + 478 + ], + "spans": [ + { + "bbox": [ + 111, + 445, + 504, + 478 + ], + "type": "text", + "content": "[785] Kun Wang, Guibin Zhang, Zhenhong Zhou, Jiahao Wu, Miao Yu, Shiqian Zhao, Chenlong Yin, Jinhu Fu, Yibo Yan, Hanjun Luo, et al. A comprehensive survey in llm (-agent) full stack safety: Data, training and deployment. arXiv preprint arXiv:2504.15585, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 480, + 505, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 480, + 505, + 504 + ], + "spans": [ + { + "bbox": [ + 111, + 480, + 505, + 504 + ], + "type": "text", + "content": "[786] Liang Wang, Haonan Chen, Nan Yang, Xiaolong Huang, Zhicheng Dou, and Furu Wei. Chain-of-retrieval augmented generation. arXiv preprint arXiv:2501.14342, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 506, + 504, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 506, + 504, + 528 + ], + "spans": [ + { + "bbox": [ + 111, + 506, + 504, + 528 + ], + "type": "text", + "content": "[787] Libo Wang. Dynamic chain-of-thought: Towards adaptive deep reasoning. arXiv preprint arXiv:2502.10428, 2025." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 532, + 505, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 532, + 505, + 576 + ], + "spans": [ + { + "bbox": [ + 111, + 532, + 505, + 576 + ], + "type": "text", + "content": "[788] Mengru Wang, Xingyu Chen, Yue Wang, Zhiwei He, Jiahao Xu, Tian Liang, Qizhhi Liu, Yunzhi Yao, Wenxuan Wang, Ruotian Ma, et al. Two experts are all you need for steering thinking: Reinforcing cognitive effort in moe reasoning models without additional training. arXiv preprint arXiv:2505.14681, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 579, + 505, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 579, + 505, + 613 + ], + "spans": [ + { + "bbox": [ + 111, + 579, + 505, + 613 + ], + "type": "text", + "content": "[789] Mingyang Wang, Lukas Lange, Heike Adel, Yunpu Ma, Jannik Strötgen, and Hinrich Schütze. Language mixing in reasoning language models: Patterns, impact, and internal causes. arXiv preprint arXiv:2505.14815, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 616, + 505, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 616, + 505, + 649 + ], + "spans": [ + { + "bbox": [ + 111, + 616, + 505, + 649 + ], + "type": "text", + "content": "[790] Minzheng Wang, Yongbin Li, Haobo Wang, Xinghua Zhang, Nan Xu, Bingli Wu, Fei Huang, Haiyang Yu, and Wenji Mao. Adaptive thinking via mode policy optimization for social language agents. arXiv preprint arXiv:2505.02156, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 652, + 504, + 675 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 652, + 504, + 675 + ], + "spans": [ + { + "bbox": [ + 111, + 652, + 504, + 675 + ], + "type": "text", + "content": "[791] Peifeng Wang, Austin Xu, Yilun Zhou, Caiming Xiong, and Shafiq Joty. Direct judgement preference optimization. 
arXiv preprint arXiv:2409.14664, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 677, + 505, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 677, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 677, + 505, + 723 + ], + "type": "text", + "content": "[792] Peiyi Wang, Lei Li, Zhihong Shao, Runxin Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce LLMs step-by-step without human annotations. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "86" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 85 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 136, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 136, + 72, + 505, + 106 + ], + "type": "text", + "content": "Papers), pages 9426-9439, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.510. URL https://aclanthology.org/2024.acl-long.510/." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 110, + 506, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 506, + 143 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 506, + 143 + ], + "type": "text", + "content": "[793] Peng Wang, Xuesi Hu, Jiageng Wu, Yuntao Zou, Qiancheng Zhang, and Dagang Li. What factors affect llms and rllms in financial question answering? arXiv preprint arXiv:2507.08339, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 148, + 505, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 148, + 505, + 182 + ], + "spans": [ + { + "bbox": [ + 111, + 148, + 505, + 182 + ], + "type": "text", + "content": "[794] Peng Wang, Ruihan Tao, Qiguang Chen, Mengkang Hu, and Libo Qin. X-webagentbench: A multilingual interactive web benchmark for evaluating global agentic system. arXiv preprint arXiv:2505.15372, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 186, + 505, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 186, + 505, + 220 + ], + "spans": [ + { + "bbox": [ + 111, + 186, + 505, + 220 + ], + "type": "text", + "content": "[795] Peng-Yuan Wang, Tian-Shuo Liu, Chenyang Wang, Yi-Di Wang, Shu Yan, Cheng-Xing Jia, Xu-Hui Liu, Xin-Wei Chen, Jia-Cheng Xu, Ziniu Li, et al. A survey on large language models for mathematical reasoning. arXiv preprint arXiv:2506.08446, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 225, + 505, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 225, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 111, + 225, + 505, + 258 + ], + "type": "text", + "content": "[796] Ru Wang, Wei Huang, Selena Song, Haoyu Zhang, Yusuke Iwasawa, Yutaka Matsuo, and Jiaxian Guo. Beyond in-distribution success: Scaling curves of cot granularity for language model generalization. arXiv preprint arXiv:2502.18273, 2025." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 263, + 505, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 263, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 111, + 263, + 505, + 297 + ], + "type": "text", + "content": "[797] Ruida Wang, Rui Pan, Yuxin Li, Jipeng Zhang, Yizhen Jia, Shizhe Diao, Renjie Pi, Junjie Hu, and Tong Zhang. Ma-lot: Model-collaboration lean-based long chain-of-thought reasoning enhances formal theorem proving. arXiv preprint arXiv:2503.03205, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 301, + 506, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 301, + 506, + 369 + ], + "spans": [ + { + "bbox": [ + 111, + 301, + 506, + 369 + ], + "type": "text", + "content": "[798] Ruoyao Wang, Peter Jansen, Marc-Alexandre Côté, and Prithviraj Ammanabrolu. Science-World: Is your agent smarter than a 5th grader? In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang, editors, Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 11279–11298, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.775. URL https://aclanthology.org/2022.emnlp-main.775/." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 373, + 505, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 373, + 505, + 396 + ], + "spans": [ + { + "bbox": [ + 111, + 373, + 505, + 396 + ], + "type": "text", + "content": "[799] Siyuan Wang, Enda Zhao, Zhongyu Wei, and Xiang Ren. Stepwise informativeness search for improving llm reasoning. arXiv preprint arXiv:2502.15335, 2025." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 399, + 505, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 399, + 505, + 433 + ], + "spans": [ + { + "bbox": [ + 111, + 399, + 505, + 433 + ], + "type": "text", + "content": "[800] Song Wang, Gongfan Fang, Lingdong Kong, Xiangtai Li, Jianyun Xu, Sheng Yang, Qiang Li, Jianke Zhu, and Xinchao Wang. Pixelthink: Towards efficient chain-of-pixel reasoning. arXiv preprint arXiv:2505.23727, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 437, + 505, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 437, + 505, + 460 + ], + "spans": [ + { + "bbox": [ + 111, + 437, + 505, + 460 + ], + "type": "text", + "content": "[801] Tianlong Wang, Junzhe Chen, Xueting Han, and Jing Bai. Cpl: Critical plan step learning boosts llm generalization in reasoning tasks. arXiv preprint arXiv:2409.08642, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 465, + 506, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 465, + 506, + 508 + ], + "spans": [ + { + "bbox": [ + 111, + 465, + 506, + 508 + ], + "type": "text", + "content": "[802] Tianlu Wang, Ping Yu, Xiaoqing Ellen Tan, Sean O'Brien, Ramakanth Pasunuru, Jane Dwivedi-Yu, Olga Golovneva, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Shepherd: A critic for language model generation. arXiv preprint arXiv:2308.04592, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 514, + 506, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 514, + 506, + 548 + ], + "spans": [ + { + "bbox": [ + 111, + 514, + 506, + 548 + ], + "type": "text", + "content": "[803] Tianlu Wang, Ilia Kulikov, Olga Golovneva, Ping Yu, Weizhe Yuan, Jane Dwivedi-Yu, Richard Yuanzhe Pang, Maryam Fazel-Zarandi, Jason Weston, and Xian Li. Self-taught evaluators. arXiv preprint arXiv:2408.02666, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 552, + 506, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 552, + 506, + 585 + ], + "spans": [ + { + "bbox": [ + 111, + 552, + 506, + 585 + ], + "type": "text", + "content": "[804] Weixuan Wang, Minghao Wu, Barry Haddow, and Alexandra Birch. Demystifying multilingual chain-of-thought in process reward modeling. arXiv preprint arXiv:2502.12663, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 590, + 506, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 590, + 506, + 634 + ], + "spans": [ + { + "bbox": [ + 111, + 590, + 506, + 634 + ], + "type": "text", + "content": "[805] Weixun Wang, Shaopan Xiong, Gengru Chen, Wei Gao, Sheng Guo, Yancheng He, Ju Huang, Jiaheng Liu, Zhendong Li, Xiaoyang Li, et al. Reinforcement learning optimization for large-scale learning: An efficient and user-friendly scaling library. arXiv preprint arXiv:2506.06122, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 639, + 506, + 683 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 639, + 506, + 683 + ], + "spans": [ + { + "bbox": [ + 111, + 639, + 506, + 683 + ], + "type": "text", + "content": "[806] Weiyun Wang, Zhe Chen, Wenhai Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Jinguo Zhu, Xizhou Zhu, Lewei Lu, Yu Qiao, et al. Enhancing the reasoning ability of multimodal large language models via mixed preference optimization. arXiv preprint arXiv:2411.10442, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "text", + "content": "[807] Weiyun Wang, Zhangwei Gao, Lianjie Chen, Zhe Chen, Jinguo Zhu, Xiangyu Zhao, Yangzhou Liu, Yue Cao, Shenglong Ye, Xizhou Zhu, et al. 
Visualprm: An effective process reward model for multimodal reasoning. arXiv preprint arXiv:2503.10291, 2025." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "87" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 86 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "text", + "content": "[808] Xiaoqiang Wang, Suyuchen Wang, Yun Zhu, and Bang Liu. System-1.5 reasoning: Traversal in language and latent spaces with dynamic shortcuts. arXiv preprint arXiv:2505.18962, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 109, + 504, + 132 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 109, + 504, + 132 + ], + "spans": [ + { + "bbox": [ + 111, + 109, + 504, + 132 + ], + "type": "text", + "content": "[809] Xiaoxuan Wang, Yihe Deng, Mingyu Derek Ma, and Wei Wang. Entropy-based adaptive weighting for self-training. arXiv preprint arXiv:2503.23913, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 135, + 504, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 135, + 504, + 167 + ], + "spans": [ + { + "bbox": [ + 111, + 135, + 504, + 167 + ], + "type": "text", + "content": "[810] Xinyi Wang, Lucas Caccia, Oleksiy Ostapenko, Xingdi Yuan, William Yang Wang, and Alessandro Sordoni. Guiding language model reasoning with planning tokens. arXiv preprint arXiv:2310.05707, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 171, + 506, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 171, + 506, + 249 + ], + "spans": [ + { + "bbox": [ + 111, + 171, + 506, + 249 + ], + "type": "text", + "content": "[811] Xinyi Wang, Alfonso Amayuelas, Kexun Zhang, Liangming Pan, Wenhu Chen, and William Yang Wang. Understanding reasoning ability of language models from the perspective of reasoning paths aggregation. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 50026-50042. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/wang24a.html." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 252, + 504, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 252, + 504, + 285 + ], + "spans": [ + { + "bbox": [ + 111, + 252, + 504, + 285 + ], + "type": "text", + "content": "[812] Xinyi Wang, Shawn Tan, Mingyu Jin, William Yang Wang, Rameswar Panda, and Yikang Shen. Do larger language models imply better reasoning? a pretraining scaling law for reasoning. arXiv preprint arXiv:2504.03635, 2025." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 289, + 504, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 289, + 504, + 332 + ], + "spans": [ + { + "bbox": [ + 111, + 289, + 504, + 332 + ], + "type": "text", + "content": "[813] Xiyao Wang, Jiuhai Chen, Zhaoyang Wang, Yuhang Zhou, Yiyang Zhou, Huaxiu Yao, Tianyi Zhou, Tom Goldstein, Parminder Bhatia, Furong Huang, et al. Enhancing visual-language modality alignment in large vision language models via self-improvement. arXiv preprint arXiv:2405.15973, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 336, + 504, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 336, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 111, + 336, + 504, + 370 + ], + "type": "text", + "content": "[814] Xiyao Wang, Linfeng Song, Ye Tian, Dian Yu, Baolin Peng, Haitao Mi, Furong Huang, and Dong Yu. Towards self-improvement of llms via mcts: Leveraging stepwise knowledge with curriculum preference learning. arXiv preprint arXiv:2410.06508, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 373, + 504, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 373, + 504, + 407 + ], + "spans": [ + { + "bbox": [ + 111, + 373, + 504, + 407 + ], + "type": "text", + "content": "[815] Xuezhi Wang and Denny Zhou. Chain-of-thought reasoning without prompting. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=4Zt7S0B0Jp." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 410, + 506, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 410, + 506, + 454 + ], + "spans": [ + { + "bbox": [ + 111, + 410, + 506, + 454 + ], + "type": "text", + "content": "[816] Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 
Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=1PL1NIMMrw." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 457, + 506, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 457, + 506, + 480 + ], + "spans": [ + { + "bbox": [ + 111, + 457, + 506, + 480 + ], + "type": "text", + "content": "[817] Yao Wang, Mingxuan Cui, and Arthur Jiang. Enabling ai scientists to recognize innovation: A domain-agnostic algorithm for assessing novelty. arXiv preprint arXiv:2503.01508, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 483, + 506, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 483, + 506, + 528 + ], + "spans": [ + { + "bbox": [ + 111, + 483, + 506, + 528 + ], + "type": "text", + "content": "[818] Yifei Wang, Yuyang Wu, Zeming Wei, Stefanie Jegelka, and Yisen Wang. A theoretical understanding of self-correction through in-context alignment. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=OtvNLTWYww." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 531, + 506, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 531, + 506, + 564 + ], + "spans": [ + { + "bbox": [ + 111, + 531, + 506, + 564 + ], + "type": "text", + "content": "[819] Yiqun Wang, Sile Hu, Yonggang Zhang, Xiang Tian, Xuesong Liu, Yaowu Chen, Xu Shen, and Jieping Ye. How large language models implement chain-of-thought? September 2023. URL https://openreview.net/pdf?id=b2XfOm3RJa." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 567, + 504, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 567, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 111, + 567, + 504, + 590 + ], + "type": "text", + "content": "[820] Yu Wang, Nan Yang, Liang Wang, and Furu Wei. Examining false positives under inference scaling for mathematical reasoning. arXiv preprint arXiv:2502.06217, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 594, + 506, + 659 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 594, + 506, + 659 + ], + "spans": [ + { + "bbox": [ + 111, + 594, + 506, + 659 + ], + "type": "text", + "content": "[821] Yubo Wang, Xueguang Ma, Ge Zhang, Yuansheng Ni, Abhranil Chandra, Shiguang Guo, Weiming Ren, Aaran Arulraj, Xuan He, Ziyan Jiang, Tianle Li, Max Ku, Kai Wang, Alex Zhuang, Rongqi Fan, Xiang Yue, and Wenhu Chen. MMLU-pro: A more robust and challenging multi-task language understanding benchmark. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=y10DM6R2r3." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 663, + 504, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 663, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 663, + 504, + 685 + ], + "type": "text", + "content": "[822] Yubo Wang, Xiang Yue, and Wenhu Chen. Critique fine-tuning: Learning to critique is more effective than learning to imitate. arXiv preprint arXiv:2501.17703, 2025." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 689, + 504, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 504, + 722 + ], + "type": "text", + "content": "[823] Yue Wang, Qiuzhi Liu, Jiahao Xu, Tian Liang, Xingyu Chen, Zhiwei He, Linfeng Song, Dian Yu, Juntao Li, Zhuosheng Zhang, et al. Thoughts are all over the place: On the underthinking of o1-like llms. arXiv preprint arXiv:2501.18585, 2025." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "88" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 87 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "text", + "content": "[824] Yuhang Wang, Youhe Jiang, Bin Cui, and Fangcheng Fu. Thinking short and right over thinking long: Serving lmm reasoning efficiently and accurately. arXiv preprint arXiv:2505.13326, 2025." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 108, + 506, + 131 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 108, + 506, + 131 + ], + "spans": [ + { + "bbox": [ + 111, + 108, + 506, + 131 + ], + "type": "text", + "content": "[825] Zengzhi Wang, Fan Zhou, Xuefeng Li, and Pengfei Liu. Octothinker: Mid-training incentivizes reinforcement learning scaling. arXiv preprint arXiv:2506.20512, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 133, + 506, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 133, + 506, + 178 + ], + "spans": [ + { + "bbox": [ + 111, + 133, + 506, + 178 + ], + "type": "text", + "content": "[826] Zhaoyang Wang, Weilei He, Zhiyuan Liang, Xuchao Zhang, Chetan Bansal, Ying Wei, Weitong Zhang, and Huaxiu Yao. Cream: Consistency regularized self-rewarding language models. In Neurips Safe Generative AI Workshop 2024, October 2024. URL https://openreview.net/forum?id=oaWajnM93y." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 180, + 506, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 180, + 506, + 214 + ], + "spans": [ + { + "bbox": [ + 111, + 180, + 506, + 214 + ], + "type": "text", + "content": "[827] Zhengren Wang, Jiayang Yu, Dongsheng Ma, Zhe Chen, Yu Wang, Zhiyu Li, Feiyu Xiong, Yanfeng Wang, Linpeng Tang, Wentao Zhang, et al. Rare: Retrieval-augmented reasoning modeling. arXiv preprint arXiv:2503.23513, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 216, + 506, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 216, + 506, + 250 + ], + "spans": [ + { + "bbox": [ + 111, + 216, + 506, + 250 + ], + "type": "text", + "content": "[828] Zhenhailong Wang, Haiyang Xu, Junyang Wang, Xi Zhang, Ming Yan, Ji Zhang, Fei Huang, and Heng Ji. Mobile-agent-e: Self-evolving mobile assistant for complex tasks. arXiv preprint arXiv:2501.11733, 2025." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 252, + 506, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 252, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 111, + 252, + 506, + 308 + ], + "type": "text", + "content": "[829] Zhilin Wang, Yi Dong, Olivier Delalleau, Jiaqi Zeng, Gerald Shen, Daniel Egert, Jimmy J. Zhang, Makes Narsimhan Sreedhar, and Oleksii Kuchaiev. Helpsteer 2: Open-source dataset for training top-performing reward models. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=PvVKUFhaNy." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 310, + 506, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 310, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 111, + 310, + 506, + 354 + ], + "type": "text", + "content": "[830] Zhongsheng Wang, Jiamou Liu, Qiming Bao, Hongfei Rong, and Jingfeng Zhang. Chatlogic: Integrating logic programming with large language models for multi-step reasoning. In Neuro-Symbolic Learning and Reasoning in the era of Large Language Models, December 2023. URL https://openreview.net/forum?id=AOqGF7Po7Z." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 357, + 506, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 357, + 506, + 390 + ], + "spans": [ + { + "bbox": [ + 111, + 357, + 506, + 390 + ], + "type": "text", + "content": "[831] Zihan Wang, Yunxuan Li, Yuexin Wu, Liangchen Luo, Le Hou, Hongkun Yu, and Jingbo Shang. Multi-step problem solving through a verifier: An empirical analysis on model-induced process supervision. arXiv preprint arXiv:2402.02658, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 393, + 506, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 393, + 506, + 426 + ], + "spans": [ + { + "bbox": [ + 111, + 393, + 506, + 426 + ], + "type": "text", + "content": "[832] Zixiao Wang, Yuxin Wang, Xiaorui Wang, Mengting Xing, Jie Gao, Jianjun Xu, Guangcan Liu, Chenhui Jin, Zhuo Wang, Shengzhuo Zhang, et al. Test-time scaling with reflective generative model. arXiv preprint arXiv:2507.01951, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 429, + 506, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 429, + 506, + 472 + ], + "spans": [ + { + "bbox": [ + 111, + 429, + 506, + 472 + ], + "type": "text", + "content": "[833] Anjiang Wei, Jiannan Cao, Ran Li, Hongyu Chen, Yuhui Zhang, Ziheng Wang, Yaofeng Sun, Yuan Liu, Thiago SFX Teixeira, Diyi Yang, et al. Equibench: Benchmarking code reasoning capabilities of large language models via equivalence checking. arXiv preprint arXiv:2502.12466, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 476, + 506, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 476, + 506, + 498 + ], + "spans": [ + { + "bbox": [ + 111, + 476, + 506, + 498 + ], + "type": "text", + "content": "[834] Hao Wei. Medthoughts-8k: A medical question answering dataset, feb 2025. URL https://huggingface.co/datasets/hw-hwei/MedThoughts-8K." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 501, + 506, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 501, + 506, + 534 + ], + "spans": [ + { + "bbox": [ + 111, + 501, + 506, + 534 + ], + "type": "text", + "content": "[835] Haoran Wei, Youyang Yin, Yumeng Li, Jia Wang, Liang Zhao, Jianjian Sun, Zheng Ge, and Xiangyu Zhang. Slow perception: Let's perceive geometric figures step-by-step. arXiv preprint arXiv:2412.20631, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 537, + 506, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 537, + 506, + 613 + ], + "spans": [ + { + "bbox": [ + 111, + 537, + 506, + 613 + ], + "type": "text", + "content": "[836] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, brian richter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, editors, Advances in Neural Information Processing Systems, volume 35, pages 24824-24837. Curran Associates, Inc., November 2022. URL https://proceedings.neurips.cc/paper_files/paper/2022/file/9d5609613524ecf4f15af0f7b31abca4-Paper-Conference.pdf." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 616, + 506, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 616, + 506, + 650 + ], + "spans": [ + { + "bbox": [ + 111, + 616, + 506, + 650 + ], + "type": "text", + "content": "[837] Shuyue Wei, Yongxin Tong, Zimu Zhou, Yi Xu, Jingkai Gao, Tongyu Wei, Tianran He, and Weifeng Lv. Federated reasoning llms: a survey. Frontiers of Computer Science, 19(12): 1-23, jun 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 652, + 506, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 652, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 652, + 506, + 685 + ], + "type": "text", + "content": "[838] Ting-Ruen Wei, Haowei Liu, Xuyang Wu, and Yi Fang. A survey on feedback-based multi-step reasoning for large language models on mathematics. arXiv preprint arXiv:2502.14333, 2025." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 689, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 504, + 723 + ], + "type": "text", + "content": "[839] Yana Wei, Liang Zhao, Jianjian Sun, Kangheng Lin, Jisheng Yin, Jingcheng Hu, Yinmin Zhang, En Yu, Haoran Lv, Zejia Weng, et al. Open vision reasoner: Transferring linguistic cognitive behavior for visual reasoning. arXiv preprint arXiv:2507.05255, 2025." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "89" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 88 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "text", + "content": "[840] Yongxian Wei, Anke Tang, Li Shen, Zixuan Hu, Chun Yuan, and Xiaochun Cao. Modeling multi-task model merging as adaptive projective gradient descent. arXiv preprint arXiv:2501.01230, 2025." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 109, + 505, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 109, + 505, + 152 + ], + "spans": [ + { + "bbox": [ + 111, + 109, + 505, + 152 + ], + "type": "text", + "content": "[841] Yuxiang Wei, Olivier Duchenne, Jade Copet, Quentin Carbonneaux, Lingming Zhang, Daniel Fried, Gabriel Synnaeve, Rishabh Singh, and Sida I. Wang. Swe-rl: Advancing llm reasoning via reinforcement learning on open software evolution. arXiv preprint arXiv:2502.18449, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 156, + 505, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 156, + 505, + 201 + ], + "spans": [ + { + "bbox": [ + 111, + 156, + 505, + 201 + ], + "type": "text", + "content": "[842] Nathaniel Weir, Muhammad Khalifa, Linlu Qiu, Orion Weller, and Peter Clark. Learning to reason via program generation, emulation, and search. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=te6VagJf6G." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 204, + 505, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 204, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 111, + 204, + 505, + 258 + ], + "type": "text", + "content": "[843] Sean Welleck, Amanda Bertsch, Matthew Finlayson, Hailey Schoelkopf, Alex Xie, Graham Neubig, Ilia Kulikov, and Zaid Harchaoui. From decoding to meta-generation: Inference-time algorithms for large language models. Transactions on Machine Learning Research, November 2024. ISSN 2835-8856. URL https://openreview.net/forum?id= eskQMcIbMS. Survey Certification." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 262, + 505, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 262, + 505, + 295 + ], + "spans": [ + { + "bbox": [ + 111, + 262, + 505, + 295 + ], + "type": "text", + "content": "[844] Cheng Wen, Tingwei Guo, Shuaijiang Zhao, Wei Zou, and Xiangang Li. Sari: Structured audio reasoning via curriculum-guided reinforcement learning. arXiv preprint arXiv:2504.15900, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 298, + 505, + 343 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 298, + 505, + 343 + ], + "spans": [ + { + "bbox": [ + 111, + 298, + 505, + 343 + ], + "type": "text", + "content": "[845] Jiaxin Wen, Jian Guan, Hongning Wang, Wei Wu, and Minlie Huang. Codeplan: Unlocking reasoning potential in large language models by scaling code-form planning. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=dCPF1wlqj8." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 346, + 505, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 346, + 505, + 390 + ], + "spans": [ + { + "bbox": [ + 111, + 346, + 505, + 390 + ], + "type": "text", + "content": "[846] Kaiyue Wen, Huaqing Zhang, Hongzhou Lin, and Jingzhao Zhang. From sparse dependence to sparse attention: Unveiling how chain-of-thought enhances transformer sample efficiency. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=AmEgWDhmTr." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 393, + 505, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 393, + 505, + 437 + ], + "spans": [ + { + "bbox": [ + 111, + 393, + 505, + 437 + ], + "type": "text", + "content": "[847] Xumeng Wen, Zihan Liu, Shun Zheng, Zhijian Xu, Shengyu Ye, Zhirong Wu, Xiao Liang, Yang Wang, Junjie Li, Ziming Miao, et al. Reinforcement learning with verifiable rewards implicitly incentivizes correct reasoning in base llms. arXiv preprint arXiv:2506.14245, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 441, + 505, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 441, + 505, + 507 + ], + "spans": [ + { + "bbox": [ + 111, + 441, + 505, + 507 + ], + "type": "text", + "content": "[848] Yixuan Weng, Minjun Zhu, Fei Xia, Bin Li, Shizhu He, Shengping Liu, Bin Sun, Kang Liu, and Jun Zhao. Large language models are better reasoners with self-verification. In Houda Bouamor, Juan Pino, and Kalika Bali, editors, Findings of the Association for Computational Linguistics: EMNLP 2023, pages 2550–2575, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.167. URL https://aclanthology.org/2023-findings-emnlp.167/." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 510, + 505, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 510, + 505, + 533 + ], + "spans": [ + { + "bbox": [ + 111, + 510, + 505, + 533 + ], + "type": "text", + "content": "[849] Jason Weston and Sainbayar Sukhbaatar. System 2 attention (is something you might need too). arXiv preprint arXiv:2311.11829, 2023." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 536, + 505, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 536, + 505, + 602 + ], + "spans": [ + { + "bbox": [ + 111, + 536, + 505, + 602 + ], + "type": "text", + "content": "[850] Colin White, Samuel Dooley, Manley Roberts, Arka Pal, Benjamin Feuer, Siddhartha Jain, Ravid Shwartz-Ziv, Neel Jain, Khalid Saifullah, Sreemanti Dey, Shubh-Agrawal, Sandeep Singh Sandha, Siddartha Venkat Naidu, Chinmay Hegde, Yann LeCun, Tom Goldstein, Willie Neiswanger, and Micah Goldblum. Livebench: A challenging, contamination-limited LLM benchmark. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=sKYHBTAxVa." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 605, + 505, + 638 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 605, + 505, + 638 + ], + "spans": [ + { + "bbox": [ + 111, + 605, + 505, + 638 + ], + "type": "text", + "content": "[851] Yotam Wolf, Binyamin Rothberg, Dorin Shteyman, and Amnon Shashua. Compositional hardness of code in large language models—a probabilistic perspective. arXiv preprint arXiv:2409.18028, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 641, + 505, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 641, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 641, + 505, + 685 + ], + "type": "text", + "content": "[852] Chengyue Wu, Yixiao Ge, Qiushan Guo, Jiahao Wang, Zhixuan Liang, Zeyu Lu, Ying Shan, and Ping Luo. Plot2code: A comprehensive benchmark for evaluating multi-modal large language models in code generation from scientific plots. arXiv preprint arXiv:2405.07990, 2024." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 689, + 505, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 505, + 723 + ], + "type": "text", + "content": "[853] Jinyang Wu, Mingkuan Feng, Shuai Zhang, Feihu Che, Zengqi Wen, and Jianhua Tao. Beyond examples: High-level automated reasoning paradigm in in-context learning via mcts. arXiv preprint arXiv:2411.18478, 2024." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "90" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 89 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "text", + "content": "[854] Jinyang Wu, Mingkuan Feng, Shuai Zhang, Ruihan Jin, Feihu Che, Zengqi Wen, and Jianhua Tao. Boosting multimodal reasoning with mcts-automated structured thinking. arXiv preprint arXiv:2502.02339, 2025." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 109, + 506, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 109, + 506, + 142 + ], + "spans": [ + { + "bbox": [ + 111, + 109, + 506, + 142 + ], + "type": "text", + "content": "[855] Jinyang Wu, Chonghua Liao, Mingkuan Feng, Shuai Zhang, Zhengqi Wen, Pengpeng Shao, Huazhe Xu, and Jianhua Tao. Thought-augmented policy optimization: Bridging external guidance and internal capabilities. arXiv preprint arXiv:2505.15692, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 144, + 504, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 144, + 504, + 167 + ], + "spans": [ + { + "bbox": [ + 111, + 144, + 504, + 167 + ], + "type": "text", + "content": "[856] Junde Wu, Jiayuan Zhu, and Yuyuan Liu. Agentic reasoning: Reasoning llms with tools for the deep research. arXiv preprint arXiv:2502.04644, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 170, + 504, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 170, + 504, + 203 + ], + "spans": [ + { + "bbox": [ + 111, + 170, + 504, + 203 + ], + "type": "text", + "content": "[857] Qiong Wu, Xiangcong Yang, Yiyi Zhou, Chenxin Fang, Baiyang Song, Xiaoshuai Sun, and Rongrong Ji. Grounded chain-of-thought for multimodal large language models. arXiv preprint arXiv:2503.12799, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 206, + 504, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 206, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 111, + 206, + 504, + 239 + ], + "type": "text", + "content": "[858] Siwei Wu, Zhongyuan Peng, Xinrun Du, Tuney Zheng, Minghao Liu, Jialong Wu, Jiachen Ma, Yizhi Li, Jian Yang, Wangchunshu Zhou, et al. A comparative study on reasoning patterns of openai's o1 model. arXiv preprint arXiv:2410.13639, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 243, + 506, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 243, + 506, + 264 + ], + "spans": [ + { + "bbox": [ + 111, + 243, + 506, + 264 + ], + "type": "text", + "content": "[859] Siye Wu, Jian Xie, Yikai Zhang, Aili Chen, Kai Zhang, Yu Su, and Yanghua Xiao. Arm: Adaptive reasoning model. arXiv preprint arXiv:2505.20258, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 267, + 506, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 267, + 506, + 300 + ], + "spans": [ + { + "bbox": [ + 111, + 267, + 506, + 300 + ], + "type": "text", + "content": "[860] Tianhao Wu, Janice Lan, Weizhe Yuan, Jiantao Jiao, Jason Weston, and Sainbayar Sukhbaatar. Thinking llms: General instruction following with thought generation. arXiv preprint arXiv:2410.10630, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 303, + 504, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 303, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 111, + 303, + 504, + 335 + ], + "type": "text", + "content": "[861] Wenjie Wu, Yongcheng Jing, Yingjie Wang, Wenbin Hu, and Dacheng Tao. Graph-augmented reasoning: Evolving step-by-step knowledge graph retrieval for llm reasoning. arXiv preprint arXiv:2503.01642, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 339, + 504, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 339, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 111, + 339, + 504, + 361 + ], + "type": "text", + "content": "[862] Xiaobao Wu. Sailing by the stars: A survey on reward models and learning strategies for learning from rewards. arXiv preprint arXiv:2505.02686, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 365, + 506, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 365, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 111, + 365, + 506, + 407 + ], + "type": "text", + "content": "[863] Xiong Jun Wu, Zhenduo Zhang, ZuJie Wen, Zhiqiang Zhang, Wang Ren, Lei Shi, Cai Chen, Deng Zhao, Qing Wang, Xudong Han, et al. Sharp: Synthesizing high-quality aligned reasoning problems for large reasoning models reinforcement learning. arXiv preprint arXiv:2505.14147, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 411, + 504, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 411, + 504, + 445 + ], + "spans": [ + { + "bbox": [ + 111, + 411, + 504, + 445 + ], + "type": "text", + "content": "[864] Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models. arXiv preprint arXiv:2408.00724, January 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 448, + 506, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 448, + 506, + 480 + ], + "spans": [ + { + "bbox": [ + 111, + 448, + 506, + 480 + ], + "type": "text", + "content": "[865] Yifan Wu, Jingze Shi, Bingheng Wu, Jiayi Zhang, Xiaotian Lin, Nan Tang, and Yuyu Luo. Concise reasoning, big gains: Pruning long reasoning trace with difficulty-aware prompting. arXiv preprint arXiv:2505.19716, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 483, + 506, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 483, + 506, + 516 + ], + "spans": [ + { + "bbox": [ + 111, + 483, + 506, + 516 + ], + "type": "text", + "content": "[866] Yong Wu, Weihang Pan, Ke Li, Chen Binhui, Ping Li, and Binbin Lin. 
Beyond templates: Dynamic adaptation of reasoning demonstrations via feasibility-aware exploration. arXiv preprint arXiv:2505.20700, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 519, + 506, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 519, + 506, + 542 + ], + "spans": [ + { + "bbox": [ + 111, + 519, + 506, + 542 + ], + "type": "text", + "content": "[867] Yuyang Wu, Yifei Wang, Tianqi Du, Stefanie Jegelka, and Yisen Wang. When more is less: Understanding chain-of-thought length in IIms. arXiv preprint arXiv:2502.07266, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 544, + 506, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 544, + 506, + 577 + ], + "spans": [ + { + "bbox": [ + 111, + 544, + 506, + 577 + ], + "type": "text", + "content": "[868] Zhenyu Wu, Qingkai Zeng, Zhihan Zhang, Zhaoxuan Tan, Chao Shen, and Meng Jiang. Enhancing mathematical reasoning in llms by stepwise correction. arXiv preprint arXiv:2410.12934, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 581, + 504, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 581, + 504, + 613 + ], + "spans": [ + { + "bbox": [ + 111, + 581, + 504, + 613 + ], + "type": "text", + "content": "[869] Zhenyu Wu, Qingkai Zeng, Zhihan Zhang, Zhaoxuan Tan, Chao Shen, and Meng Jiang. Large language models can self-correct with minimal effort. In AI for Math Workshop @ ICML 2024, May 2024. URL https://openreview.net/forum?id=mmZLMs413d." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 616, + 506, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 616, + 506, + 650 + ], + "spans": [ + { + "bbox": [ + 111, + 616, + 506, + 650 + ], + "type": "text", + "content": "[870] Zirui Wu, Xiao Liu, Jiayi Li, Lingpeng Kong, and Yansong Feng. 
Haste makes waste: Evaluating planning abilities of llms for efficient and feasible multitasking with time constraints between actions. arXiv preprint arXiv:2503.02238, 2025." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 111, + 653, + 506, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 653, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 111, + 653, + 506, + 685 + ], + "type": "text", + "content": "[871] Zongqian Wu, Tianyu Li, Jiaying Yang, Mengmeng Zhan, Xiaofeng Zhu, and Lei Feng. Is depth all you need? an exploration of iterative reasoning in llms. arXiv preprint arXiv:2502.10858, 2025." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 111, + 689, + 506, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 506, + 722 + ], + "type": "text", + "content": "[872] Zhiheng Xi, Wenxiang Chen, Boyang Hong, Senjie Jin, Rui Zheng, Wei He, Yiwen Ding, Shichun Liu, Xin Guo, Junzhe Wang, et al. Training large language models for reasoning through reverse curriculum reinforcement learning. arXiv preprint arXiv:2402.05808, 2024." 
+ } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "91" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 90 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[873] Zhiheng Xi, Dingwen Yang, Jixuan Huang, Jiafu Tang, Guanyu Li, Yiwen Ding, Wei He, Boyang Hong, Shihan Do, Wenyu Zhan, et al. Enhancing llm reasoning via critique models with test-time and training-time supervision. arXiv preprint arXiv:2411.16579, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 110, + 505, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 505, + 144 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 505, + 144 + ], + "type": "text", + "content": "[874] Zhiheng Xi, Guanyu Li, Yutao Fan, Honglin Guo, Yufang Liu, Xiaoran Fan, Jiaqi Liu, Jingchao Ding, Wangmeng Zuo, Zhenfei Yin, et al. Bmmr: A large-scale bilingual multimodal multi-discipline reasoning dataset. arXiv preprint arXiv:2507.03483, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 148, + 506, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 148, + 506, + 180 + ], + "spans": [ + { + "bbox": [ + 111, + 148, + 506, + 180 + ], + "type": "text", + "content": "[875] Fanzeng Xia, Yidong Luo, Tinko Sebastian Bartels, Yaqi Xu, and Tongxin Li. Rethinking the unsolvable: When in-context search meets test-time scaling. arXiv preprint arXiv:2505.22290, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 184, + 506, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 184, + 506, + 207 + ], + "spans": [ + { + "bbox": [ + 111, + 184, + 506, + 207 + ], + "type": "text", + "content": "[876] Heming Xia, Yongqi Li, Chak Tou Leong, Wenjie Wang, and Wenjie Li. Tokenskip: Controllable chain-of-thought compression in lms. arXiv preprint arXiv:2502.12067, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 211, + 504, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 211, + 504, + 235 + ], + "spans": [ + { + "bbox": [ + 111, + 211, + 504, + 235 + ], + "type": "text", + "content": "[877] Shijie Xia, Xuefeng Li, Yixin Liu, Tongshuang Wu, and Pengfei Liu. Evaluating mathematical reasoning beyond accuracy. arXiv preprint arXiv:2404.05692, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 237, + 504, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 237, + 504, + 271 + ], + "spans": [ + { + "bbox": [ + 111, + 237, + 504, + 271 + ], + "type": "text", + "content": "[878] Yunhui Xia, Wei Shen, Yan Wang, Jason Klein Liu, Huifeng Sun, Siyue Wu, Jian Hu, and Xiaolong Xu. Leetcodedataset: A temporal dataset for robust evaluation and efficient training of code llms. arXiv preprint arXiv:2504.14655, 2025." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 275, + 504, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 275, + 504, + 309 + ], + "spans": [ + { + "bbox": [ + 111, + 275, + 504, + 309 + ], + "type": "text", + "content": "[879] Kun Xiang, Zhili Liu, Zihao Jiang, Yunshuang Nie, Runhui Huang, Haoxiang Fan, Hanhui Li, Weiran Huang, Yihan Zeng, Jianhua Han, et al. Atomthink: A slow thinking framework for multimodal mathematical reasoning. arXiv preprint arXiv:2411.11930, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 312, + 505, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 312, + 505, + 346 + ], + "spans": [ + { + "bbox": [ + 111, + 312, + 505, + 346 + ], + "type": "text", + "content": "[880] Violet Xiang, Chase Blagden, Rafael Rafailov, Nathan Lile, Sang Truong, Chelsea Finn, and Nick Haber. Just enough thinking: Efficient reasoning with adaptive length penalties reinforcement learning. arXiv preprint arXiv:2506.05256, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 350, + 506, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 350, + 506, + 393 + ], + "spans": [ + { + "bbox": [ + 111, + 350, + 506, + 393 + ], + "type": "text", + "content": "[881] Violet Xiang, Charlie Snell, Kanishk Gandhi, Alon Albalak, Anikait Singh, Chase Blagden, Duy Phung, Rafael Rafailov, Nathan Lile, Dakota Mahan, et al. Towards system 2 reasoning in llms: Learning how to think with meta chain-of-though. arXiv preprint arXiv:2501.04682, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 398, + 506, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 398, + 506, + 441 + ], + "spans": [ + { + "bbox": [ + 111, + 398, + 506, + 441 + ], + "type": "text", + "content": "[882] Wenyi Xiao, Zechuan Wang, Leilei Gan, Shuai Zhao, Wanggui He, Luu Anh Tuan, Long Chen, Hao Jiang, Zhou Zhao, and Fei Wu. A comprehensive survey of direct preference optimization: Datasets, theories, variants, and applications. arXiv preprint arXiv:2410.15595, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 447, + 506, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 447, + 506, + 480 + ], + "spans": [ + { + "bbox": [ + 111, + 447, + 506, + 480 + ], + "type": "text", + "content": "[883] Chulin Xie, Yangsibo Huang, Chiyuan Zhang, Da Yu, Xinyun Chen, Bill Yuchen Lin, Bo Li, Badih Ghazi, and Ravi Kumar. On memorization of large language models in logical reasoning. arXiv preprint arXiv:2410.23123, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 484, + 506, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 484, + 506, + 528 + ], + "spans": [ + { + "bbox": [ + 111, + 484, + 506, + 528 + ], + "type": "text", + "content": "[884] Enze Xie, Junsong Chen, Yuyang Zhao, Jincheng Yu, Ligeng Zhu, Chengyue Wu, Yujun Lin, Zhekai Zhang, Muyang Li, Junyu Chen, et al. Sana 1.5: Efficient scaling of training-time and inference-time compute in linear diffusion transformer. arXiv preprint arXiv:2501.18427, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 533, + 506, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 533, + 506, + 566 + ], + "spans": [ + { + "bbox": [ + 111, + 533, + 506, + 566 + ], + "type": "text", + "content": "[885] Senwei Xie, Hongyu Wang, Zhanqi Xiao, Ruiping Wang, and Xilin Chen. 
Robotic programmer: Video instructed policy code generation for robotic manipulation. arXiv preprint arXiv:2501.04268, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 570, + 506, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 570, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 111, + 570, + 506, + 604 + ], + "type": "text", + "content": "[886] Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768, February 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 607, + 506, + 674 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 607, + 506, + 674 + ], + "spans": [ + { + "bbox": [ + 111, + 607, + 506, + 674 + ], + "type": "text", + "content": "[887] Tianbao Xie, Danyang Zhang, Jixuan Chen, Xiaochuan Li, Siheng Zhao, Ruisheng Cao, Toh Jing Hua, Zhoujun Cheng, Dongchan Shin, Fangyu Lei, Yitao Liu, Yiheng Xu, Shuyan Zhou, Silvio Savarese, Caiming Xiong, Victor Zhong, and Tao Yu. OSWorld: Benchmarking multimodal agents for open-ended tasks in real computer environments. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track, September 2024. URL https://openreview.net/forum?id=tN61DTr4Ed." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 677, + 506, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 677, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 111, + 677, + 506, + 722 + ], + "type": "text", + "content": "[888] Yuxi Xie, Kenji Kawaguchi, Yiran Zhao, Xu Zhao, Min-Yen Kan, Junxian He, and Qizhe Xie. Self-evaluation guided beam search for reasoning. In Thirty-seventh Conference on Neural Information Processing Systems, September 2023. URL https://openreview.net/forum?id=Bw82hwg5Q3." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "92" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 91 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[889] Yuxi Xie, Anirudh Goyal, Wenyue Zheng, Min-Yen Kan, Timothy P Lillicrap, Kenji Kawaguchi, and Michael Shieh. Monte carlo tree search boosts reasoning via iterative preference learning. arXiv preprint arXiv:2405.00451, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 108, + 506, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 108, + 506, + 144 + ], + "spans": [ + { + "bbox": [ + 111, + 108, + 506, + 144 + ], + "type": "text", + "content": "[890] Zhifei Xie, Mingbao Lin, Zihang Liu, Pengcheng Wu, Shuicheng Yan, and Chunyan Miao. Audio-reasoner: Improving reasoning capability in large audio language models. arXiv preprint arXiv:2503.02318, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 146, + 504, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 146, + 504, + 170 + ], + "spans": [ + { + "bbox": [ + 111, + 146, + 504, + 170 + ], + "type": "text", + "content": "[891] Zhihui Xie, Liyu Chen, Weichao Mao, Jingjing Xu, Lingpeng Kong, et al. Teaching language models to critique via reinforcement learning. arXiv preprint arXiv:2502.03492, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 172, + 506, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 172, + 506, + 205 + ], + "spans": [ + { + "bbox": [ + 111, + 172, + 506, + 205 + ], + "type": "text", + "content": "[892] Siheng Xiong, Ali Payani, Yuan Yang, and Faramarz Fekri. Deliberate reasoning for llms as structure-aware planning with accurate world model. arXiv preprint arXiv:2410.03136, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 209, + 506, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 209, + 506, + 243 + ], + "spans": [ + { + "bbox": [ + 111, + 209, + 506, + 243 + ], + "type": "text", + "content": "[893] Wei Xiong, Chengshuai Shi, Jiaming Shen, Aviv Rosenberg, Zhen Qin, Daniele Calandriello, Misha Khalman, Rishabh Joshi, Bilal Piot, Mohammad Saleh, et al. Building math agents with multi-turn iterative preference learning. arXiv preprint arXiv:2409.02392, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 246, + 506, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 246, + 506, + 280 + ], + "spans": [ + { + "bbox": [ + 111, + 246, + 506, + 280 + ], + "type": "text", + "content": "[894] Wang Xiyao, Yang Zhengyuan, Li Linjie, Lu Hongjin, Xu Yuancheng, Lin Chung-Ching Lin, Lin Kevin, Huang Furong, and Wang Lijuan. Scaling inference-time search with vision value model for improved visual comprehension. arXiv preprint arXiv:2412.03704, 2024." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 282, + 504, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 282, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 111, + 282, + 504, + 316 + ], + "type": "text", + "content": "[895] Austin Xu, Yilun Zhou, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. J4r: Learning to judge with equivalent initial state group relative policy optimization. arXiv preprint arXiv:2505.13346, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 319, + 506, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 319, + 506, + 352 + ], + "spans": [ + { + "bbox": [ + 111, + 319, + 506, + 352 + ], + "type": "text", + "content": "[896] Bin Xu, Yiguan Lin, Yinghao Li, et al. Sra-mcts: Self-driven reasoning augmentation with monte carlo tree search for enhanced code generation. arXiv preprint arXiv:2411.11053, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 356, + 504, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 356, + 504, + 390 + ], + "spans": [ + { + "bbox": [ + 111, + 356, + 504, + 390 + ], + "type": "text", + "content": "[897] Fangzhi Xu, Qiushi Sun, Kanzhi Cheng, Jun Liu, Yu Qiao, and Zhiyong Wu. Interactive evolution: A neural-symbolic self-training framework for large language models. arXiv preprint arXiv:2406.11736, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 393, + 506, + 427 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 393, + 506, + 427 + ], + "spans": [ + { + "bbox": [ + 111, + 393, + 506, + 427 + ], + "type": "text", + "content": "[898] Fangzhi Xu, Hang Yan, Chang Ma, Haiteng Zhao, Qiushi Sun, Kanzhi Cheng, Junxian He, Jun Liu, and Zhiyong Wu. Genius: A generalizable and purely unsupervised self-training framework for advanced reasoning. arXiv preprint arXiv:2504.08672, 2025." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 430, + 506, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 430, + 506, + 474 + ], + "spans": [ + { + "bbox": [ + 111, + 430, + 506, + 474 + ], + "type": "text", + "content": "[899] Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, et al. Towards large reasoning models: A survey of reinforced reasoning with large language models. arXiv preprint arXiv:2501.09686, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 478, + 504, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 478, + 504, + 502 + ], + "spans": [ + { + "bbox": [ + 111, + 478, + 504, + 502 + ], + "type": "text", + "content": "[900] Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. Llava-ol: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 504, + 506, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 504, + 506, + 537 + ], + "spans": [ + { + "bbox": [ + 111, + 504, + 506, + 537 + ], + "type": "text", + "content": "[901] Haotian Xu. No train still gain. unleash mathematical reasoning of large language models with monte carlo tree search guided by energy function. arXiv preprint arXiv:2309.03224, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 540, + 504, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 540, + 504, + 575 + ], + "spans": [ + { + "bbox": [ + 111, + 540, + 504, + 575 + ], + "type": "text", + "content": "[902] Haotian Xu, Xing Wu, Weinong Wang, Zhongzhi Li, Da Zheng, Boyuan Chen, Yi Hu, Shijia Kang, Jiaming Ji, Yingying Zhang, et al. Redstar: Does scaling long-cot data unlock better slow-reasoning systems? arXiv preprint arXiv:2501.11284, 2025." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 578, + 504, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 578, + 504, + 612 + ], + "spans": [ + { + "bbox": [ + 111, + 578, + 504, + 612 + ], + "type": "text", + "content": "[903] Huimin Xu, Xin Mao, Feng-Lin Li, Xiaobao Wu, Wang Chen, Wei Zhang, and Anh Tuan Luu. Full-step-dpo: Self-supervised preference optimization with step-wise rewards for mathematical reasoning. arXiv preprint arXiv:2502.14356, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 614, + 506, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 614, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 111, + 614, + 506, + 647 + ], + "type": "text", + "content": "[904] Jin Xu, Zhifang Guo, Jinzheng He, Hangrui Hu, Ting He, Shuai Bai, Keqin Chen, Jialin Wang, Yang Fan, Kai Dang, et al. Qwen2. 5-omni technical report. arXiv preprint arXiv:2503.20215, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 651, + 504, + 686 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 651, + 504, + 686 + ], + "spans": [ + { + "bbox": [ + 111, + 651, + 504, + 686 + ], + "type": "text", + "content": "[905] Pusheng Xu, Yue Wu, Kai Jin, Xiaolan Chen, Mingguang He, and Danli Shi. Deepseek-r1 outperforms gemini 2.0 pro, openai o1, and o3-mini in bilingual complex ophthalmology reasoning. arXiv preprint arXiv:2502.17947, 2025." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 111, + 689, + 506, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 506, + 721 + ], + "type": "text", + "content": "[906] Rongwu Xu, Xiaojian Li, Shuo Chen, and Wei Xu. \"nuclear deployed!\": Analyzing catastrophic risks in decision-making of autonomous llm agents. arXiv preprint arXiv:2502.11355, 2025." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "93" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 92 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "type": "text", + "content": "[907] Silei Xu, Wenhao Xie, Lingxiao Zhao, and Pengcheng He. Chain of draft: Thinking faster by writing less. arXiv preprint arXiv:2502.18600, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 98, + 506, + 165 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 98, + 506, + 165 + ], + "spans": [ + { + "bbox": [ + 111, + 98, + 506, + 165 + ], + "type": "text", + "content": "[908] Wenda Xu, Guanglei Zhu, Xuandong Zhao, Liangming Pan, Lei Li, and William Wang. Pride and prejudice: LLM amplifies self-bias in self-refinement. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15474–15492, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.826. 
URL https://aclanthology.org/2024.acl-long.826/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 167, + 506, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 167, + 506, + 201 + ], + "spans": [ + { + "bbox": [ + 111, + 167, + 506, + 201 + ], + "type": "text", + "content": "[909] Xiaoang Xu, Shuo Wang, Xu Han, Zhenghao Liu, Huijia Wu, Peipei Li, Zhiyuan Liu, Maosong Sun, and Zhaofeng He. A\\*thought: Efficient reasoning via bidirectional compression for low-resource settings. arXiv preprint arXiv:2505.24550, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 203, + 504, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 203, + 504, + 225 + ], + "spans": [ + { + "bbox": [ + 111, + 203, + 504, + 225 + ], + "type": "text", + "content": "[910] Xin Xu, Shizhe Diao, Can Yang, and Yang Wang. Can we verify step by step for incorrect answer detection? arXiv preprint arXiv:2402.10528, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 228, + 504, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 228, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 111, + 228, + 504, + 262 + ], + "type": "text", + "content": "[911] Yao Xu, Mingyu Xu, Fangyu Lei, Wangtao Sun, Xiangrong Zeng, Bingning Wang, Guang Liu, Shizhu He, Jun Zhao, and Kang Liu. Amplify adjacent token differences: Enhancing long chain-of-thought reasoning with shift-ffn. arXiv preprint arXiv:2505.17153, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 264, + 504, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 264, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 111, + 264, + 504, + 308 + ], + "type": "text", + "content": "[912] Yi Xu, Chengzhu Li, Han Zhou, Xingchen Wan, Caiqi Zhang, Anna Korhonen, and Ivan Vulić. Visual planning: Let's think only with images. 
In Workshop on Foundation Models Meet Embodied Agents at CVPR 2025, may 2025. URL https://openreview.net/forum?id=ELIt3v3S1J." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 312, + 504, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 312, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 111, + 312, + 504, + 335 + ], + "type": "text", + "content": "[913] Yige Xu, Xu Guo, Zhiwei Zeng, and Chunyan Miao. Softcot: Soft chain-of-thought for efficient reasoning with llms. arXiv preprint arXiv:2502.12134, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 337, + 504, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 337, + 504, + 360 + ], + "spans": [ + { + "bbox": [ + 111, + 337, + 504, + 360 + ], + "type": "text", + "content": "[914] Yige Xu, Xu Guo, Zhiwei Zeng, and Chunyan Miao. Softcot++: Test-time scaling with soft chain-of-thought reasoning. arXiv preprint arXiv:2505.11484, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 362, + 506, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 362, + 506, + 396 + ], + "spans": [ + { + "bbox": [ + 111, + 362, + 506, + 396 + ], + "type": "text", + "content": "[915] Zhangchen Xu, Fengqing Jiang, Luyao Niu, Yuntian Deng, Radha Poovendran, Yejin Choi, and Bill Yuchen Lin. Magpie: Alignment data synthesis from scratch by prompting aligned lms with nothing. arXiv preprint arXiv:2406.08464, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 399, + 504, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 399, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 111, + 399, + 504, + 422 + ], + "type": "text", + "content": "[916] Zhangchen Xu, Yang Liu, Yueqin Yin, Mingyuan Zhou, and Radha Poovendran. Kodcode: A diverse, challenging, and verifiable synthetic dataset for coding. February 2025." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 424, + 506, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 424, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 111, + 424, + 506, + 456 + ], + "type": "text", + "content": "[917] Jianhao Yan, Yafu Li, Zican Hu, Zhi Wang, Ganqu Cui, Xiaoye Qu, Yu Cheng, and Yue Zhang. Learning to reason under off-policy guidance. arXiv preprint arXiv:2504.14945, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 460, + 504, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 460, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 111, + 460, + 504, + 495 + ], + "type": "text", + "content": "[918] Kai Yan, Yufei Xu, Zhengyin Du, Xuesong Yao, Zheyu Wang, Xiaowen Guo, and Jiecao Chen. Recitation over reasoning: How cutting-edge language models can fail on elementary school-level reasoning problems? arXiv preprint arXiv:2504.00509, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 497, + 504, + 519 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 497, + 504, + 519 + ], + "spans": [ + { + "bbox": [ + 111, + 497, + 504, + 519 + ], + "type": "text", + "content": "[919] Ruin Yan, Zheng Liu, and Defu Lian. O1 embedder: Let retrievers think before action. arXiv preprint arXiv:2502.07555, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 522, + 506, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 522, + 506, + 555 + ], + "spans": [ + { + "bbox": [ + 111, + 522, + 506, + 555 + ], + "type": "text", + "content": "[920] Siming Yan, Min Bai, Weifeng Chen, Xiong Zhou, Qixing Huang, and Li Erran Li. Vigor: Improving visual grounding of large vision language models with fine-grained reward modeling. In European Conference on Computer Vision, pages 37-53. Springer, 2024." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 559, + 506, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 559, + 506, + 602 + ], + "spans": [ + { + "bbox": [ + 111, + 559, + 506, + 602 + ], + "type": "text", + "content": "[921] Yibo Yan, Jiamin Su, Jianxiang He, Fangteng Fu, Xu Zheng, Yuanhuiyi Lyu, Kun Wang, Shen Wang, Qingsong Wen, and Xuming Hu. A survey of mathematical reasoning in the era of multimodal large language model: Benchmark, method & challenges. arXiv preprint arXiv:2412.11936, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 605, + 506, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 605, + 506, + 649 + ], + "spans": [ + { + "bbox": [ + 111, + 605, + 506, + 649 + ], + "type": "text", + "content": "[922] Yibo Yan, Shen Wang, Jiahao Huo, Hang Li, Boyan Li, Jiamin Su, Xiong Gao, Yi-Fan Zhang, Tianlong Xu, Zhendong Chu, et al. Errorradar: Benchmarking complex mathematical reasoning of multimodal large language models via error detection. arXiv preprint arXiv:2410.04509, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 652, + 506, + 686 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 652, + 506, + 686 + ], + "spans": [ + { + "bbox": [ + 111, + 652, + 506, + 686 + ], + "type": "text", + "content": "[923] Yibo Yan, Shen Wang, Jiahao Huo, Jingheng Ye, Zhendong Chu, Xuming Hu, Philip S Yu, Carla Gomes, Bart Selman, and Qingsong Wen. Position: Multimodal large language models can significantly advance scientific reasoning. arXiv preprint arXiv:2502.02871, 2025." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "text", + "content": "[924] Yuchen Yan, Jin Jiang, Yang Liu, Yixin Cao, Xin Xu, Xunliang Cai, Jian Shao, et al. S " + }, + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 111, + 689, + 506, + 723 + ], + "type": "text", + "content": " c-math: Spontaneous step-level self-correction makes large language models better mathematical reasoners. arXiv preprint arXiv:2409.01524, 2024." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "94" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 93 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 105 + ], + "type": "text", + "content": "[925] An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. 
arXiv preprint arXiv:2407.10671, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 109, + 505, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 109, + 505, + 142 + ], + "spans": [ + { + "bbox": [ + 111, + 109, + 505, + 142 + ], + "type": "text", + "content": "[926] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 145, + 504, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 145, + 504, + 179 + ], + "spans": [ + { + "bbox": [ + 111, + 145, + 504, + 179 + ], + "type": "text", + "content": "[927] An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2.5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 182, + 504, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 182, + 504, + 216 + ], + "spans": [ + { + "bbox": [ + 111, + 182, + 504, + 216 + ], + "type": "text", + "content": "[928] Cehao Yang, Xueyuan Lin, Chengjin Xu, Xuhui Jiang, Xiaojun Wu, Honghao Liu, Hui Xiong, and Jian Guo. Select2reason: Efficient instruction-tuning data selection for long-cot reasoning. arXiv preprint arXiv:2505.17266, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 219, + 506, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 219, + 506, + 241 + ], + "spans": [ + { + "bbox": [ + 111, + 219, + 506, + 241 + ], + "type": "text", + "content": "[929] Chen Yang, Chenyang Zhao, Quanquan Gu, and Dongruo Zhou. Cops: Empowering llm agents with provable cross-task experience sharing. 
arXiv preprint arXiv:2410.16670, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 244, + 504, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 244, + 504, + 267 + ], + "spans": [ + { + "bbox": [ + 111, + 244, + 504, + 267 + ], + "type": "text", + "content": "[930] Cheng Yang, Chufan Shi, Siheng Li, Bo Shui, Yujiu Yang, and Wai Lam. Llm2: Let large language models harness system 2 reasoning. arXiv preprint arXiv:2412.20372, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 270, + 506, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 270, + 506, + 325 + ], + "spans": [ + { + "bbox": [ + 111, + 270, + 506, + 325 + ], + "type": "text", + "content": "[931] Cheng Yang, Chufan Shi, Yaxin Liu, Bo Shui, Junjie Wang, Mohan Jing, Linran Xu, Xinyu Zhu, Siheng Li, Yuxiang Zhang, Gongye Liu, Xiaomei Nie, Deng Cai, and Yujiu Yang. Chartmimic: Evaluating LMM's cross-modal reasoning capability via chart-to-code generation. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=sGpCzsfd1K." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 328, + 506, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 328, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 111, + 328, + 506, + 360 + ], + "type": "text", + "content": "[932] Kailai Yang, Zhiwei Liu, Qianqian Xie, Jimin Huang, Erxue Min, and Sophia Ananiadou. Selective preference optimization via token-level reward function estimation. arXiv preprint arXiv:2408.13518, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 364, + 506, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 364, + 506, + 397 + ], + "spans": [ + { + "bbox": [ + 111, + 364, + 506, + 397 + ], + "type": "text", + "content": "[933] Kaiyu Yang, Gabriel Poesia, Jingxuan He, Wenda Li, Kristin Lauter, Swarat Chaudhuri, and Dawn Song. Formal mathematical reasoning: A new frontier in ai. arXiv preprint arXiv:2412.16075, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 401, + 506, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 401, + 506, + 434 + ], + "spans": [ + { + "bbox": [ + 111, + 401, + 506, + 434 + ], + "type": "text", + "content": "[934] Lei Yang, Renren Jin, Ling Shi, Jianxiang Peng, Yue Chen, and Deyi Xiong. Probench: Benchmarking large language models in competitive programming. arXiv preprint arXiv:2502.20868, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 437, + 504, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 437, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 111, + 437, + 504, + 460 + ], + "type": "text", + "content": "[935] Ling Yang, Zhaochen Yu, Bin Cui, and Mengdi Wang. Reasonflux: Hierarchical llm reasoning via scaling thought templates. arXiv preprint arXiv:2502.06772, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 463, + 506, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 463, + 506, + 496 + ], + "spans": [ + { + "bbox": [ + 111, + 463, + 506, + 496 + ], + "type": "text", + "content": "[936] Ruihan Yang, Fanghua Ye, Jian Li, Siyu Yuan, Yikai Zhang, Zhaopeng Tu, Xiaolong Li, and Deqing Yang. The lighthouse of language: Enhancing llm agents via critique-guided improvement. arXiv preprint arXiv:2503.16024, 2025." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 499, + 504, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 499, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 111, + 499, + 504, + 544 + ], + "type": "text", + "content": "[937] Sherry Yang, Dale Schuurmans, Pieter Abbeel, and Ofir Nachum. Chain of thought imitation with procedure cloning. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho, editors, Advances in Neural Information Processing Systems, November 2022. URL https://openreview.net/forum?id=ZJqqSa8FsH9." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 547, + 504, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 547, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 111, + 547, + 504, + 581 + ], + "type": "text", + "content": "[938] Shiming Yang, Yuxuan Tong, Xinyao Niu, Graham Neubig, and Xiang Yue. Demystifying long chain-of-thought reasoning. In *Forty-second International Conference on Machine Learning*, may 2025. URL https://openreview.net/forum?id=OLodUbcWjb." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 583, + 506, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 583, + 506, + 616 + ], + "spans": [ + { + "bbox": [ + 111, + 583, + 506, + 616 + ], + "type": "text", + "content": "[939] Shu Yang, Junchao Wu, Xin Chen, Yunze Xiao, Xinyi Yang, Derek F. Wong, and Di Wang. Understanding aha moments: from external observations to internal mechanisms. arXiv preprint arXiv:2504.02956, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 620, + 506, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 620, + 506, + 653 + ], + "spans": [ + { + "bbox": [ + 111, + 620, + 506, + 653 + ], + "type": "text", + "content": "[940] Shu Yang, Junchao Wu, Xuansheng Wu, Derek Wong, Ninhao Liu, and Di Wang. Is long-to-short a free lunch? 
investigating inconsistency and reasoning efficiency in Irms. arXiv preprint arXiv:2506.19492, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 656, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 656, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 656, + 506, + 723 + ], + "type": "text", + "content": "[941] Sohee Yang, Elena Gribovskaya, Nora Kassner, Mor Geva, and Sebastian Riedel. Do large language models latently perform multi-hop reasoning? In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 10210–10229, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.550. URL https://aclanthology.org/2024.acl-long.550/." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "95" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 94 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "type": "text", + "content": "[942] Wang Yang, 
Hongye Jin, Jingfeng Yang, Vipin Chaudhary, and Xiaotian Han. Thinking preference optimization. arXiv preprint arXiv:2502.13173, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 97, + 505, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 97, + 505, + 120 + ], + "spans": [ + { + "bbox": [ + 111, + 97, + 505, + 120 + ], + "type": "text", + "content": "[943] Wenkai Yang, Shuming Ma, Yankai Lin, and Furu Wei. Towards thinking-optimal scaling of test-time compute for lIm reasoning. arXiv preprint arXiv:2502.18080, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 122, + 506, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 122, + 506, + 157 + ], + "spans": [ + { + "bbox": [ + 111, + 122, + 506, + 157 + ], + "type": "text", + "content": "[944] Xiao-Wen Yang, Xuan-Yi Zhu, Wen-Da Wei, Ding-Chu Zhang, Jie-Jing Shao, Zhi Zhou, Lan-Zhe Guo, and Yu-Feng Li. Step back to leap forward: Self-backtracking for boosting reasoning of language models. arXiv preprint arXiv:2502.04404, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 159, + 504, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 159, + 504, + 182 + ], + "spans": [ + { + "bbox": [ + 111, + 159, + 504, + 182 + ], + "type": "text", + "content": "[945] Yang Yang, Xiaolu Zhou, Bosong Ding, and Miao Xin. Uncertainty-aware reward design process. arXiv preprint arXiv:2507.02256, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 184, + 506, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 184, + 506, + 218 + ], + "spans": [ + { + "bbox": [ + 111, + 184, + 506, + 218 + ], + "type": "text", + "content": "[946] Yifei Yang, Zouying Cao, Qiguang Chen, Libo Qin, Dongjie Yang, Hai Zhao, and Zhi Chen. Kvsharer: Efficient inference via layer-wise dissimilar kv cache sharing. arXiv preprint arXiv:2410.18517, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 220, + 506, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 220, + 506, + 264 + ], + "spans": [ + { + "bbox": [ + 111, + 220, + 506, + 264 + ], + "type": "text", + "content": "[947] Yue Yang, MingKang Chen, Qihua Liu, Mengkang Hu, Qiguang Chen, Gengrui Zhang, Shuyue Hu, Guangtao Zhai, Yu Qiao, Yu Wang, et al. Truly assessing fluid intelligence of large language models through dynamic reasoning evaluation. arXiv preprint arXiv:2506.02648, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 267, + 506, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 267, + 506, + 323 + ], + "spans": [ + { + "bbox": [ + 111, + 267, + 506, + 323 + ], + "type": "text", + "content": "[948] Yuqing Yang, Yan Ma, and Pengfei Liu. Weak-to-strong reasoning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 8350-8367, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.490. URL https://aclanthology.org/2024 findings-emnlp.490/." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 326, + 506, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 326, + 506, + 358 + ], + "spans": [ + { + "bbox": [ + 111, + 326, + 506, + 358 + ], + "type": "text", + "content": "[949] Zeyuan Yang, Xueyang Yu, Delin Chen, Maohao Shen, and Chuang Gan. Machine mental imagery: Empower multimodal reasoning with latent visual tokens. arXiv preprint arXiv:2506.17218, 2025." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 361, + 506, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 361, + 506, + 396 + ], + "spans": [ + { + "bbox": [ + 111, + 361, + 506, + 396 + ], + "type": "text", + "content": "[950] Zhe Yang, Yichang Zhang, Yudong Wang, Ziyao Xu, Junyang Lin, and Zhifang Sui. Confidence vs critique: A decomposition of self-correction capability for llms. arXiv preprint arXiv:2412.19513, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 398, + 506, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 398, + 506, + 443 + ], + "spans": [ + { + "bbox": [ + 111, + 398, + 506, + 443 + ], + "type": "text", + "content": "[951] Zonghan Yang, Peng Li, Ming Yan, Ji Zhang, Fei Huang, and Yang Liu. React meets actre: Autonomous annotation of agent trajectories for contrastive self-training. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=0VLBwQGWpA." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 445, + 504, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 445, + 504, + 490 + ], + "spans": [ + { + "bbox": [ + 111, + 445, + 504, + 490 + ], + "type": "text", + "content": "[952] Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, et al. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. arXiv preprint arXiv:2412.18319, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 492, + 506, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 492, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 111, + 492, + 506, + 536 + ], + "type": "text", + "content": "[953] Huanjin Yao, Jiaxing Huang, Yawen Qiu, Michael K Chen, Wenzheng Liu, Wei Zhang, Wenjie Zeng, Xikun Zhang, Jingyi Zhang, Yuxin Song, et al. 
Mmreason: An open-ended multi-modal multi-step reasoning benchmark for mllms toward agi. arXiv preprint arXiv:2506.23563, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 540, + 506, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 540, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 111, + 540, + 506, + 594 + ], + "type": "text", + "content": "[954] Shunyu Yao, Howard Chen, John Yang, and Karthik R Narasimhan. Webshop: Towards scalable real-world web interaction with grounded language agents. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho, editors, Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=R9KnuFlvnU." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 597, + 506, + 675 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 597, + 506, + 675 + ], + "spans": [ + { + "bbox": [ + 111, + 597, + 506, + 675 + ], + "type": "text", + "content": "[955] Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine, editors, Advances in Neural Information Processing Systems, volume 36, pages 11809-11822. Curran Associates, Inc., September 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/271db9922b8d1f4dd7aaef84ed5ac703-Paper-Conference.pdf." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 677, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 677, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 677, + 504, + 723 + ], + "type": "text", + "content": "[956] Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R Narasimhan, and Yuan Cao. React: Synergizing reasoning and acting in language models. 
In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=WE_vluYUL-X." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "96" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 95 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 106 + ], + "type": "text", + "content": "[957] Xinhao Yao, Ruifeng Ren, Yun Liao, and Yong Liu. Unveiling the mechanisms of explicit cot training: How chain-of-thought enhances reasoning generalization. arXiv preprint arXiv:2502.04667, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 110, + 505, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 505, + 144 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 505, + 144 + ], + "type": "text", + "content": "[958] Yang Yao, Xuan Tong, Ruofan Wang, Yixu Wang, Lujundong Li, Liang Liu, Yan Teng, and Yingchun Wang. A mousetrap: Fooling large reasoning models for jailbreak with chain of iterative chaos. arXiv preprint arXiv:2502.15806, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 148, + 505, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 148, + 505, + 182 + ], + "spans": [ + { + "bbox": [ + 111, + 148, + 505, + 182 + ], + "type": "text", + "content": "[959] Wang Yaoting, Wu Shengqiong, Zhang Yuechen, Yan Shuicheng, Liu Ziwei, Luo Jiebo, and Fei Hao. Multimodal chain-of-thought reasoning: A comprehensive survey. arXiv preprint arXiv:2503.12605, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 186, + 505, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 186, + 505, + 219 + ], + "spans": [ + { + "bbox": [ + 111, + 186, + 505, + 219 + ], + "type": "text", + "content": "[960] Michihiro Yasunaga, Luke Zettlemoyer, and Marjan Ghazvininejad. Multimodal reward-bench: Holistic evaluation of reward models for vision language models. arXiv preprint arXiv:2502.14191, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 223, + 505, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 223, + 505, + 247 + ], + "spans": [ + { + "bbox": [ + 111, + 223, + 505, + 247 + ], + "type": "text", + "content": "[961] Nicolas Yax, Hernán Anló, and Stefano Palminteri. Studying and improving reasoning in humans and machines. Communications Psychology, 2(1):51, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 251, + 505, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 251, + 505, + 285 + ], + "spans": [ + { + "bbox": [ + 111, + 251, + 505, + 285 + ], + "type": "text", + "content": "[962] Guanghao Ye, Khiem Duc Pham, Xinzhi Zhang, Sivakanth Gopi, Baolin Peng, Beibin Li, Janardhan Kulkarni, and Huseyin A Inan. On the emergence of thinking in llms i: Searching for the right intuition. arXiv preprint arXiv:2502.06773, 2025." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 288, + 505, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 288, + 505, + 322 + ], + "spans": [ + { + "bbox": [ + 111, + 288, + 505, + 322 + ], + "type": "text", + "content": "[963] Jiaran Ye, Zijun Yao, Zhidian Huang, Liangming Pan, Jinxin Liu, Yushi Bai, Amy Xin, Liu Weichuan, Xiaoyin Che, Lei Hou, et al. How does transformer learn implicit reasoning? arXiv preprint arXiv:2505.23653, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 326, + 505, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 326, + 505, + 371 + ], + "spans": [ + { + "bbox": [ + 111, + 326, + 505, + 371 + ], + "type": "text", + "content": "[964] Rui Ye, Shuo Tang, Rui Ge, Yaxin Du, Zhenfei Yin, Jing Shao, and Siheng Chen. MAS-GPT: Training LLMs to build LLM-based multi-agent systems. In Workshop on Reasoning and Planning for Large Language Models, March 2025. URL https://openreview.net/forum?id=TqHoQIlumy." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 374, + 505, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 374, + 505, + 420 + ], + "spans": [ + { + "bbox": [ + 111, + 374, + 505, + 420 + ], + "type": "text", + "content": "[965] Tian Ye, Zicheng Xu, Yuanzhi Li, and Zeyuan Allen-Zhu. Physics of language models: Part 2.2, how to learn from mistakes on grade-school math problems. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=zpDGwcmMV4." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 423, + 505, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 423, + 505, + 457 + ], + "spans": [ + { + "bbox": [ + 111, + 423, + 505, + 457 + ], + "type": "text", + "content": "[966] Xinwu Ye, Chengfan Li, Siming Chen, Xiangru Tang, and Wei Wei. 
Mmscibench: Benchmarking language models on multimodal scientific problems. arXiv preprint arXiv:2503.01891, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 461, + 505, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 461, + 505, + 485 + ], + "spans": [ + { + "bbox": [ + 111, + 461, + 505, + 485 + ], + "type": "text", + "content": "[967] Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning. arXiv preprint arXiv:2502.03387, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 488, + 505, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 488, + 505, + 521 + ], + "spans": [ + { + "bbox": [ + 111, + 488, + 505, + 521 + ], + "type": "text", + "content": "[968] Zihuiwen Ye, Fraser Greenlee-Scott, Max Bartolo, Phil Blunsom, Jon Ander Campos, and Matthias Galle. Improving reward models with synthetic critiques. arXiv preprint arXiv:2405.20850, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 526, + 505, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 526, + 505, + 560 + ], + "spans": [ + { + "bbox": [ + 111, + 526, + 505, + 560 + ], + "type": "text", + "content": "[969] Zihuiwen Ye, Luckeciano Carvalho Melo, Younesse Kaddar, Phil Blunsom, Sam Staton, and Yarin Gal. Uncertainty-aware step-wise verification with generative reward models. arXiv preprint arXiv:2502.11250, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 564, + 505, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 564, + 505, + 597 + ], + "spans": [ + { + "bbox": [ + 111, + 564, + 505, + 597 + ], + "type": "text", + "content": "[970] Hao Yi, Qingyang Li, Yulan Hu, Fuzheng Zhang, Di Zhang, and Yong Liu. Sppd: Self-training with process preference learning using dynamic value margin. arXiv preprint arXiv:2502.13516, 2025." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 601, + 505, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 601, + 505, + 625 + ], + "spans": [ + { + "bbox": [ + 111, + 601, + 505, + 625 + ], + "type": "text", + "content": "[971] Jingyang Yi, Jiazheng Wang, and Sida Li. Shorterbetter: Guiding reasoning models to find optimal inference length for efficient reasoning. arXiv preprint arXiv:2504.21370, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 628, + 505, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 628, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 111, + 628, + 505, + 673 + ], + "type": "text", + "content": "[972] Qiyue Yin, Pei Xu, Qiaozhe Li, Shengda Liu, Shengqi Shen, Tong Wang, Yihong Han, Xiaonan Zhao, Likun Yang, Shiyue Cao, et al. Wgsr-bench: Wargame-based game-theoretic strategic reasoning benchmark for large language models. arXiv preprint arXiv:2506.10264, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 677, + 505, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 677, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 111, + 677, + 505, + 723 + ], + "type": "text", + "content": "[973] Zhangyue Yin, Qiushi Sun, Qipeng Guo, Zhiyuan Zeng, Xiaonan Li, Junqi Dai, Qinyuan Cheng, Xuanjing Huang, and Xipeng Qiu. Reasoning in flux: Enhancing large language models reasoning through uncertainty-aware adaptive guidance. 
In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "97" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 96 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 135, + 72, + 506, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 72, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 135, + 72, + 506, + 106 + ], + "type": "text", + "content": "for Computational Linguistics (Volume 1: Long Papers), pages 2401-2416, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.131. URL https://aclanthology.org/2024.acl-long.131/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 110, + 505, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 505, + 144 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 505, + 144 + ], + "type": "text", + "content": "[974] Huaiyuan Ying, Shuo Zhang, Linyang Li, Zhejian Zhou, Yunfan Shao, Zhaoye Fei, Yichuan Ma, Jiawei Hong, Kuikun Liu, Ziyi Wang, et al. Internl m - Math: Open math large language models toward verifiable reasoning. arXiv preprint arXiv:2402.06332, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 148, + 506, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 148, + 506, + 192 + ], + "spans": [ + { + "bbox": [ + 111, + 148, + 506, + 192 + ], + "type": "text", + "content": "[975] Eunseop Yoon, Hee Suk Yoon, SooHwan Eom, Gunsoo Han, Daniel Wontae Nam, Daejin Jo, Kyoung-Woon On, Mark A Hasegawa-Johnson, Sungwoong Kim, and Chang D Yoo. Tlcr: Token-level continuous reward for fine-grained reinforcement learning from human feedback. arXiv preprint arXiv:2407.16574, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 196, + 504, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 196, + 504, + 218 + ], + "spans": [ + { + "bbox": [ + 111, + 196, + 504, + 218 + ], + "type": "text", + "content": "[976] Jaesik Yoon, Hyeonseo Cho, Doojin Baek, Yoshua Bengio, and Sungjin Ahn. Monte carlo tree diffusion for system 2 planning. arXiv preprint arXiv:2502.07202, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 222, + 504, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 222, + 504, + 256 + ], + "spans": [ + { + "bbox": [ + 111, + 222, + 504, + 256 + ], + "type": "text", + "content": "[977] Bin Yu, Hang Yuan, Haotian Li, Xueyin Xu, Yuliang Wei, Bailing Wang, Weizhen Qi, and Kai Chen. Long-short chain-of-thought mixture supervised fine-tuning eliciting efficient reasoning in large language models. arXiv preprint arXiv:2505.03469, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 259, + 506, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 259, + 506, + 293 + ], + "spans": [ + { + "bbox": [ + 111, + 259, + 506, + 293 + ], + "type": "text", + "content": "[978] Dian Yu, Baolin Peng, Ye Tian, Linfeng Song, Haitao Mi, and Dong Yu. Siam: Self-improving code-assisted mathematical reasoning of large language models. 
arXiv preprint arXiv:2408.15565, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 297, + 506, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 297, + 506, + 363 + ], + "spans": [ + { + "bbox": [ + 111, + 297, + 506, + 363 + ], + "type": "text", + "content": "[979] Fei Yu, Anningzhe Gao, and Benyou Wang. OVM, outcome-supervised value models for planning in mathematical reasoning. In Kevin Duh, Helena Gomez, and Steven Bethard, editors, Findings of the Association for Computational Linguistics: NAACL 2024, pages 858-875, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-naacl.55. URL https://aclanthology.org/2024.findings-naacl.55/." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 368, + 506, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 368, + 506, + 401 + ], + "spans": [ + { + "bbox": [ + 111, + 368, + 506, + 401 + ], + "type": "text", + "content": "[980] Fei Yu, Hongbo Zhang, Prayag Tiwari, and Benyou Wang. Natural language reasoning, a survey. ACM Comput. Surv., 56(12), October 2024. ISSN 0360-0300. doi: 10.1145/3664194. URL https://doi.org/10.1145/3664194." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 404, + 504, + 427 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 404, + 504, + 427 + ], + "spans": [ + { + "bbox": [ + 111, + 404, + 504, + 427 + ], + "type": "text", + "content": "[981] Fei Yu, Yingru Li, and Benyou Wang. Uncertainty-aware search and value models: Mitigating search scaling flaws in llms. arXiv preprint arXiv:2502.11155, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 431, + 506, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 431, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 111, + 431, + 506, + 475 + ], + "type": "text", + "content": "[982] Hongli Yu, Tinghong Chen, Jiangtao Feng, Jiangjie Chen, Weinan Dai, Qiying Yu, YaQin Zhang, Wei-Ying Ma, Jingjing Liu, Mingxuan Wang, et al. Memagent: Reshaping long-context llm with multi-conv rl-based memory agent. arXiv preprint arXiv:2507.02259, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 479, + 506, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 479, + 506, + 535 + ], + "spans": [ + { + "bbox": [ + 111, + 479, + 506, + 535 + ], + "type": "text", + "content": "[983] Longhui Yu, Weisen Jiang, Han Shi, Jincheng YU, Zhengying Liu, Yu Zhang, James Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=N8N0hgNDRt." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 539, + 504, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 539, + 504, + 562 + ], + "spans": [ + { + "bbox": [ + 111, + 539, + 504, + 562 + ], + "type": "text", + "content": "[984] Ping Yu, Jing Xu, Jason Weston, and Ilia Kulikov. Distilling system 2 into system 1. arXiv preprint arXiv:2407.06023, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 566, + 506, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 566, + 506, + 599 + ], + "spans": [ + { + "bbox": [ + 111, + 566, + 506, + 599 + ], + "type": "text", + "content": "[985] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. 
Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 603, + 506, + 636 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 603, + 506, + 636 + ], + "spans": [ + { + "bbox": [ + 111, + 603, + 506, + 636 + ], + "type": "text", + "content": "[986] Tianyu Yu, Bo Ji, Shouli Wang, Shu Yao, Zefan Wang, Ganqu Cui, Lifan Yuan, Ning Ding, Yuan Yao, Zhiyuan Liu, et al. Rlpr: Extrapolating rlvr to general domains without verifiers. arXiv preprint arXiv:2506.18254, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 640, + 506, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 640, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 111, + 640, + 506, + 673 + ], + "type": "text", + "content": "[987] Tong Yu, Yongcheng Jing, Xikun Zhang, Wentao Jiang, Wenjie Wu, Yingjie Wang, Wenbin Hu, Bo Du, and Dacheng Tao. Benchmarking reasoning robustness in large language models. arXiv preprint arXiv:2503.04550, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 677, + 506, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 677, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 111, + 677, + 506, + 722 + ], + "type": "text", + "content": "[988] Xiao Yu, Baolin Peng, Vineeth Vajipey, Hao Cheng, Michel Galley, Jianfeng Gao, and Zhou Yu. ExACT: Teaching AI agents to explore with reflective-MCTS and exploratory learning. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=GBIUbwW9D8." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "98" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 97 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 95 + ], + "type": "text", + "content": "[989] Yahan Yu, Yuyang Dong, and Masafumi Oyamada. Learning deliberately, acting intuitively: Unlocking test-time reasoning in multimodal llms. arXiv preprint arXiv:2507.06999, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 99, + 506, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 99, + 506, + 144 + ], + "spans": [ + { + "bbox": [ + 111, + 99, + 506, + 144 + ], + "type": "text", + "content": "[990] Yiyao Yu, Yuxiang Zhang, Dongdong Zhang, Xiao Liang, Hengyuan Zhang, Xingxing Zhang, Ziyi Yang, Mahmoud Khademi, Hany Awadalla, Junjie Wang, et al. Chain-of-reasoning: Towards unified mathematical reasoning in large language models via a multi-paradigm perspective. arXiv preprint arXiv:2501.11110, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 148, + 506, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 148, + 506, + 182 + ], + "spans": [ + { + "bbox": [ + 111, + 148, + 506, + 182 + ], + "type": "text", + "content": "[991] Yue Yu, Zhengxing Chen, Aston Zhang, Liang Tan, Chenguang Zhu, Richard Yuanzhe Pang, Yundi Qian, Xuewei Wang, Suchin Gururangan, Chao Zhang, et al. Self-generated critiques boost reward modeling for language models. arXiv preprint arXiv:2411.16646, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 186, + 506, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 186, + 506, + 218 + ], + "spans": [ + { + "bbox": [ + 111, + 186, + 506, + 218 + ], + "type": "text", + "content": "[992] Zeping Yu, Yonatan Belinkov, and Sophia Ananiadou. Back attention: Understanding and enhancing multi-hop reasoning in large language models. arXiv preprint arXiv:2502.10835, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 224, + 505, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 224, + 505, + 257 + ], + "spans": [ + { + "bbox": [ + 111, + 224, + 505, + 257 + ], + "type": "text", + "content": "[993] Zhaojian Yu, Yilun Zhao, Arman Cohan, and Xiao-Ping Zhang. Humaneval pro and mbpp pro: Evaluating large language models on self-invoking code generation. arXiv preprint arXiv:2412.21199, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 261, + 504, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 261, + 504, + 285 + ], + "spans": [ + { + "bbox": [ + 111, + 261, + 504, + 285 + ], + "type": "text", + "content": "[994] Zhaojian Yu, Yinghao Wu, Yilun Zhao, Arman Cohan, and Xiao-Ping Zhang. Z1: Efficient test-time scaling with code. arXiv preprint arXiv:2504.00810, 2025." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 289, + 506, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 289, + 506, + 322 + ], + "spans": [ + { + "bbox": [ + 111, + 289, + 506, + 322 + ], + "type": "text", + "content": "[995] Zhouliang Yu, Yuhuan Yuan, Tim Z Xiao, Fuxiang Frank Xia, Jie Fu, Ge Zhang, Ge Lin, and Weiyang Liu. Generating symbolic world models via test-time scaling of large language models. arXiv preprint arXiv:2502.04728, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 326, + 504, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 326, + 504, + 358 + ], + "spans": [ + { + "bbox": [ + 111, + 326, + 504, + 358 + ], + "type": "text", + "content": "[996] Zhuohao Yu, Weizheng Gu, Yidong Wang, Zhengran Zeng, Jindong Wang, Wei Ye, and Shikun Zhang. Outcome-refining process supervision for code generation. arXiv preprint arXiv:2412.15118, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 364, + 506, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 364, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 111, + 364, + 506, + 407 + ], + "type": "text", + "content": "[997] Zishun Yu, Tengyu Xu, Di Jin, Karthik Abinav Sankararaman, Yun He, Wenxuan Zhou, Zhouhao Zeng, Eryk Helenowski, Chen Zhu, Sinong Wang, et al. Think smarter not harder: Adaptive reasoning with inference aware optimization. arXiv preprint arXiv:2501.17974, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 412, + 506, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 412, + 506, + 445 + ], + "spans": [ + { + "bbox": [ + 111, + 412, + 506, + 445 + ], + "type": "text", + "content": "[998] Hang Yuan, Bin Yu, Haotian Li, Shijun Yang, Christina Dan Wang, Zhou Yu, Xueyin Xu, Weizhen Qi, and Kai Chen. Not all tokens are what you need in thinking. arXiv preprint arXiv:2505.17827, 2025." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 451, + 506, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 451, + 506, + 483 + ], + "spans": [ + { + "bbox": [ + 111, + 451, + 506, + 483 + ], + "type": "text", + "content": "[999] Jiahao Yuan, Dehui Du, Hao Zhang, Zixiang Di, and Usman Naseem. Reversal of thought: Enhancing large language models with preference-guided reverse reasoning warm-up. arXiv preprint arXiv:2410.12323, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 488, + 506, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 488, + 506, + 521 + ], + "spans": [ + { + "bbox": [ + 107, + 488, + 506, + 521 + ], + "type": "text", + "content": "[1000] Lifan Yuan, Wendi Li, Huayu Chen, Ganqu Cui, Ning Ding, Kaiyan Zhang, Bowen Zhou, Zhiyuan Liu, and Hao Peng. Free process rewards without process labels. arXiv preprint arXiv:2412.01981, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 526, + 506, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 526, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 106, + 526, + 506, + 582 + ], + "type": "text", + "content": "[1001] Lifan Yuan, Ganqu Cui, Hanbin Wang, Ning Ding, Xingyao Wang, Boji Shan, Zeyuan Liu, Jia Deng, Huimin Chen, Ruobing Xie, Yankai Lin, Zhenghao Liu, Bowen Zhou, Hao Peng, Zhiyuan Liu, and Maosong Sun. Advancing LLM reasoning generalists with preference trees. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=2ea5TNVR0c." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 586, + 506, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 586, + 506, + 620 + ], + "spans": [ + { + "bbox": [ + 107, + 586, + 506, + 620 + ], + "type": "text", + "content": "[1002] Michelle Yuan, Elman Mansimov, Katerina Margatina, Anurag Pratik, Daniele Bonadiman, Monica Sunkara, Yi Zhang, Yassine Benajiba, et al. A study on leveraging search and self-feedback for agent reasoning. arXiv preprint arXiv:2502.12094, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 624, + 506, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 624, + 506, + 657 + ], + "spans": [ + { + "bbox": [ + 107, + 624, + 506, + 657 + ], + "type": "text", + "content": "[1003] Siyu Yuan, Zehui Chen, Zhiheng Xi, Junjie Ye, Zhengyin Du, and Jiecao Chen. Agentr: Training language model agents to reflect via iterative self-training. arXiv preprint arXiv:2501.11425, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 662, + 506, + 695 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 662, + 506, + 695 + ], + "spans": [ + { + "bbox": [ + 107, + 662, + 506, + 695 + ], + "type": "text", + "content": "[1004] Weizhe Yuan, Jane Yu, Song Jiang, Karthik Padthe, Yang Li, Dong Wang, Ilia Kulikov, Kyunghyun Cho, Yuandong Tian, Jason E Weston, and Xian Li. Naturalreasoning: Reasoning in the wild with 2.8m challenging questions, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 699, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 699, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 699, + 506, + 723 + ], + "type": "text", + "content": "[1005] Yige Yuan, Teng Xiao, Shuchang Tao, Xue Wang, Jinyang Gao, Bolin Ding, and Bingbing Xu. Incentivizing reasoning from weak supervision. arXiv preprint arXiv:2505.20072, 2025." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "99" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 98 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "[1006] Xiang Yue, Xingwei Qu, Ge Zhang, Yao Fu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. Mammoth: Building math generalist models through hybrid instruction tuning. arXiv preprint arXiv:2309.05653, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 108, + 506, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 108, + 506, + 153 + ], + "spans": [ + { + "bbox": [ + 106, + 108, + 506, + 153 + ], + "type": "text", + "content": "[1007] Xiang Yue, Tianyu Zheng, Ge Zhang, and Wenhu Chen. Mammoth2: Scaling instructions from the web. Advances in Neural Information Processing Systems, 37:90629-90660, 2025. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/a4ca07aa108036f80cbb5b82285fd4b1-Paper-Conference.pdf." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 156, + 506, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 156, + 506, + 190 + ], + "spans": [ + { + "bbox": [ + 106, + 156, + 506, + 190 + ], + "type": "text", + "content": "[1008] Zhenrui Yue, Bowen Jin, Huimin Zeng, Honglei Zhuang, Zhen Qin, Jinsung Yoon, Lanyu Shang, Jiawei Han, and Dong Wang. Hybrid latent reasoning via reinforcement learning. arXiv preprint arXiv:2505.18454, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 193, + 506, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 193, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 106, + 193, + 506, + 237 + ], + "type": "text", + "content": "[1009] Mert Yuksekgonul, Federico Bianchi, Joseph Boen, Sheng Liu, Pan Lu, Zhi Huang, Carlos Guestrin, and James Zou. Optimizing generative ai by backpropagating language model feedback. Nature, 639(8055):609-616, March 2025. URL https://www.nature.com/articles/s41586-025-08661-4." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 240, + 506, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 240, + 506, + 306 + ], + "spans": [ + { + "bbox": [ + 106, + 240, + 506, + 306 + ], + "type": "text", + "content": "[1010] YuYue, Yufeng Yuan, Qiying Yu, Xiaochen Zuo, Ruofei Zhu, Wenyuan Xu, Jiaze Chen, Chengyi Wang, TianTian Fan, Zhengyin Du, Xiangpeng Wei, Gaohong Liu, Juncai Liu, Lingjun Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Ru Zhang, Xin Liu, Mingxuan Wang, Yonghui Wu, and Lin Yan. Vapo: Efficient and reliable reinforcement learning for advanced reasoning tasks. arXiv preprint arXiv:2504.05118, 2025." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 310, + 504, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 310, + 504, + 345 + ], + "spans": [ + { + "bbox": [ + 106, + 310, + 504, + 345 + ], + "type": "text", + "content": "[1011] Yuhang Zang, Xiaoyi Dong, Pan Zhang, Yuhang Cao, Ziyu Liu, Shengyuan Ding, Shenxi Wu, Yubo Ma, Haodong Duan, Wenwei Zhang, et al. Internlm-xcomposer2.5-reward: A simple yet effective multi-modal reward model. arXiv preprint arXiv:2501.12368, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 347, + 506, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 347, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 106, + 347, + 506, + 381 + ], + "type": "text", + "content": "[1012] Eric Zelikman, Yuhuai Wu, Jesse Mu, and Noah Goodman. Star: Bootstrapping reasoning with reasoning. Advances in Neural Information Processing Systems, 35:15476-15488, November 2022. URL https://openreview.net/pdf?id=3ELRdg2sqI." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 384, + 506, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 384, + 506, + 418 + ], + "spans": [ + { + "bbox": [ + 106, + 384, + 506, + 418 + ], + "type": "text", + "content": "[1013] Eric Zelikman, Georges Harik, Yijia Shao, Varuna Jayasiri, Nick Haber, and Noah D Goodman. Quiet-star: Language models can teach themselves to think before speaking. arXiv preprint arXiv:2403.09629, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 420, + 506, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 420, + 506, + 453 + ], + "spans": [ + { + "bbox": [ + 106, + 420, + 506, + 453 + ], + "type": "text", + "content": "[1014] Huaye Zeng, Dongfu Jiang, Haozhe Wang, Ping Nie, Xiaotong Chen, and Wenhu Chen. Acecoder: Acing coder rl via automated test-case synthesis. arXiv preprint arXiv:2502.01718, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 456, + 506, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 456, + 506, + 491 + ], + "spans": [ + { + "bbox": [ + 106, + 456, + 506, + 491 + ], + "type": "text", + "content": "[1015] Thomas Zeng, Shuibai Zhang, Shutong Wu, Christian Classen, Daewon Chae, Ethan Ewer, Minjae Lee, Heeju Kim, Wonjun Kang, Jackson Kunde, et al. Versaprm: Multi-domain process reward model via synthetic reasoning data. arXiv preprint arXiv:2502.06737, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 494, + 506, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 494, + 506, + 528 + ], + "spans": [ + { + "bbox": [ + 106, + 494, + 506, + 528 + ], + "type": "text", + "content": "[1016] Weihao Zeng, Yuzhen Huang, Lulu Zhao, Yijun Wang, Zifei Shan, and Junxian He. B-star: Monitoring and balancing exploration and exploitation in self-taught reasoners. arXiv preprint arXiv:2412.17256, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 530, + 506, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 530, + 506, + 564 + ], + "spans": [ + { + "bbox": [ + 106, + 530, + 506, + 564 + ], + "type": "text", + "content": "[1017] Weihao Zeng, Yuzhen Huang, Qian Liu, Wei Liu, Keqing He, Zejun Ma, and Junxian He. Simplerl-zoo: Investigating and taming zero reinforcement learning for open base models in the wild, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 567, + 504, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 567, + 504, + 602 + ], + "spans": [ + { + "bbox": [ + 106, + 567, + 504, + 602 + ], + "type": "text", + "content": "[1018] Yongcheng Zeng, Xinyu Cui, Xuanfa Jin, Guoqing Liu, Zexu Sun, Quan He, Dong Li, Ning Yang, Jianye Hao, Haifeng Zhang, et al. Aries: Stimulating self-refinement of large language models by iterative preference optimization. 
arXiv preprint arXiv:2502.05605, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 604, + 506, + 648 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 604, + 506, + 648 + ], + "spans": [ + { + "bbox": [ + 106, + 604, + 506, + 648 + ], + "type": "text", + "content": "[1019] Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. arXiv preprint arXiv:2412.14135, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 651, + 506, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 651, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 106, + 651, + 506, + 685 + ], + "type": "text", + "content": "[1020] Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Yunhua Zhou, and Xipeng Qiu. Revisiting the test-time scaling of o1-like models: Do they truly possess test-time scaling capabilities? arXiv preprint arXiv:2502.12215, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 688, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 688, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 688, + 506, + 723 + ], + "type": "text", + "content": "[1021] Zhongshen Zeng, Yinhong Liu, Yingjia Wan, Jingyao Li, Pengguang Chen, Jianbo Dai, Yuxuan Yao, Rongwu Xu, Zehan Qi, Wanru Zhao, Linling Shen, Jianqiao Lu, Haochen Tan, Yukang Chen, Hao Zhang, Zhan Shi, Bailin Wang, Zhijiang Guo, and Jiaya Jia. 
MR-ben:" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 740, + 313, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 740, + 313, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 740, + 313, + 750 + ], + "type": "text", + "content": "100" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 99 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 136, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 136, + 72, + 505, + 106 + ], + "type": "text", + "content": "A meta-reasoning benchmark for evaluating system-2 thinking in LLMs. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, June 2024. URL https://openreview.net/forum?id=GN2qbxZ1ni." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 110, + 505, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 110, + 505, + 133 + ], + "spans": [ + { + "bbox": [ + 105, + 110, + 505, + 133 + ], + "type": "text", + "content": "[1022] Zihao Zeng, Xuyao Huang, Boxiu Li, and Zhijie Deng. Sift: Grounding llm reasoning in contexts via stickers. arXiv preprint arXiv:2502.14922, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 136, + 506, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 136, + 506, + 192 + ], + "spans": [ + { + "bbox": [ + 106, + 136, + 506, + 192 + ], + "type": "text", + "content": "[1023] Yuexiang Zhai, Hao Bai, Zipeng Lin, Jiayi Pan, Shengbang Tong, Yifei Zhou, Alane Suhr, Saining Xie, Yann LeCun, Yi Ma, and Sergey Levine. Fine-tuning large vision-language models as decision-making agents via reinforcement learning. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=nBjmMF2IZU." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 194, + 504, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 194, + 504, + 229 + ], + "spans": [ + { + "bbox": [ + 107, + 194, + 504, + 229 + ], + "type": "text", + "content": "[1024] Zaifu Zhan, Shuang Zhou, Huixue Zhou, Jiawen Deng, Yu Hou, Jeremy Yeung, and Rui Zhang. An evaluation of deepseek models in biomedical natural language processing. arXiv preprint arXiv:2503.00624, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 232, + 505, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 232, + 505, + 267 + ], + "spans": [ + { + "bbox": [ + 107, + 232, + 505, + 267 + ], + "type": "text", + "content": "[1025] Alexander Zhang, Marcus Dong, Jiaheng Liu, Wei Zhang, Yejie Wang, Jian Yang, Ge Zhang, Tianyu Liu, Zhongyuan Peng, Yingshui Tan, et al. Codecriticbench: A holistic code critique benchmark for large language models. arXiv preprint arXiv:2502.16614, 2025." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 270, + 506, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 270, + 506, + 305 + ], + "spans": [ + { + "bbox": [ + 107, + 270, + 506, + 305 + ], + "type": "text", + "content": "[1026] Beichen Zhang, Yuhong Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Haodong Duan, Yuhang Cao, Dahua Lin, and Jiaqi Wang. Booststep: Boosting mathematical capability of large language models via improved single-step reasoning. arXiv preprint arXiv:2501.03226, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 307, + 506, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 307, + 506, + 342 + ], + "spans": [ + { + "bbox": [ + 107, + 307, + 506, + 342 + ], + "type": "text", + "content": "[1027] Bohan Zhang, Xiaokang Zhang, Jing Zhang, Jifan Yu, Sijia Luo, and Jie Tang. Cot-based synthesizer: Enhancing llm performance through answer synthesis. arXiv preprint arXiv:2501.01668, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 344, + 504, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 344, + 504, + 379 + ], + "spans": [ + { + "bbox": [ + 107, + 344, + 504, + 379 + ], + "type": "text", + "content": "[1028] Che Zhang, Zhenyang Xiao, Chengcheng Han, Yixin Lian, and Yuejian Fang. Learning to check: Unleashing potentials for self-correction in large language models. arXiv preprint arXiv:2402.13035, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 381, + 506, + 416 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 381, + 506, + 416 + ], + "spans": [ + { + "bbox": [ + 107, + 381, + 506, + 416 + ], + "type": "text", + "content": "[1029] Chi Zhang, Jiajun Song, Siyu Li, Yitao Liang, Yuxi Ma, Wei Wang, Yixin Zhu, and Song-Chun Zhu. Proposing and solving olympiad geometry with guided tree search. arXiv preprint arXiv:2412.10673, 2024." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 419, + 506, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 419, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 107, + 419, + 506, + 475 + ], + "type": "text", + "content": "[1030] Chunhui Zhang, Zhongyu Ouyang, Kwonjoon Lee, Nakul Agarwal, Sean Dae Houlihan, Soroush Vosoughi, and Shao-Yuan Lo. Overcoming multi-step complexity in multimodal theory-of-mind reasoning: A scalable bayesian planner. In *Forty-second International Conference on Machine Learning*, 2025. URL https://openreview.net/forum?id=2dz6psiiA0." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 479, + 504, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 479, + 504, + 514 + ], + "spans": [ + { + "bbox": [ + 107, + 479, + 504, + 514 + ], + "type": "text", + "content": "[1031] Dalong Zhang, Jun Xu, Jun Zhou, Lei Liang, Lin Yuan, Ling Zhong, Mengshu Sun, Peilong Zhao, QiWei Wang, Xiaorui Wang, et al. Kag-thinker: Teaching large language models to think with human-like reasoning process. arXiv preprint arXiv:2506.17728, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 516, + 506, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 516, + 506, + 562 + ], + "spans": [ + { + "bbox": [ + 107, + 516, + 506, + 562 + ], + "type": "text", + "content": "[1032] Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. ReST-MCTS*: LLM self-training via process reward guided tree search. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. URL https://openreview.net/forum?id=8rcFOqEud5." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 564, + 506, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 564, + 506, + 599 + ], + "spans": [ + { + "bbox": [ + 107, + 564, + 506, + 599 + ], + "type": "text", + "content": "[1033] Di Zhang, Xiaoshui Huang, Dongzhan Zhou, Yuqiang Li, and Wanli Ouyang. Accessing gpt-4 level mathematical olympiad solutions via monte carlo tree self-refine with llama-3 8b. arXiv preprint arXiv:2406.07394, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 601, + 504, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 601, + 504, + 637 + ], + "spans": [ + { + "bbox": [ + 107, + 601, + 504, + 637 + ], + "type": "text", + "content": "[1034] Di Zhang, Jianbo Wu, Jingdi Lei, Tong Che, Jiatong Li, Tong Xie, Xiaoshui Huang, Shufei Zhang, Marco Pavone, Yuqiang Li, et al. Llama-berry: Pairwise optimization for o1-like olympiad-level mathematical reasoning. arXiv preprint arXiv:2410.02884, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 639, + 506, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 639, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 107, + 639, + 506, + 685 + ], + "type": "text", + "content": "[1035] Fengji Zhang, Linquan Wu, Huiyu Bai, Guancheng Lin, Xiao Li, Xiao Yu, Yue Wang, Bei Chen, and Jacky Keung. Humaneval-v: Evaluating visual understanding and reasoning abilities of large multimodal models through coding tasks. arXiv preprint arXiv:2410.12381, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 687, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 687, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 687, + 504, + 723 + ], + "type": "text", + "content": "[1036] Hanning Zhang, Pengcheng Wang, Shizhe Diao, Yong Lin, Rui Pan, Hanze Dong, Dylan Zhang, Pavlo Molchanov, and Tong Zhang. 
Entropy-regularized process reward model. arXiv preprint arXiv:2412.11006, 2024." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 312, + 750 + ], + "type": "text", + "content": "101" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 100 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "[1037] Haoyue Zhang, Hualei Zhang, Xiaosong Ma, Jie Zhang, and Song Guo. Lazyeviction: Lagged kv eviction with attention pattern observation for efficient long reasoning. arXiv preprint arXiv:2506.15969, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 108, + 505, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 108, + 505, + 144 + ], + "spans": [ + { + "bbox": [ + 107, + 108, + 505, + 144 + ], + "type": "text", + "content": "[1038] Hongbo Zhang, Han Cui, Guangsheng Bao, Linyi Yang, Jun Wang, and Yue Zhang. Direct value optimization: Improving chain-of-thought reasoning in llms with refined values. arXiv preprint arXiv:2502.13723, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 145, + 506, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 145, + 506, + 179 + ], + "spans": [ + { + "bbox": [ + 106, + 145, + 506, + 179 + ], + "type": "text", + "content": "[1039] Jiayi Zhang, Jinyu Xiang, Zhaoyang Yu, Fengwei Teng, Xionghui Chen, Jiaqi Chen, Mingchen Zhuge, Xin Cheng, Sirui Hong, Jinlin Wang, et al. Aflow: Automating agentic workflow generation. arXiv preprint arXiv:2410.10762, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 181, + 506, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 181, + 506, + 216 + ], + "spans": [ + { + "bbox": [ + 106, + 181, + 506, + 216 + ], + "type": "text", + "content": "[1040] Jinghan Zhang, Xiting Wang, Fengran Mo, Yeyang Zhou, Wanfu Gao, and Kunpeng Liu. Entropy-based exploration conduction for multi-step reasoning. arXiv preprint arXiv:2503.15848, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 217, + 506, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 217, + 506, + 252 + ], + "spans": [ + { + "bbox": [ + 107, + 217, + 506, + 252 + ], + "type": "text", + "content": "[1041] Jintian Zhang, Yuqi Zhu, Mengshu Sun, Yujie Luo, Shuofei Qiao, Lun Du, Da Zheng, Huajun Chen, and Ningyu Zhang. Lighthinker: Thinking step-by-step compression. arXiv preprint arXiv:2502.15589, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 254, + 506, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 254, + 506, + 289 + ], + "spans": [ + { + "bbox": [ + 107, + 254, + 506, + 289 + ], + "type": "text", + "content": "[1042] Kaiyi Zhang, Ang Lv, Jinpeng Li, Yongbo Wang, Feng Wang, Haoyuan Hu, and Rui Yan. Stephint: Multi-level stepwise hints enhance reinforcement learning to reason. arXiv preprint arXiv:2507.02841, 2025." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 290, + 506, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 290, + 506, + 324 + ], + "spans": [ + { + "bbox": [ + 107, + 290, + 506, + 324 + ], + "type": "text", + "content": "[1043] Kechi Zhang, Ge Li, Jia Li, Yihong Dong, and Zhi Jin. Focused-dpo: Enhancing code generation through focused preference optimization on error-prone points. arXiv preprint arXiv:2502.11475, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 327, + 506, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 327, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 107, + 327, + 506, + 361 + ], + "type": "text", + "content": "[1044] Kechi Zhang, Ge Li, Jia Li, Huangzhao Zhang, Jingjing Xu, Hao Zhu, Lecheng Wang, Yihong Dong, Jing Mai, Bin Gu, et al. Computational thinking reasoning in large language models. arXiv preprint arXiv:2506.02658, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 363, + 506, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 363, + 506, + 388 + ], + "spans": [ + { + "bbox": [ + 107, + 363, + 506, + 388 + ], + "type": "text", + "content": "[1045] Kexun Zhang, Shang Zhou, Danqing Wang, William Yang Wang, and Lei Li. Scaling llm inference with optimized sample compute allocation. arXiv preprint arXiv:2410.22480, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 389, + 506, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 389, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 107, + 389, + 506, + 423 + ], + "type": "text", + "content": "[1046] Kongcheng Zhang, Qi Yao, Baisheng Lai, Jiaxing Huang, Wenkai Fang, Dacheng Tao, Mingli Song, and Shunyu Liu. Reasoning with reinforced functional token tuning. arXiv preprint arXiv:2502.13389, 2025." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 425, + 506, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 425, + 506, + 460 + ], + "spans": [ + { + "bbox": [ + 107, + 425, + 506, + 460 + ], + "type": "text", + "content": "[1047] Kongcheng Zhang, Qi Yao, Shunyu Liu, Yingjie Wang, Baisheng Lai, Jieping Ye, Mingli Song, and Dacheng Tao. Consistent paths lead to truth: Self-rewarding reinforcement learning for lIm reasoning. arXiv preprint arXiv:2506.08745, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 462, + 506, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 462, + 506, + 496 + ], + "spans": [ + { + "bbox": [ + 107, + 462, + 506, + 496 + ], + "type": "text", + "content": "[1048] Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. arXiv preprint arXiv:2408.15240, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 498, + 506, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 498, + 506, + 544 + ], + "spans": [ + { + "bbox": [ + 107, + 498, + 506, + 544 + ], + "type": "text", + "content": "[1049] Ming Zhang, Yu jiong Shen, Zelin Li, Huayu Sha, Binze Hu, Yuhui Wang, Chenhao Huang, Shichun Liu, Jingqi Tong, Changhao Jiang, et al. Llmeval-med: A real-world clinical benchmark for medical llms with physician validation. arXiv preprint arXiv:2506.04078, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 545, + 506, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 545, + 506, + 612 + ], + "spans": [ + { + "bbox": [ + 107, + 545, + 506, + 612 + ], + "type": "text", + "content": "[1050] Ming-Liang Zhang, Fei yin, and Cheng-Lin Liu. A multi-modal neural geometric solver with textual clauses parsed from diagram. 
In Edith Elkind, editor, Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence, IJCAI-23, pages 3374-3382. International Joint Conferences on Artificial Intelligence Organization, 8 2023. doi: 10.24963/ijcai.2023/376. URL https://doi.org/10.24963/ijcai.2023/376. Main Track." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 614, + 506, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 614, + 506, + 649 + ], + "spans": [ + { + "bbox": [ + 107, + 614, + 506, + 649 + ], + "type": "text", + "content": "[1051] Qingjie Zhang, Han Qiu, Di Wang, Haoting Qian, Yiming Li, Tianwei Zhang, and Minlie Huang. Understanding the dark side of llms' intrinsic self-correction. arXiv preprint arXiv:2412.14959, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 651, + 506, + 687 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 651, + 506, + 687 + ], + "spans": [ + { + "bbox": [ + 107, + 651, + 506, + 687 + ], + "type": "text", + "content": "[1052] Qiyuan Zhang, Fuyuan Lyu, Zexu Sun, Lei Wang, Weixu Zhang, Zhihan Guo, Yufei Wang, Irwin King, Xue Liu, and Chen Ma. What, how, where, and how well? a survey on test-time scaling in large language models. arXiv preprint arXiv:2503.24235, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 689, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 689, + 506, + 723 + ], + "type": "text", + "content": "[1053] Qiyuan Zhang, Fuyuan Lyu, Zexu Sun, Lei Wang, Weixu Zhang, Wenyue Hua, Haolun Wu, Zhihan Guo, Yufei Wang, Niklas Muennighoff, et al. A survey on test-time scaling in large language models: What, how, where, and how well? arXiv preprint arXiv:2503.24235, 2025." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 313, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 313, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 313, + 750 + ], + "type": "text", + "content": "102" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 101 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 127 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 127 + ], + "type": "text", + "content": "[1054] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Yu Qiao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? In European Conference on Computer Vision, pages 169-186. Springer, October 2024. URL https://link.springer.com/chapter/10.1007/978-3-031-73242-3_10." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 130, + 506, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 130, + 506, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 130, + 506, + 198 + ], + "type": "text", + "content": "[1055] Shaowei Zhang and Deyi Xiong. BackMATH: Towards backward reasoning for solving math problems step by step. 
In Owen Rambow, Leo Wanner, Marianna Apidianaki, Hend Al-Khalifa, Barbara Di Eugenio, Steven Schockaert, Kareem Darwish, and Apoorv Agarwal, editors, Proceedings of the 31st International Conference on Computational Linguistics: Industry Track, pages 466-482, Abu Dhabi, UAE, January 2025. Association for Computational Linguistics. URL https://aclanthology.org/2025.coling-industry.40/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 200, + 504, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 200, + 504, + 233 + ], + "spans": [ + { + "bbox": [ + 106, + 200, + 504, + 233 + ], + "type": "text", + "content": "[1056] Shenao Zhang, Yaqing Wang, Yinxiao Liu, Tianqi Liu, Peter Grabowski, Eugene Ie, Zhaoran Wang, and Yunxuan Li. Beyond markovian: Reflective exploration via bayes-adaptive rl for llm reasoning. arXiv preprint arXiv:2505.20561, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 236, + 504, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 236, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 106, + 236, + 504, + 270 + ], + "type": "text", + "content": "[1057] Shengjia Zhang, Junjie Wu, Jiawei Chen, Changwang Zhang, Xingyu Lou, Wangchunshu Zhou, Sheng Zhou, Can Wang, and Jun Wang. Othink-r1: Intrinsic fast/slow thinking mode switching for over-reasoning mitigation. arXiv preprint arXiv:2506.02397, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 273, + 506, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 273, + 506, + 306 + ], + "spans": [ + { + "bbox": [ + 106, + 273, + 506, + 306 + ], + "type": "text", + "content": "[1058] Shengyu Zhang, Linfeng Dong, Xiaoya Li, Sen Zhang, Xiaofei Sun, Shuhe Wang, Jiwei Li, Runyi Hu, Tianwei Zhang, Fei Wu, et al. Instruction tuning for large language models: A survey. arXiv preprint arXiv:2308.10792, 2023." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 309, + 506, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 309, + 506, + 342 + ], + "spans": [ + { + "bbox": [ + 106, + 309, + 506, + 342 + ], + "type": "text", + "content": "[1059] Shimao Zhang, Xiao Liu, Xin Zhang, Junxiao Liu, Zheheng Luo, Shujian Huang, and Yeyun Gong. Process-based self-rewarding language models. arXiv preprint arXiv:2503.03746, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 346, + 506, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 346, + 506, + 389 + ], + "spans": [ + { + "bbox": [ + 106, + 346, + 506, + 389 + ], + "type": "text", + "content": "[1060] Weizhi Zhang, Yangning Li, Yuanchen Bei, Junyu Luo, Guancheng Wan, Liangwei Yang, Chenxuan Xie, Yuyao Yang, Wei-Chieh Huang, Chunyu Miao, et al. From web search towards agentic deep research: Incentivizing search with reasoning agents. arXiv preprint arXiv:2506.18959, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 393, + 506, + 427 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 393, + 506, + 427 + ], + "spans": [ + { + "bbox": [ + 106, + 393, + 506, + 427 + ], + "type": "text", + "content": "[1061] Wenjing Zhang, Xuejiao Lei, Zhaoxiang Liu, Ning Wang, Zhenhong Long, Peijun Yang, Jiaojiao Zhao, Minjie Hua, Chaoyang Ma, Kai Wang, et al. Safety evaluation of deepseek models in Chinese contexts. arXiv preprint arXiv:2502.11137, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 430, + 506, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 430, + 506, + 506 + ], + "spans": [ + { + "bbox": [ + 106, + 430, + 506, + 506 + ], + "type": "text", + "content": "[1062] Wenqi Zhang, Yongliang Shen, Linjuan Wu, Qiuying Peng, Jun Wang, Yueting Zhuang, and Weiming Lu. Self-contrast: Better reflection through inconsistent solving perspectives. 
In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 3602–3622, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.197. URL https://aclanthology.org/2024.acl-long.197/." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 510, + 504, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 510, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 106, + 510, + 504, + 544 + ], + "type": "text", + "content": "[1063] Xiaoyun Zhang, Jingqing Ruan, Xing Ma, Yawen Zhu, Haodong Zhao, Hao Li, Jiansong Chen, Ke Zeng, and Xunliang Cai. When to continue thinking: Adaptive thinking mode switching for efficient reasoning. arXiv preprint arXiv:2505.15400, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 546, + 506, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 546, + 506, + 580 + ], + "spans": [ + { + "bbox": [ + 106, + 546, + 506, + 580 + ], + "type": "text", + "content": "[1064] Xinyu Zhang, Yuxuan Dong, Yanrui Wu, Jiaxing Huang, Chengyou Jia, Basura Fernando, Mike Zheng Shou, Lingling Zhang, and Jun Liu. Physreason: A comprehensive benchmark towards physics-based reasoning. arXiv preprint arXiv:2502.12054, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 583, + 506, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 583, + 506, + 660 + ], + "spans": [ + { + "bbox": [ + 106, + 583, + 506, + 660 + ], + "type": "text", + "content": "[1065] Xuan Zhang, Chao Du, Tianyu Pang, Qian Liu, Wei Gao, and Min Lin. Chain of preference optimization: Improving chain-of-thought reasoning in llms. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, and C. Zhang, editors, Advances in Neural Information Processing Systems, volume 37, pages 333-356. 
Curran Associates, Inc., September 2024. URL https://proceedings.neurips.cc/paper_files/paper/2024/file/00d80722b756de0166523a87805dd00f-Paper-Conference.pdf." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 663, + 506, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 663, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 106, + 663, + 506, + 696 + ], + "type": "text", + "content": "[1066] Xuanliang Zhang, Dingzirui Wang, Keyan Xu, Qingfu Zhu, and Wanxiang Che. Rot: Enhancing table reasoning with iterative row-wise traversals. arXiv preprint arXiv:2505.15110, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 700, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 700, + 504, + 723 + ], + "type": "text", + "content": "[1067] Yifan Zhang, Yang Yuan, and Andrew Chi-Chih Yao. On the diagram of thought. arXiv preprint arXiv:2409.10038, 2024." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 313, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 313, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 313, + 750 + ], + "type": "text", + "content": "103" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 102 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 105, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 505, + 106 + ], + "type": "text", + "content": "[1068] Yifan Zhang, Wenyu Du, Dongming Jin, Jie Fu, and Zhi Jin. Finite state automata inside transformers with chain-of-thought: A mechanistic study on state tracking. arXiv preprint arXiv:2502.20129, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 110, + 506, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 110, + 506, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 110, + 506, + 153 + ], + "type": "text", + "content": "[1069] Yong Zhang, Bingyuan Zhang, Zhitao Li, Ming Li, Ning Cheng, Minchuan Chen, Tao Wei, Jun Ma, Shaojun Wang, and Jing Xiao. Self-enhanced reasoning training: Activating latent reasoning in small models for enhanced reasoning distillation. arXiv preprint arXiv:2502.12744, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 157, + 506, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 157, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 105, + 157, + 506, + 224 + ], + "type": "text", + "content": "[1070] Yongheng Zhang, Qiguang Chen, Min Li, Wanxiang Che, and Libo Qin. AutoCAP: Towards automatic cross-lingual alignment planning for zero-shot chain-of-thought. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 9191–9200, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.546. URL https://aclanthology.org/2024-findings-acl.546/." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 228, + 506, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 228, + 506, + 306 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 506, + 306 + ], + "type": "text", + "content": "[1071] Yongheng Zhang, Qiguang Chen, Jingxuan Zhou, Peng Wang, Jiasheng Si, Jin Wang, Wenpeng Lu, and Libo Qin. Wrong-of-thought: An integrated reasoning framework with multi-perspective verification and wrong information. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Findings of the Association for Computational Linguistics: EMNLP 2024, pages 6644-6653, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.388. URL https://aclanthology.org/2024-findings-emnlp.388/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 308, + 504, + 344 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 308, + 504, + 344 + ], + "spans": [ + { + "bbox": [ + 105, + 308, + 504, + 344 + ], + "type": "text", + "content": "[1072] Yongheng Zhang, Xu Liu, Ruihan Tao, Qiguang Chen, Hao Fei, Wanxiang Che, and Libo Qin. 
Vitcot: Video-text interleaved chain-of-thought for boosting video understanding in large language models. arXiv preprint arXiv:2507.09876, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 346, + 506, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 346, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 346, + 506, + 380 + ], + "type": "text", + "content": "[1073] Yongheng Zhang, Xu Liu, Ruoxi Zhou, Qiguang Chen, Hao Fei, Wenpeng Lu, and Libo Qin. Cchall: A novel benchmark for joint cross-lingual and cross-modal hallucinations detection in large language models. arXiv preprint arXiv:2505.19108, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 383, + 506, + 427 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 383, + 506, + 427 + ], + "spans": [ + { + "bbox": [ + 105, + 383, + 506, + 427 + ], + "type": "text", + "content": "[1074] Yudi Zhang, Lu Wang, Meng Fang, Yali Du, Chenghua Huang, Jun Wang, Qingwei Lin, Mykola Pechenizkiy, Dongmei Zhang, Saravan Rajmohan, et al. Distill not only data but also rewards: Can smaller language models surpass larger ones? arXiv preprint arXiv:2502.19557, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 432, + 506, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 432, + 506, + 498 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 506, + 498 + ], + "type": "text", + "content": "[1075] Yunxiang Zhang, Muhammad Khalifa, Lajanugen Logeswaran, Jaekyeom Kim, Moontae Lee, Honglak Lee, and Lu Wang. Small language models need strong verifiers to self-correct reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 15637–15653, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.924. URL https://aclanthology.org/2024 findings-acl.924/." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 502, + 504, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 502, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 504, + 525 + ], + "type": "text", + "content": "[1076] Yuxiang Zhang, Shangxi Wu, Yuqi Yang, Jiangming Shu, Jinlin Xiao, Chao Kong, and Jitao Sang. o1-coder: an o1 replication for coding. arXiv preprint arXiv:2412.00154, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 528, + 506, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 528, + 506, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 506, + 563 + ], + "type": "text", + "content": "[1077] Yuxiang Zhang, Yuqi Yang, Jiangming Shu, Yuhang Wang, Jinlin Xiao, and Jitao Sang. Openrft: Adapting reasoning foundation model for domain-specific tasks with reinforcement fine-tuning. arXiv preprint arXiv:2412.16849, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 566, + 504, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 566, + 504, + 600 + ], + "spans": [ + { + "bbox": [ + 105, + 566, + 504, + 600 + ], + "type": "text", + "content": "[1078] Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning. arXiv preprint arXiv:2501.07301, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 603, + 506, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 603, + 506, + 637 + ], + "spans": [ + { + "bbox": [ + 105, + 603, + 506, + 637 + ], + "type": "text", + "content": "[1079] Zhihao Zhang, Qiaole Dong, Qi Zhang, Jun Zhao, Enyu Zhou, Zhiheng Xi, Senjie Jin, Xiaoran Fan, Yuhao Zhou, Yanwei Fu, et al. Reinforcement fine-tuning enables mllms learning novel tasks stably. arXiv preprint arXiv:2506.23508, 2025." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 640, + 506, + 674 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 640, + 506, + 674 + ], + "spans": [ + { + "bbox": [ + 105, + 640, + 506, + 674 + ], + "type": "text", + "content": "[1080] Zhongwang Zhang, Pengxiao Lin, Zhiwei Wang, Yaoyu Zhang, and Zhi-Qin John Xu. Complexity control facilitates reasoning-based compositional generalization in transformers. arXiv preprint arXiv:2501.08537, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 677, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 506, + 723 + ], + "type": "text", + "content": "[1081] Zhuosheng Zhang, Aston Zhang, Mu Li, hai zhao, George Karypis, and Alex Smola. Multi-modal chain-of-thought reasoning in language models. Transactions on Machine Learning Research, June 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=y1pPWFVfvR." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 313, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 313, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 313, + 750 + ], + "type": "text", + "content": "104" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 103 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 507, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "[1082] Deji Zhao, Donghong Han, Jia Wu, Zhongjiang He, Bo Ning, Ye Yuan, Yongxiang Li, Chao Wang, and Shuangyong Song. Enhancing math reasoning ability of large language models via computation logic graphs. Knowledge-Based Systems, page 113905, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 108, + 504, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 108, + 504, + 133 + ], + "spans": [ + { + "bbox": [ + 107, + 108, + 504, + 133 + ], + "type": "text", + "content": "[1083] Eric Zhao, Pranjal Awasthi, and Sreenivas Gollapudi. Sample, scrutinize and scale: Effective inference-time search by scaling verification. arXiv preprint arXiv:2502.01839, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 134, + 504, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 134, + 504, + 169 + ], + "spans": [ + { + "bbox": [ + 106, + 134, + 504, + 169 + ], + "type": "text", + "content": "[1084] Han Zhao, Haotian Wang, Yiping Peng, Sitong Zhao, Xiaoyu Tian, Shuaiying Chen, Yunjie Ji, and Xiangang Li. 1.4 million open-source distilled reasoning dataset to empower large language model training. arXiv preprint arXiv:2503.19633, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 171, + 507, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 171, + 507, + 249 + ], + "spans": [ + { + "bbox": [ + 106, + 171, + 507, + 249 + ], + "type": "text", + "content": "[1085] Jun Zhao, Jingqi Tong, Yurong Mou, Ming Zhang, Qi Zhang, and Xuanjing Huang. Exploring the compositional deficiency of large language models in mathematical reasoning through trap problems. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen, editors, Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 16361-16376, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.915. URL https://aclanthology.org/2024.emnlp-main.915/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 251, + 506, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 251, + 506, + 297 + ], + "spans": [ + { + "bbox": [ + 106, + 251, + 506, + 297 + ], + "type": "text", + "content": "[1086] Lili Zhao, Yang Wang, Qi Liu, Mengyun Wang, Wei Chen, Zhichao Sheng, and Shijin Wang. Evaluating large language models through role-guide and self-reflection: A comparative study. In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=E36NHwe7Zc." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 298, + 506, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 298, + 506, + 334 + ], + "spans": [ + { + "bbox": [ + 107, + 298, + 506, + 334 + ], + "type": "text", + "content": "[1087] Shangziqi Zhao, Jiahao Yuan, Guisong Yang, and Usman Naseem. Can pruning improve reasoning? revisiting long-cot compression with capability in mind for better reasoning. arXiv preprint arXiv:2505.14582, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 335, + 506, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 335, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 107, + 335, + 506, + 380 + ], + "type": "text", + "content": "[1088] Weixiang Zhao, Jiahe Guo, Yang Deng, Xingyu Sui, Yulin Hu, Yanyan Zhao, Wanxiang Che, Bing Qin, Tat-Seng Chua, and Ting Liu. Exploring and exploiting the inherent efficiency within large reasoning models for self-guided efficiency enhancement. arXiv preprint arXiv:2506.15647, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 382, + 504, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 382, + 504, + 407 + ], + "spans": [ + { + "bbox": [ + 107, + 382, + 504, + 407 + ], + "type": "text", + "content": "[1089] Xuandong Zhao, Zhewei Kang, Aosong Feng, Sergey Levine, and Dawn Song. Learning to reason without external rewards. arXiv preprint arXiv:2505.19590, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 409, + 506, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 409, + 506, + 443 + ], + "spans": [ + { + "bbox": [ + 107, + 409, + 506, + 443 + ], + "type": "text", + "content": "[1090] Xueliang Zhao, Wei Wu, Jian Guan, and Lingpeng Kong. Promptcot: Synthesizing olympiad-level problems for mathematical reasoning in large language models. arXiv preprint arXiv:2503.02324, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 445, + 506, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 445, + 506, + 524 + ], + "spans": [ + { + "bbox": [ + 107, + 445, + 506, + 524 + ], + "type": "text", + "content": "[1091] Xufeng Zhao, Mengdi Li, Wenhao Lu, Cornelius Weber, Jae Hee Lee, Kun Chu, and Stefan Wermter. Enhancing zero-shot chain-of-thought reasoning in large language models through logic. In Nicoletta Calzolari, Min-Yen Kan, Veronique Hoste, Alessandro Lenci, Sakriani Sakti, and Nianwen Xue, editors, Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 6144-6166, Torino, Italia, May 2024. ELRA and ICCL. URL https://aclanthology.org/2024.lrec-main.543/." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 526, + 504, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 526, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 107, + 526, + 504, + 550 + ], + "type": "text", + "content": "[1092] Yachao Zhao, Bo Wang, and Yan Wang. Explicit vs. implicit: Investigating social bias in large language models through self-reflection. arXiv preprint arXiv:2501.02295, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 552, + 506, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 552, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 107, + 552, + 506, + 586 + ], + "type": "text", + "content": "[1093] Yang Zhao, Kai Xiong, Xiao Ding, Li Du, Zhouhao Sun, Jiannan Guan, Wenbin Zhang, Bin Liu, Dong Hu, Bing Qin, et al. Ufo-rl: Uncertainty-focused optimization for efficient reinforcement learning data selection. arXiv preprint arXiv:2505.12457, 2025." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 589, + 506, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 589, + 506, + 622 + ], + "spans": [ + { + "bbox": [ + 107, + 589, + 506, + 622 + ], + "type": "text", + "content": "[1094] Yichong Zhao and Susumu Goto. Can frontier llms replace annotators in biomedical text mining? analyzing challenges and exploring solutions. arXiv preprint arXiv:2503.03261, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 625, + 506, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 625, + 506, + 660 + ], + "spans": [ + { + "bbox": [ + 107, + 625, + 506, + 660 + ], + "type": "text", + "content": "[1095] Yu Zhao, Huifeng Yin, Bo Zeng, Hao Wang, Tianqi Shi, Chenyang Lyu, Longyue Wang, Weihua Luo, and Kaifu Zhang. Marco-o1: Towards open reasoning models for open-ended solutions. arXiv preprint arXiv:2411.14405, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 662, + 504, + 687 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 662, + 504, + 687 + ], + "spans": [ + { + "bbox": [ + 107, + 662, + 504, + 687 + ], + "type": "text", + "content": "[1096] Yurui Zhao, Xiang Wang, Jiahong Liu, Irwin King, and Zhitao Huang. Towards geometry problem solving in the large model era: A survey. arXiv preprint arXiv:2506.02690, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 689, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 689, + 504, + 723 + ], + "type": "text", + "content": "[1097] Zhonghan Zhao, Wenwei Zhang, Haian Huang, Kuikun Liu, Jianfei Gao, Gaoang Wang, and Kai Chen. Rig: Synergizing reasoning and imagination in end-to-end generalist policy. arXiv preprint arXiv:2503.24388, 2025." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 740, + 313, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 740, + 313, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 740, + 313, + 750 + ], + "type": "text", + "content": "105" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 104 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "[1098] Zilong Zhao, Yao Rong, Dongyang Guo, Emek Gözlüklü, Emir Gülboy, and Enkelejda Kasneci. Stepwise self-consistent mathematical reasoning with large language models. arXiv preprint arXiv:2402.17786, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 110, + 506, + 155 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 110, + 506, + 155 + ], + "spans": [ + { + "bbox": [ + 106, + 110, + 506, + 155 + ], + "type": "text", + "content": "[1099] Zirui Zhao, Wee Sun Lee, and David Hsu. Large language models as commonsense knowledge for large-scale task planning. Advances in Neural Information Processing Systems, 36:31967-31987, December 2023. URL https://openreview.net/pdf?id=ted747HURfX." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 159, + 506, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 159, + 506, + 194 + ], + "spans": [ + { + "bbox": [ + 107, + 159, + 506, + 194 + ], + "type": "text", + "content": "[1100] Bowen Zheng, Xiaolei Wang, Enze Liu, Xi Wang, Lu Hongyu, Yu Chen, Wayne Xin Zhao, and Ji-Rong Wen. Deeprec: Towards a deep dive into the item space with large language model based recommendation. arXiv preprint arXiv:2505.16810, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 198, + 506, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 198, + 506, + 232 + ], + "spans": [ + { + "bbox": [ + 107, + 198, + 506, + 232 + ], + "type": "text", + "content": "[1101] Chuanyang Zheng, Zhengying Liu, Enze Xie, Zhenguo Li, and Yu Li. Progressive-hint prompting improves reasoning in large language models. In AI for Math Workshop @ ICML 2024, June 2024. URL https://openreview.net/forum?id=UkFEs3ciz8." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 235, + 506, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 235, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 107, + 235, + 506, + 270 + ], + "type": "text", + "content": "[1102] Chujie Zheng, Zhenru Zhang, Beichen Zhang, Runji Lin, Keming Lu, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. Processbench: Identifying process errors in mathematical reasoning. arXiv preprint arXiv:2412.06559, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 274, + 506, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 274, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 107, + 274, + 506, + 308 + ], + "type": "text", + "content": "[1103] Da Zheng, Lun Du, Junwei Su, Yuchen Tian, Yuqi Zhu, Jintian Zhang, Lanning Wei, Ningyu Zhang, and Huajun Chen. 
Knowledge augmented complex problem solving with large language models: A survey. arXiv preprint arXiv:2505.03418, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 312, + 506, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 312, + 506, + 346 + ], + "spans": [ + { + "bbox": [ + 107, + 312, + 506, + 346 + ], + "type": "text", + "content": "[1104] Ge Zheng, Bin Yang, Jiajin Tang, Hong-Yu Zhou, and Sibei Yang. Ddcot: Duty-distinct chain-of-thought prompting for multimodal reasoning in language models. Advances in Neural Information Processing Systems, 36:5168-5191, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 350, + 506, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 350, + 506, + 384 + ], + "spans": [ + { + "bbox": [ + 107, + 350, + 506, + 384 + ], + "type": "text", + "content": "[1105] Hang Zheng, Hongshen Xu, Yuncong Liu, Lu Chen, Pascale Fung, and Kai Yu. Enhancing llm reliability via explicit knowledge boundary modeling. arXiv preprint arXiv:2503.02233, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 388, + 506, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 388, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 107, + 388, + 506, + 423 + ], + "type": "text", + "content": "[1106] Jiani Zheng, Lu Wang, Fangkai Yang, Chaoyun Zhang, Lingrui Mei, Wenjie Yin, Qingwei Lin, Dongmei Zhang, Saravan Rajmohan, and Qi Zhang. Vem: Environment-free exploration for training gui agent with value environment model. arXiv preprint arXiv:2502.18906, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 426, + 506, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 426, + 506, + 472 + ], + "spans": [ + { + "bbox": [ + 107, + 426, + 506, + 472 + ], + "type": "text", + "content": "[1107] Kunhao Zheng, Juliette Decugis, Jonas Gehring, Taco Cohen, benjamin negrevergne, and Gabriel Synnaeve. What makes large language models reason in (multi-turn) code generation? In The Thirteenth International Conference on Learning Representations, January 2025. URL https://openreview.net/forum?id=Zk9guO19NS." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 475, + 506, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 475, + 506, + 543 + ], + "spans": [ + { + "bbox": [ + 107, + 475, + 506, + 543 + ], + "type": "text", + "content": "[1108] Tianyu Zheng, Ge Zhang, Tianhao Shen, Xueling Liu, Bill Yuchen Lin, Jie Fu, Wenhu Chen, and Xiang Yue. OpenCodeInterpreter: Integrating code generation with execution and refinement. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar, editors, Findings of the Association for Computational Linguistics: ACL 2024, pages 12834–12859, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.762. URL https://aclanthology.org/2024-findings-acl.762/." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 546, + 506, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 546, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 107, + 546, + 506, + 581 + ], + "type": "text", + "content": "[1109] Xin Zheng, Jie Lou, Boxi Cao, Xueru Wen, Yuqiu Ji, Hongyu Lin, Yaojie Lu, Xianpei Han, Debing Zhang, and Le Sun. Critic-cot: Boosting the reasoning abilities of large language model via chain-of-thoughts critic. arXiv preprint arXiv:2408.16326, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 585, + 506, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 585, + 506, + 619 + ], + "spans": [ + { + "bbox": [ + 107, + 585, + 506, + 619 + ], + "type": "text", + "content": "[1110] Zhi Zheng, Zhuoliang Xie, Zhenkun Wang, and Bryan Hooi. Monte carlo tree search for comprehensive exploration in llm-based automatic heuristic design. arXiv preprint arXiv:2501.08603, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 622, + 506, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 622, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 107, + 622, + 506, + 647 + ], + "type": "text", + "content": "[1111] Jianyuan Zhong, Zeju Li, Zhijian Xu, Xiangyu Wen, and Qiang Xu. Dyve: Thinking fast and slow for dynamic process verification. arXiv preprint arXiv:2502.11157, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 650, + 506, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 650, + 506, + 684 + ], + "spans": [ + { + "bbox": [ + 107, + 650, + 506, + 684 + ], + "type": "text", + "content": "[1112] Qihuang Zhong, Kang Wang, Ziyang Xu, Juhua Liu, Liang Ding, and Bo Du. Achieving> 97% on gsm8k: Deeply understanding the problems makes llms better solvers for math word problems. arXiv preprint arXiv:2404.14963, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 688, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 688, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 688, + 506, + 723 + ], + "type": "text", + "content": "[1113] Tianyang Zhong, Zhengliang Liu, Yi Pan, Yutong Zhang, Yifan Zhou, Shizhe Liang, Zihao Wu, Yanjun Lyu, Peng Shu, Xiaowei Yu, et al. Evaluation of openai o1: Opportunities and challenges of agi. arXiv preprint arXiv:2409.18486, 2024." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 34, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 313, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 313, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 313, + 750 + ], + "type": "text", + "content": "106" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 105 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 507, + 722 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 72, + 507, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 507, + 118 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 507, + 118 + ], + "type": "text", + "content": "[1114] Andy Zhou, Kai Yan, Michal Shlapentokh-Rothman, Haohan Wang, and Yu-Xiong Wang. Language agent tree search unifies reasoning, acting, and planning in language models. In *Forty-first International Conference on Machine Learning*, May 2024. URL https://openreview.net/forum?id=njwv9BsGHF." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 121, + 507, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 121, + 507, + 177 + ], + "spans": [ + { + "bbox": [ + 106, + 121, + 507, + 177 + ], + "type": "text", + "content": "[1115] Aojun Zhou, Ke Wang, Zimu Lu, Weikang Shi, Sichun Luo, Zipeng Qin, Shaoqing Lu, Anya Jia, Linqi Song, Mingjie Zhan, and Hongsheng Li. Solving challenging math word problems using GPT-4 code interpreter with code-based self-verification. 
In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=c8McWs4Av0." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 181, + 507, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 181, + 507, + 225 + ], + "spans": [ + { + "bbox": [ + 106, + 181, + 507, + 225 + ], + "type": "text", + "content": "[1116] Changzhi Zhou, Xinyu Zhang, Dandan Song, Xiancai Chen, Wanli Gu, Huipeng Ma, Yuhang Tian, Mengdi Zhang, and Linmei Hu. Refinecoder: Iterative improving of large language models via adaptive critique refinement for code generation. arXiv preprint arXiv:2502.09183, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 230, + 507, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 230, + 507, + 286 + ], + "spans": [ + { + "bbox": [ + 106, + 230, + 507, + 286 + ], + "type": "text", + "content": "[1117] Denny Zhou, Nathanael Scharli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc V Le, and Ed H. Chi. Least-to-most prompting enables complex reasoning in large language models. In The Eleventh International Conference on Learning Representations, February 2023. URL https://openreview.net/forum?id=WZH7099tgfM." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 290, + 507, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 290, + 507, + 324 + ], + "spans": [ + { + "bbox": [ + 106, + 290, + 507, + 324 + ], + "type": "text", + "content": "[1118] Fan Zhou, Haoyu Dong, Qian Liu, Zhoujun Cheng, Shi Han, and Dongmei Zhang. Reflection of thought: Inversely eliciting numerical reasoning in language models via solving linear systems. arXiv preprint arXiv:2210.05075, 2022." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 328, + 507, + 362 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 328, + 507, + 362 + ], + "spans": [ + { + "bbox": [ + 106, + 328, + 507, + 362 + ], + "type": "text", + "content": "[1119] Hengguang Zhou, Xinui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's\" aha moment\" in visual reasoning on a 2b non-sft model. arXiv preprint arXiv:2503.05132, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 366, + 507, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 366, + 507, + 412 + ], + "spans": [ + { + "bbox": [ + 106, + 366, + 507, + 412 + ], + "type": "text", + "content": "[1120] Jin Peng Zhou, Charles E Staats, Wenda Li, Christian Szegedy, Kilian Q Weinberger, and Yuhuai Wu. Don't trust: Verify – grounding LLM quantitative reasoning with autoformalization. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=V5tdi14ple." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 415, + 507, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 415, + 507, + 450 + ], + "spans": [ + { + "bbox": [ + 106, + 415, + 507, + 450 + ], + "type": "text", + "content": "[1121] Jin Peng Zhou, Kaiwen Wang, Jonathan Chang, Zhaolin Gao, Nathan Kallus, Kilian Q Weinberger, Kianté Brantley, and Wen Sun. q#: Provably optimal distributional rl for llm post-training. arXiv preprint arXiv:2502.20548, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 453, + 507, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 453, + 507, + 487 + ], + "spans": [ + { + "bbox": [ + 106, + 453, + 507, + 487 + ], + "type": "text", + "content": "[1122] Kaiwen Zhou, Chengzhi Liu, Xuandong Zhao, Shreedhar Jangam, Jayanth Srinivasa, Gaowen Liu, Dawn Song, and Xin Eric Wang. 
The hidden risks of large reasoning models: A safety assessment of r1. arXiv preprint arXiv:2502.12659, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 492, + 507, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 492, + 507, + 536 + ], + "spans": [ + { + "bbox": [ + 106, + 492, + 507, + 536 + ], + "type": "text", + "content": "[1123] Lexin Zhou, Wout Schellaert, Fernando Martínez-Plumed, Yael Moros-Daval, César Ferri, and José Hernández-Orallo. Larger and more instructable language models become less reliable. Nature, 634(8032):61–68, 2024. URL https://www.nature.com/articles/s41586-024-07930-y." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 540, + 507, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 540, + 507, + 574 + ], + "spans": [ + { + "bbox": [ + 106, + 540, + 507, + 574 + ], + "type": "text", + "content": "[1124] Li Zhou, Ruijie Zhang, Xunlian Dai, Daniel Hershcovich, and Haizhou Li. Large language models penetration in scholarly writing and peer review. arXiv preprint arXiv:2502.11193, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 578, + 507, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 578, + 507, + 624 + ], + "spans": [ + { + "bbox": [ + 106, + 578, + 507, + 624 + ], + "type": "text", + "content": "[1125] Ruochen Zhou, Minrui Xu, Shiqi Chen, Junteng Liu, Yunqi Li, LIN Xinxin, Zhengyu Chen, and Junxian He. AI for math or math for AI? on the generalization of learning mathematical problem solving. In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, 2024. URL https://openreview.net/forum?id=xlnvZ85CSo." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 628, + 507, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 628, + 507, + 685 + ], + "spans": [ + { + "bbox": [ + 106, + 628, + 507, + 685 + ], + "type": "text", + "content": "[1126] Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. Webarena: A realistic web environment for building autonomous agents. In The Twelfth International Conference on Learning Representations, January 2024. URL https://openreview.net/forum?id=oKn9c6ytLx." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 688, + 507, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 688, + 507, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 688, + 507, + 722 + ], + "type": "text", + "content": "[1127] Xiangxin Zhou, Zichen Liu, Anya Sims, Haonan Wang, Tianyu Pang, Chongxuan Li, Liang Wang, Min Lin, and Chao Du. Reinforcing general reasoning without verifiers. arXiv preprint arXiv:2505.21493, 2025." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 740, + 313, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 740, + 313, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 740, + 313, + 750 + ], + "type": "text", + "content": "107" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 106 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 507, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 105, + 72, + 507, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 507, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 507, + 106 + ], + "type": "text", + "content": "[1128] Xiaofeng Zhou, Heyan Huang, and Lizi Liao. Debate, reflect, and distill: Multi-agent feedback with tree-structured preference optimization for efficient language model enhancement. arXiv preprint arXiv:2506.03541, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 110, + 507, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 110, + 507, + 144 + ], + "spans": [ + { + "bbox": [ + 105, + 110, + 507, + 144 + ], + "type": "text", + "content": "[1129] Xin Zhou, Yiwen Guo, Ruotian Ma, Tao Gui, Qi Zhang, and Xuanjing Huang. Self-consistency of the internal reward models improves self-rewarding language models. arXiv preprint arXiv:2502.08922, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 148, + 507, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 148, + 507, + 183 + ], + "spans": [ + { + "bbox": [ + 105, + 148, + 507, + 183 + ], + "type": "text", + "content": "[1130] Yang Zhou, Hongyi Liu, Zhuoming Chen, Yuandong Tian, and Beidi Chen. Gsm-infinite: How do your llms behave over infinitely increasing context length and reasoning complexity? arXiv preprint arXiv:2502.05252, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 186, + 507, + 221 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 186, + 507, + 221 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 507, + 221 + ], + "type": "text", + "content": "[1131] Yifei Zhou, Song Jiang, Yuandong Tian, Jason Weston, Sergey Levine, Sainbayar Sukhbaatar, and Xian Li. Sweet-rl: Training multi-turn llm agents on collaborative reasoning tasks. arXiv preprint arXiv:2503.15478, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 224, + 507, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 224, + 507, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 224, + 507, + 270 + ], + "type": "text", + "content": "[1132] Yufa Zhou, Shaobo Wang, Xingyu Dong, Xiangqi Jin, Yifang Chen, Yue Min, Kexin Yang, Xingzhang Ren, Dayiheng Liu, and Linfeng Zhang. Reasoning like an economist: Posttraining on economic problems induces strategic generalization in llms. arXiv preprint arXiv:2506.00577, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 273, + 507, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 273, + 507, + 308 + ], + "spans": [ + { + "bbox": [ + 105, + 273, + 507, + 308 + ], + "type": "text", + "content": "[1133] Zhanke Zhou, Zhaocheng Zhu, Xuan Li, Mikhail Galkin, Xiao Feng, Sanmi Koyejo, Jian Tang, and Bo Han. 
Landscape of thoughts: Visualizing the reasoning process of large language models. arXiv preprint arXiv:2503.22165, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 312, + 507, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 312, + 507, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 507, + 346 + ], + "type": "text", + "content": "[1134] Zhi Zhou, Tan Yuhao, Zenan Li, Yuan Yao, Lan-Zhe Guo, Xiaoxing Ma, and Yu-Feng Li. Bridging internal probability and self-consistency for effective and efficient lIm reasoning. arXiv preprint arXiv:2502.00511, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 350, + 507, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 350, + 507, + 374 + ], + "spans": [ + { + "bbox": [ + 105, + 350, + 507, + 374 + ], + "type": "text", + "content": "[1135] Bin Zhu, Hailong Yin, Jingjing Chen, and Yu-Gang Jiang. Reasoning models are more easily gaslighted than you think. arXiv preprint arXiv:2506.09677, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 376, + 507, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 376, + 507, + 422 + ], + "spans": [ + { + "bbox": [ + 105, + 376, + 507, + 422 + ], + "type": "text", + "content": "[1136] Dawei Zhu, Xiyu Wei, Guangxiang Zhao, Wenhao Wu, Haosheng Zou, Junfeng Ran, Xun Wang, Lin Sun, Xiangzheng Zhang, and Sujian Li. Chain-of-thought matters: Improving long-context language models with reasoning path supervision. arXiv preprint arXiv:2502.20790, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 426, + 507, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 426, + 507, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 507, + 450 + ], + "type": "text", + "content": "[1137] Jason Zhu and Hongyu Li. Towards concise and adaptive thinking in large reasoning models: A survey. 
arXiv preprint arXiv:2507.09662, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 453, + 507, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 453, + 507, + 487 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 507, + 487 + ], + "type": "text", + "content": "[1138] Junda Zhu, Lingyong Yan, Shuaiqiang Wang, Dawei Yin, and Lei Sha. Reasoning-to-defend: Safety-aware reasoning can defend large language models from jailbreaking. arXiv preprint arXiv:2502.12970, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 491, + 507, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 491, + 507, + 526 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 507, + 526 + ], + "type": "text", + "content": "[1139] King Zhu, Hanhao Li, Siwei Wu, Tianshun Xing, Dehua Ma, Xiangru Tang, Minghao Liu, Jian Yang, Jiaheng Liu, Yuchen Eleanor Jiang, et al. Scaling test-time compute for llm agents. arXiv preprint arXiv:2506.12928, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 529, + 507, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 507, + 565 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 507, + 565 + ], + "type": "text", + "content": "[1140] Kunlun Zhu, Hongyi Du, Zhaochen Hong, Xiaocheng Yang, Shuyi Guo, Zhe Wang, Zhenhailong Wang, Cheng Qian, Xiangru Tang, Heng Ji, et al. Multiagentbench: Evaluating the collaboration and competition of lIm agents. arXiv preprint arXiv:2503.01935, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 568, + 507, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 568, + 507, + 603 + ], + "spans": [ + { + "bbox": [ + 105, + 568, + 507, + 603 + ], + "type": "text", + "content": "[1141] Rongzhi Zhu, Yi Liu, Zequn Sun, Yiwei Wang, and Wei Hu. When can large reasoning models save thinking? mechanistic analysis of behavioral divergence in reasoning. 
arXiv preprint arXiv:2505.15276, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 606, + 507, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 606, + 507, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 507, + 641 + ], + "type": "text", + "content": "[1142] Tinghui Zhu, Kai Zhang, Jian Xie, and Yu Su. Deductive beam search: Decoding deducible rationale for chain-of-thought reasoning. In First Conference on Language Modeling, July 2024. URL https://openreview.net/forum?id=S1XnUsqwr7." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 644, + 507, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 644, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 644, + 507, + 723 + ], + "type": "text", + "content": "[1143] Xinyu Zhu, Junjie Wang, Lin Zhang, Yuxiang Zhang, Yongfeng Huang, Ruyi Gan, Jiaxing Zhang, and Yujiu Yang. Solving math word problems via cooperative reasoning induced language models. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki, editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 4471–4485, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.245. URL https://aclanthology.org/2023.acl-long.245/." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 741, + 313, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 741, + 313, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 741, + 313, + 750 + ], + "type": "text", + "content": "108" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 107 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 506, + 360 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 105, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 505, + 106 + ], + "type": "text", + "content": "[1144] Zihao Zhu, Hongbao Zhang, Ruotong Wang, Ke Xu, Siwei Lyu, and Baoyuan Wu. To think or not to think: Exploring the unthinking vulnerability in large reasoning models. arXiv preprint arXiv:2502.12202, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 108, + 505, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 108, + 505, + 144 + ], + "spans": [ + { + "bbox": [ + 105, + 108, + 505, + 144 + ], + "type": "text", + "content": "[1145] Zihao Zhu, Hongbao Zhang, Mingda Zhang, Ruotong Wang, Guanzong Wu, Ke Xu, and Baoyuan Wu. Bot: Breaking long thought processes of o1-like large language models through backdoor attack. arXiv preprint arXiv:2502.12202, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 145, + 506, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 145, + 506, + 170 + ], + "spans": [ + { + "bbox": [ + 107, + 145, + 506, + 170 + ], + "type": "text", + "content": "[1146] Ren Zhuang, Ben Wang, and Shuifa Sun. Accelerating chain-of-thought reasoning: When goal-gradient importance meets dynamic skipping. arXiv preprint arXiv:2505.08392, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 171, + 506, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 171, + 506, + 239 + ], + "spans": [ + { + "bbox": [ + 106, + 171, + 506, + 239 + ], + "type": "text", + "content": "[1147] Ziyu Zhuang, Qiguang Chen, Longxuan Ma, Mingda Li, Yi Han, Yushan Qian, Haopeng Bai, Weinan Zhang, and Liu Ting. Through the lens of core competency: Survey on evaluation of large language models. In Proceedings of the 22nd Chinese National Conference on Computational Linguistics (Volume 2: Frontier Forum), pages 88–109, Harbin, China, August 2023. Chinese Information Processing Society of China. URL https://aclanthology.org/2023.ccl-2.8/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 240, + 504, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 240, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 106, + 240, + 504, + 275 + ], + "type": "text", + "content": "[1148] Alireza S Ziabari, Nona Ghazizadeh, Zhivar Sourati, Farzan Karimi-Malekabadi, Payam Piray, and Morteza Dehghani. Reasoning on a spectrum: Aligning llms to system 1 and system 2 thinking. arXiv preprint arXiv:2502.12470, 2025." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 277, + 506, + 321 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 277, + 506, + 321 + ], + "spans": [ + { + "bbox": [ + 107, + 277, + 506, + 321 + ], + "type": "text", + "content": "[1149] Henry Peng Zou, Zhengyao Gu, Yue Zhou, Yankai Chen, Weizhi Zhang, Liancheng Fang, Yibo Wang, Yangning Li, Kay Liu, and Philip S Yu. Testnuc: Enhancing test-time computing approaches through neighboring unlabeled data consistency. arXiv preprint arXiv:2502.19163, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 324, + 506, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 324, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 107, + 324, + 506, + 360 + ], + "type": "text", + "content": "[1150] Yuxin Zuo, Shang Qu, Yifei Li, Zhangren Chen, Xuekai Zhu, Ermo Hua, Kaiyan Zhang, Ning Ding, and Bowen Zhou. Medxpertqa: Benchmarking expert-level medical reasoning and understanding. arXiv preprint arXiv:2501.18362, 2025." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "spans": [ + { + "bbox": [ + 106, + 33, + 189, + 57 + ], + "type": "text", + "content": "LARG LANGUAGE ANALYSIS REASONING GROUP" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 297, + 740, + 313, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 740, + 313, + 750 + ], + "spans": [ + { + "bbox": [ + 297, + 740, + 313, + 750 + ], + "type": "text", + "content": "109" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 108 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09595/b59876e9-da8b-438b-ab54-bb4c4d76820f_content_list.json b/data/2025/2503_09xxx/2503.09595/b59876e9-da8b-438b-ab54-bb4c4d76820f_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..5424e8ea80292778616983a42ac296bcd838ead9 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/b59876e9-da8b-438b-ab54-bb4c4d76820f_content_list.json @@ -0,0 +1,3679 @@ +[ + { + "type": "text", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "text_level": 1, + "bbox": [ + 220, + 109, + 751, + 156 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chenyu Li $^{*1}$ Oscar Michel $^{*1}$ Xichen Pan $^{1}$ Sainan Liu $^{2}$ Mike Roberts $^{2}$ Saining Xie", + "bbox": [ + 171, + 198, + 797, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 241, + 242, + 320, + 258 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large-scale pre-trained video generation models excel in content creation but are not reliable as physically accurate world simulators out of the box. 
This work studies the process of posttraining these models for accurate world modeling through the lens of the simple, yet fundamental, physics task of modeling object freefall. We show state-of-the-art video generation models struggle with this basic task, despite their visually impressive outputs. To remedy this problem, we find that fine-tuning on a relatively small amount of simulated videos is effective in inducing the dropping behavior in the model, and we can further improve results through a novel reward modeling procedure we introduce. Our study also reveals key limitations of post-training in generalization and distribution modeling. Additionally, we release a benchmark for this task that may serve as a useful diagnostic tool for tracking physical accuracy in large-scale video generative model development. Code is available at this repository: https://github.com/vision-x-nyu/pisa-experiments.", + "bbox": [ + 117, + 270, + 444, + 617 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 86, + 652, + 217, + 667 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Over the past year, video generation models have advanced significantly, inspiring visions of a future where these models could serve as realistic world models (Craik, 1967; LeCun, 2022; Hafner et al., 2019; 2023; Ha & Schmidhuber, 2018). State-of-the-art video generation models models exhibit impressive results in content creation (OpenAI, 2024; Kuaishou, 2024; Luma, 2024; Runway, 2024) and are already being used in advertising and filmmaking (Runway, 2025; NBC, 2025). These advancements have sparked a line of research that seeks to evolve these models from content creators to world simulators for embodied agents (Yang", + "bbox": [ + 84, + 676, + 475, + 844 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "*Equal contribution, alphabetical order. 
1New York University 2Intel Labs.", + "bbox": [ + 84, + 852, + 473, + 878 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "et al., 2023; 2024b; Agarwal et al., 2025). However, accurate world modeling is considerably more challenging than creative content creation because looking \"good enough\" is not sufficient: generated pixels must faithfully represent a world state evolving in accordance with the laws of physics and visual perspective.", + "bbox": [ + 496, + 243, + 887, + 335 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We find that although the generations of state-of-the-art models are impressive visually, these models still struggle to generate results that are accurate physically, even though these models are pretrained on internet-scale video data demonstrating a wide variety of complex physical interactions. The failure to ground and align visual generations to the laws of physics suggests that pretraining is not enough and a post-training stage is needed. Much like how pretrained Large Language Models (LLMs) need to be adapted through post-training before they can be useful conversational assistants, pretrained video generative models ought to be adapted through post-training before they can be deployed as physically accurate world simulators.", + "bbox": [ + 495, + 340, + 888, + 540 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this work, we rigorously examine the post-training process of video generation models by focusing on the simple yet fundamental physics task of modeling object freefall, which we find is highly challenging for state-of-the-art models. Specifically, we study an image-to-video $^{1}$ (I2V) scenario where the goal is to generate a video of an object falling and potentially colliding with other objects on the ground, starting from an initial image of the object suspended in midair. 
We chose to study this single task, rather than general physics ability as a whole, because its simplicity allows us to conduct controlled experiments that yield insights into the strengths and limitations of the post-training process, which we believe will become an increasingly important component of research in generative world modeling. Additionally, the simplicity of the dropping task allows it to be implemented in simulation which is desirable because it allows us to easily test the properties of dataset scaling, gives us access to ground truth annotations for evaluation, and gives us the ability to precisely manipulate the simulation environment for controlled experimentation.", + "bbox": [ + 495, + 546, + 888, + 848 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1We discuss our decision to formulate this task in the image-to-video setting instead of the video-to-video setting in Appendix A.", + "bbox": [ + 496, + 857, + 887, + 883 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.09595v1 [cs.CV] 12 Mar 2025", + "bbox": [ + 22, + 258, + 57, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/19a56cf5e00b438278d859ed1f2a1e950888d74b178b001eab56b1dd9862790e.jpg", + "image_caption": [ + "Figure 1. Our PISA (Physics-Informed Simulation and Alignment) evaluation framework includes a new video dataset, where objects are dropped in a variety of real-world (left) and synthetic (right) scenes. For visualization purposes, we depict object motion by overlaying multiple video frames in each image shown above. Our real-world videos enable us to evaluate the physical accuracy of generated video output, and our synthetic videos enable us to improve accuracy through the use of post-training alignment methods." 
+ ], + "image_footnote": [], + "bbox": [ + 86, + 80, + 478, + 309 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/00bcbfd531744498cbd9c4457e5710d02b6c2db7bb28bac2414f98156285e998.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 493, + 80, + 885, + 309 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Named after Galileo's famous dropping experiment, we introduce the PISA (Physics-Informed Simulation and Alignment) framework for studying physics post-training in the context of the dropping task. PISA includes new real and simulated video datasets, as shown in Figure 1, containing a diverse set of dropping scenarios. PISA also includes a set of task-specific metrics that focus on measuring physical accuracy. Our real-world videos and metrics enable us to evaluate the physical accuracy of generated video output, and our synthetic videos enable us to improve accuracy through a post-training process we introduce.", + "bbox": [ + 83, + 404, + 475, + 570 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our study reveals that current state-of-the-art video generative models struggle significantly with the task of physically accurate object dropping. Generated objects frequently exhibit impossible behaviors, such as floating midair, defying gravity, or failing to preserve realistic trajectories during freefall. However, we find that simple fine-tuning can be remarkably effective: fine-tuning an open-source model on a small dataset of just a few thousand samples enables it to vastly outperform state-of-the-art models in physical accuracy. We further observe that pretrained models are critical for success; models initialized randomly, without leveraging pretraining on large-scale video datasets, fail to achieve comparable results. We also introduce a novel framework for reward modeling that yields further improvement. 
We demonstrate that our reward learning system is highly flexible in that different reward functions can be chosen to target different axes of physical improvement.", + "bbox": [ + 83, + 578, + 477, + 835 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our analysis also reveals key limitations. First, we see that model performance degrades when tasked with scenarios outside the training distribution, such as objects dropping from unseen depths or heights. Additionally, while our post", + "bbox": [ + 83, + 842, + 478, + 905 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "trained model generates object motion that is 3D-consistent and physically accurate, we observe misalignment between the generated and ground truth dropping time distribution.", + "bbox": [ + 493, + 404, + 885, + 450 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "These findings indicate that post-training is likely to be an essential component of future world modeling systems. The challenges we identify in this relatively simple task are likely to persist when modeling more sophisticated physical phenomena. By introducing the PISA framework and benchmark, we provide a useful diagnostic tool for researchers to test whether models are on the path to acquiring general physical abilities, as well as identify key limitations that researchers should be aware of when integrating new capabilities into their models through post-training.", + "bbox": [ + 493, + 455, + 888, + 609 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 496, + 628, + 638, + 643 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Modeling Intuitive Physics. Intuitive physics refers to the innate or learned human capacity to make quick and accurate judgments about the physical properties and behaviors of objects in the world, such as their motion, stability, or interactions. 
This ability, present even in infancy (Spelke et al., 1992; Baillargeon, 2004; Battaglia et al., 2013), is crucial for navigating and understanding everyday life. Replicating intuitive physics is a foundational step toward creating systems that can interact effectively and safely in dynamic, real-world environments (Lake et al., 2017). Gravity, as a core component of intuitive physics, plays a pivotal role in both domains. It is one of the most universal and observable physical forces, shaping our expectations about object motion, stability, and interaction (Hamrick et al., 2016; Ullman et al., 2017). Many studies in cognitive science (Battaglia et al., 2013) and AI (Wu et al., 2015; Bear et al., 2021) have", + "bbox": [ + 493, + 652, + 888, + 896 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 156, + 56, + 815, + 70 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "relied on physics engines to evaluate and model intuitive physics. Our work uses the Kubric engine (Greff et al., 2022) to generate training videos.", + "bbox": [ + 84, + 85, + 475, + 131 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Video Generation Models as World Simulators. Video generation has long been an intriguing topic in computer vision, particularly in the context of predicting future frames (Srivastava et al., 2015; Xue et al., 2016). More recently, as large-scale generative models have become prominent, Yang et al. explored how a wide range of real-world dynamics and decision-making processes can be expressed in terms of video modeling (Yang et al., 2024b; 2023). 
The introduction of the Sora model (OpenAI, 2024) marked a leap in the quality of generated videos and ignited interest in leveraging such models as \"world simulators.\" Over the past year, numerous video generation models have emerged, some open-source (Zheng et al., 2024; Yang et al., 2024c; Jin et al., 2024; Agarwal et al., 2025) and others commercially available (Kuaishou, 2024; Luma, 2024; Runway, 2024; OpenAI, 2024). Related to our work, Kang et al. (Kang et al., 2024) study the extent to which video generation models learn generalizable laws of physics when trained on 2D data from a synthetic environment.", + "bbox": [ + 86, + 137, + 477, + 425 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Evaluating Video Generation Models. Traditional image-based metrics for generative modeling, such Fréchet inception distance (FID) (Heusel et al., 2017) or inception score (IS) (Salimans et al., 2016), can be incorporated into the video domain, either by applying them on a frame-by-frame basis or by developing video-specific versions, such as Fréchet video distance (FVD) (Unterthiner et al., 2018). Going beyond distribution matching measures, several benchmarks have developed suites of metrics that aim to better evaluate the semantic or visual quality of generated videos. For example, V-Bench (Huang et al., 2024) offers a more granular evaluation by measuring video quality across multiple dimensions, such as with respect to subject consistency or spatial relationships. In physics, some works, such as VideoPhy (Bansal et al., 2024) and PhyGenBench (Meng et al., 2024), evaluate in the T2V setting by utilizing multimodal large language models (MLLM) to generate a VQA-based score. More recently, Cosmos (Agarwal et al., 2025) and Physics-IQ (Motamed et al., 2025), evaluate physics in the image-to-video and video-to-video settings.", + "bbox": [ + 86, + 431, + 477, + 734 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. 
PisaBench", + "text_level": 1, + "bbox": [ + 84, + 753, + 197, + 768 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our benchmark, PisaBench, examines the ability of video generative models to produce accurate physical phenomena by focusing on a straightforward dropping task.", + "bbox": [ + 84, + 779, + 473, + 825 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Task Definition & Assumptions", + "text_level": 1, + "bbox": [ + 84, + 840, + 336, + 857 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our task can be summarized as follows: given an image of an object suspended in midair, generate a video of the object", + "bbox": [ + 84, + 864, + 475, + 896 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/1ac3de96698eccbe7ffd3f2cdeee9e6722a7a59125ffbd3af395c8b98701c5c7.jpg", + "image_caption": [ + "Figure 2. The setup for collecting real-world videos." + ], + "image_footnote": [], + "bbox": [ + 519, + 84, + 859, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "falling and colliding with the ground and potentially other objects. Since a video is an incomplete partial observation of the 4D world, we make a number of assumptions to constrain the task space. These assumptions are crucial for ensuring that our metrics are reliable signals for physical accuracy, since they are only approximations of task success computed from a single ground truth and generated video.", + "bbox": [ + 496, + 382, + 885, + 489 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Specifically, we assume that the falling object is completely still in the initial frame, that only the force of gravity is acting on the object while it falls, and that the camera does not move. The first two assumptions are necessary for the image-to-video setting. 
Since we do not provide multiple frames as input, it is otherwise impossible to establish the initial velocity or acceleration of the falling object without these assumptions. The last assumption is necessary as our metrics derive from the motion of segmentation masks, which would be affected in the presence of camera motion.", + "bbox": [ + 496, + 496, + 888, + 647 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Real World Data", + "text_level": 1, + "bbox": [ + 496, + 664, + 650, + 678 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/75bc9f270274a876e0f2d9bb2c4307c38aafd436f796800aeee64fbd5f2d9b29.jpg", + "image_caption": [ + "Figure 3. Statistics of the real-world data: (a) number of objects in each video, (b) the proportions of different scenes in the videos." + ], + "image_footnote": [], + "bbox": [ + 506, + 695, + 696, + 816 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/2a038e4df7899a3c6a7af3f168c48dd30504eb434167e140afe024da4ee58ef5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 717, + 695, + 880, + 816 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Real World Videos. We collect a set of 361 real-world videos demonstrating the dropping task for evaluation. As is shown in Figure 4, the dataset includes a diverse set", + "bbox": [ + 496, + 859, + 885, + 905 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 155, + 56, + 815, + 71 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/7d4ceaab34fce33596b3dd1e9d4ba7a5b2fc93095d7bd082617fa0ffe00a3c57.jpg", + "image_caption": [ + "Figure 4. Examples of various objects included in our dataset. 
For simulation, we utilize the GSO dataset (Downs et al., 2022), while for the real-world dataset, we curate our own set of common household objects." + ], + "image_footnote": [], + "bbox": [ + 91, + 85, + 472, + 291 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "of objects with different shapes and sizes, captured across various settings such as offices, kitchens, parks, and more (see Figure 3). Each video begins with an object suspended by an invisible wire in the first frame, which is necessary to enforce the assumption that objects are stationary at the start of the video. This assumption is required in our imaged-to-video setting; otherwise, the initial velocity of an object is ambiguous. We cut the video clips to begin as soon as the wire is released. We record the videos in slow-motion at 120 frames per second (fps) with cellphone cameras mounted on tripods to eliminate camera motion. An example of our video collection setup is shown in Figure 2. Additional details on our collection system are provided in Appendix H.", + "bbox": [ + 84, + 397, + 475, + 594 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Simulated Test Videos. Since our post-training process uses a dataset of simulated videos, we also create a simulation test-set of 60 videos for understanding sim2real transfer. We create two splits of 30 videos each: one featuring objects and backgrounds seen during training, and the other featuring unseen objects and backgrounds. See Section 4.1 for details on how our simulated data is created.", + "bbox": [ + 84, + 601, + 475, + 705 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Annotations. As is shown in Figure 5, we annotate each video with a caption and segmentation masks estimated from the SAM 2 (Ravi et al., 2024) video segmentation model. 
We create a descriptive caption for each object in the format of “{object description} falls.” This caption is used to provide context to the task when text input is supported.", + "bbox": [ + 84, + 714, + 473, + 806 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Metrics", + "text_level": 1, + "bbox": [ + 84, + 821, + 173, + 835 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We propose three metrics to assess the accuracy of trajectories, shape fidelity, and object permanence. Each of our metrics compare frames from the ground-truth video with the generated video. Further details about the metrics, including", + "bbox": [ + 84, + 845, + 475, + 906 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/fdf0c1240713a0f765f0563462d434ced74c2fc826cdce4f827d1a1a50939cc7.jpg", + "image_caption": [ + "Figure 5. Example of annotations in real-world data. For segmentation masks, we manually annotate first frame and utilize SAM 2 to produce segmentation masks across frames. For captions, we annotate “{object description} falls.” for all video segments." + ], + "image_footnote": [], + "bbox": [ + 498, + 80, + 887, + 313 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "their formulas and our resampling procedure for accounting for differences in fps, is described in Appendix B.", + "bbox": [ + 496, + 412, + 885, + 443 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Trajectory L2. For each frame in both the generated video and ground truth, we calculate the centroid of the masked region. After doing this, we compute the average $L_{2}$ distance between the centroids of corresponding frames.", + "bbox": [ + 496, + 450, + 887, + 511 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Chamfer Distance (CD). 
To assess the shape fidelity of objects, we calculate the Chamfer Distance (CD) between the mask regions of the generated video and ground truth.", + "bbox": [ + 496, + 518, + 887, + 564 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Intersection over Union (IoU). We use the Intersection over Union (IoU) metric to evaluate object permanence. The IoU measures objects' degree of overlap between the generated video and ground truth.", + "bbox": [ + 496, + 571, + 887, + 632 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Evaluation Results", + "text_level": 1, + "bbox": [ + 496, + 648, + 661, + 662 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We evaluate 4 open models including CogVideoX-5B-I2V(Yang et al., 2024c), DynamiCrafter(Xing et al., 2023), Pyramid-Flow(Jin et al., 2024), and Open-Sora-V1.2(Zheng et al., 2024), as well as 4 proprietary models including Sora (OpenAI, 2024), Kling-V1(Kuaishou, 2024), Kling-V1.5(Kuaishou, 2024), and Runway Gen3 (Runway, 2024). We also evaluate OpenSora post-trained through the processes of Supervised Fine-Tuning (PSFT) and Object Reward Optimization (ORO); see Section 4 for details.", + "bbox": [ + 495, + 671, + 887, + 808 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The results of running the baseline models on the benchmark indicate a consistent failure to generate physically accurate dropping behavior, despite the visual realism of their generated frames. 
Qualitatively, we see common failure cases in Figure 6, such as implausible object deformations, floating, hallucination of new objects, and unrealistic special", + "bbox": [ + 495, + 814, + 888, + 906 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 156, + 56, + 815, + 71 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/342a3b4296640d6e8ed1d72350bfb1237311d4fb53b1dc49759aa08ed93e6617.jpg", + "image_caption": [ + "Figure 6. Qualitative comparison of results on real test set (row 1-2), simulated seen test set (row 3-4) and simulated unseen test set (row 5-6). We present the results of popular open-source and commercially available models alongside those of models fine-tuned through our method. Existing models often struggle to generate videos depicting objects falling, whereas our PSFT method effectively introduces knowledge of free-fall into the model. ORO enables the model to more accurately learn object motion and shape." + ], + "image_footnote": [], + "bbox": [ + 86, + 85, + 885, + 304 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "effects. We further visualize a random subset of generated trajectories on the left of Figure 8. In many cases, the object remains completely static, and sometimes the object even moves upward. When downward motion is present, it is often slow or contains unrealistic horizontal movement.", + "bbox": [ + 84, + 377, + 473, + 452 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Physics Post-Training", + "text_level": 1, + "bbox": [ + 84, + 470, + 292, + 489 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We present a post-training process to address the limitations of current models described in Section 3.4. 
We utilize simulated videos that demonstrate realistic dropping behavior. Our approach for post-training is inspired by the two-stage pipeline consisting of supervised fine-tuning followed by reward modeling commonly used in LLMs. We find that our pipeline improves performance on both real and simulated evaluations, with greater gains observed in simulation. This is due to the sim-to-real gap, though our approach still shows substantial gains in transferring to real-world data.", + "bbox": [ + 84, + 498, + 475, + 650 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Simulated Adaptation Data", + "text_level": 1, + "bbox": [ + 84, + 665, + 310, + 681 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The first stage of our approach involves supervised fine-tuning. We use Kubric (Greff et al., 2022), a simulation and rendering engine designed for scalable video generation, to create simulated videos of objects dropping and colliding with other objects on the ground. Each video consists of 1-6 dropping objects onto a (possibly empty) pile of up to 4 objects underneath them. The videos are 2 seconds long, consisting of 32 frames at 16 fps. The objects are sourced from the Google Scanned Objects (GSO) dataset (Downs et al., 2022), which provides true-to-scale 3D models created from real-world scans across diverse categories (examples shown in Figure 4). The camera remains stationary in each video and is oriented parallel to the ground plane. To introduce variability, we randomly sample the camera height", + "bbox": [ + 84, + 689, + 475, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "between 0.4 and 0.6 meters and position objects between 1 and 3 meters away from the camera, which corresponds to the distributions observed in the real-world dataset. More information about the dataset can be found in Appendix K.", + "bbox": [ + 496, + 377, + 885, + 438 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. 
Physics Supervised Fine-Tuning (PSFT).", + "text_level": 1, + "bbox": [ + 496, + 454, + 812, + 469 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/9cb6168eacd785e4f741b3dbc66836cc1b13221d84cacac490669a7f161086a6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 484, + 684, + 587 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6adf02f646ee4da228177a9bb3d6b9810b033aeaac6c9f68056cd271eb9c47c2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 700, + 484, + 883, + 585 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/91f342955c4abb85297c05b6fbf080e1ae798b04587e39e4104a2985d585e4d7.jpg", + "image_caption": [ + "Figure 7. Plots (a), (b), and (c) demonstrate that our metrics tend to improve with further training and that leveraging a pre-trained video diffusion model enhances performance compared to random initialization. In plot (d), the size of the training dataset varies in each training run (each consisting of 5k steps). With only 5k samples, we can achieve optimal results." + ], + "image_footnote": [], + "bbox": [ + 500, + 588, + 683, + 688 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/a7f340f4c1bc1798d01d2bcfa3aac741d7c979c6b80afc4910bba5d647c78286.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 700, + 587, + 883, + 688 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We use the pretrained Open-Sora v1.2 (Zheng et al., 2024) model as our base model and fine-tune it on our simulated video dataset. We employ Open-Sora v1.2's rectified flow training objective without modification (Liu et al., 2022). Each fine-tuning experiment is conducted with a batch size of 128 and a learning rate of $1\\mathrm{e} - 4$ on two 80GB NVIDIA A100 GPUs. 
As shown in Figure 6, fine-tuning with this", + "bbox": [ + 495, + 799, + 885, + 905 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 156, + 56, + 815, + 71 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/19982fa17713845a5d804164e55f0ffa9571cbf7063e297859e10dcd17cb35ca.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodRealSim (Seen)Sim (Unseen)
L2 (↓)CD (↓)IoU (↑)L2 (↓)CD (↓)IoU (↑)L2 (↓)CD (↓)IoU (↑)
ProprietarySora (OpenAI, 2024)0.1740.4880.0650.1490.4460.0400.1400.4190.031
Kling-V1 (Kuaishou, 2024)0.1570.4250.0560.1420.4150.0320.1450.4370.028
Kling-V1.5 (Kuaishou, 2024)0.1550.4240.0580.1370.3960.0330.1320.4050.029
Runway Gen3 (Runway, 2024)0.1870.5260.0420.1700.5090.0400.1490.4600.038
OpenCogVideoX-5B-I2V (Yang et al., 2024c)0.1380.3660.0800.1120.3150.0200.1010.2900.020
DynamiCrafter (Xing et al., 2023)0.1870.5040.0210.1570.4850.0390.1360.4300.033
Pyramid-Flow (Jin et al., 2024)0.1750.4850.0620.1260.3520.0590.1300.3810.048
Open-Sora (Zheng et al., 2024)0.1750.5020.0690.1390.4090.0360.1300.3680.034
OursOpen-Sora + PSFT (base)0.0760.1880.1390.0360.0880.1650.0280.0580.129
base + ORO (Seg)0.0750.1830.1420.0330.0760.1700.0320.0630.145
base + ORO (Flow)0.0670.1640.1360.0260.0620.1220.0220.0450.071
base + ORO (Depth)0.0670.1590.1290.0310.0720.1240.0220.0460.096
", + "bbox": [ + 86, + 71, + 885, + 347 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. PisaBench Evaluation Results. This table compares the performance of four proprietary models, four open models, and the models fine-tuned with PSFT and $\\mathrm{PSFT + ORO}$ on our real-world and simulated test set which is decomposed into seen and unseen object splits. Across all metrics, our PSFT models outperform all other baselines, including proprietary models like Sora. Reward modeling further enhances results, with segmentation rewards improving the shape-based IoU metric and optical rewards and depth rewards enhancing the motion-based L2 and CD metrics. This suggests that rewards can be flexibly adjusted to target specific aspects of performance.", + "bbox": [ + 83, + 356, + 888, + 428 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "data alone is sufficient to induce realistic dropping behavior in the model. Quantitatively, our PSFT model substantially improves on both our simulated and real-world benchmark, as shown in Table 1. Dataset size. We conduct an ablation study on the number of training samples to understand the amount of data required for optimal performance on our benchmark. We create random subsets from 500 to 20,000 samples and train our model for 5,000 gradient steps on each subset. Notably, as shown in Figure 7, only 5,000 samples are needed to achieve optimal results. Effect of pretraining. Additionally, we investigate the impact of Open-Sora's pre-training on adaptation. We randomly initialize the Open-Sora's denoising network while keeping the pre-trained initialization of the compressor network and train the model on a dataset of 5k training samples. 
As shown in Figure 8, the learned knowledge from Open-Sora's pretraining plays a critical role in our task.", + "bbox": [ + 83, + 450, + 475, + 708 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Overall, using PSFT on only 5k samples is sufficient to push Open-Sora's performance past all other evaluated models, including state-of-the-art commercial video generators, by a wide margin. This is made possible by leveraging the knowledge from the sufficiently pre-trained base model.", + "bbox": [ + 83, + 715, + 473, + 791 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Object Reward Optimization (ORO)", + "text_level": 1, + "bbox": [ + 84, + 806, + 375, + 821 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the second stage, we propose Object Reward Optimization (ORO) to use reward gradients to guide the video generation model toward generating videos where the object's motion and shape more closely align with the ground truth.", + "bbox": [ + 84, + 830, + 475, + 891 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2b0b1b1edbe72353a808756a9abec58b083266dae8f49a22cf820119f4e51b9d.jpg", + "image_caption": [ + "Figure 8. On the left, we plot random trajectories from the baseline models in Table 1. On the right, we show random trajectories from our fine-tuned model. The baseline trajectories exhibit unrealistic behavior, and most of them stay completely static. On the right, we see the trajectories consistently falling downward with collision and rolling behavior being modeled after the point of contact." + ], + "image_footnote": [], + "bbox": [ + 506, + 452, + 883, + 582 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We follow the VADER framework from (Prabhudesai et al., 2024) and introduce three reward models. The differences between our approach and VADER include: (1) our reward model utilizes both generated videos and ground truth instead of generated videos and conditioning. 
(2) gradients propagate through all denoising time steps in fine-tuning. Consequently, the VADER objective is modified as follows:", + "bbox": [ + 495, + 691, + 888, + 799 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nJ (\\theta) = \\mathbb {E} _ {\\left(x _ {0}, c\\right) \\sim \\mathcal {D}, x _ {0} ^ {\\prime} \\sim p _ {\\theta} \\left(x _ {0} ^ {\\prime} \\mid c\\right)} \\left[ R \\left(x _ {0} ^ {\\prime}, x _ {0}\\right) \\right] \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 553, + 809, + 885, + 829 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\mathcal{D}$ is the ground truth dataset, $p_{\\theta}(.)$ is a given video diffusion model, $x_0^{\\prime}, x_0 \\in \\mathbb{R}^{H \\times W \\times 3}$ are generated video and ground truth, and $c \\in \\mathbb{R}^{H \\times W \\times 3}$ is the initial image.", + "bbox": [ + 495, + 837, + 885, + 883 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Segmentation Reward. We utilize SAM 2 (Ravi et al.,", + "bbox": [ + 496, + 890, + 885, + 905 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 156, + 56, + 815, + 70 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2024) to generate segmentation masks across frames for generated videos. We define segmentation reward as the IoU between the dropping object's mask in generated video and the mask from the ground truth simulated segmentation.", + "bbox": [ + 84, + 84, + 473, + 147 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Optical Flow Reward. We utilize RAFT (Teed & Deng, 2020) to generate generated video's optical flow $V^{\\mathrm{gen}}$ and ground truth's optical flow $V^{\\mathrm{gt}}$ . 
We define the optical flow reward as $R(x_0', x_0) = -|V^{\\mathrm{gen}} - V^{\\mathrm{gt}}|$ .", + "bbox": [ + 84, + 152, + 475, + 214 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Depth Reward. We utilize Depth-Anything-V2 (Yang et al., 2024a) to generate generated video's depth map $D^{\\mathrm{gen}}$ and ground truth's depth map $D^{\\mathrm{gt}}$ . We define the optical flow reward as $R(x_0', x_0) = -|D^{\\mathrm{gen}} - D^{\\mathrm{gt}}|$ .", + "bbox": [ + 84, + 220, + 475, + 281 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Details on implementation can be found in Appendix C.", + "bbox": [ + 84, + 287, + 455, + 304 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We begin from the checkpoint of the first stage, which is trained on 5,000 samples trained over 5,000 gradient steps. We then fine-tune the model with ORO on the simulated dataset, using a batch size of 1 and two 80GB NVIDIA A100 GPUs for each fine-tuning experiment. We set a learning rate of $1\\mathrm{e} - 6$ for segmentation reward and depth reward and $1\\mathrm{e} - 5$ for optical flow.", + "bbox": [ + 84, + 311, + 475, + 417 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Table 1, incorporating ORO in reward modeling further improves performance. Additionally, each reward function enhances the aspect of physicality that aligns with its intended purpose—segmentation rewards improve shape accuracy, while flow rewards and depth rewards improve motion accuracy. This demonstrates the process is both modular and interpretable.", + "bbox": [ + 83, + 424, + 475, + 530 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5. 
Assessing Learned Physical Behavior", + "text_level": 1, + "bbox": [ + 84, + 549, + 419, + 566 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Having introduced our post-training approaches in Section 4, we probe into the model's understanding of the interaction between gravity and perspective—the two laws that determine the dynamics of our videos. We first test if the learned physical behavior of our model can generalize to dropping heights and depths beyond its training distribution. Then, we study the ability of the model to learn the probability distribution induced by the uncertainty of perspective.", + "bbox": [ + 84, + 575, + 475, + 696 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.1. Generalization to Unseen Depths and Heights", + "text_level": 1, + "bbox": [ + 84, + 713, + 436, + 729 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Depth and height are the main factors that affect the dynamics of a falling object in our videos. We can see this by combining the laws of gravity with perspective under our camera assumptions to model the object's image $y$ coordinate as a function of time (further details on our coordinate system are described in Appendix G):", + "bbox": [ + 84, + 736, + 475, + 828 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\ny (t) = \\frac {f}{Z} \\left(Y _ {0} - \\frac {1}{2} g t ^ {2}\\right). \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 838, + 475, + 872 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "From Equation (2), we see that the random variables that af", + "bbox": [ + 84, + 890, + 475, + 905 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "fect object motion are $Z$ (depth) and $Y$ (height) (the camera focal length $f$ is fixed). 
Thus, we are interested in testing generalization on unseen values of $Y$ and $Z$ .", + "bbox": [ + 496, + 84, + 883, + 130 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We create a simulated test set in which a single object is dropped from varying depths and heights, using objects and backgrounds unseen during training. We uniformly sample depth and height values (in meters) from the Cartesian product of the ranges [1, 5] and [0.5, 2.5], respectively. The camera height is fixed at $0.5m$ , and depth-height pairs outside the camera viewing frustum are discarded. A sample is in-distribution (ID) if its dropping depth and height both fall in the range [1, 3] and [0.5, 1.5].", + "bbox": [ + 495, + 137, + 885, + 273 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Since we have access to the ground truth dropping time in simulation, we also employ a dropping time error, a metric we describe in Appendix B. Our analysis in Table 2 shows that performance degrades for out-of-distribution scenarios.", + "bbox": [ + 495, + 281, + 885, + 342 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Since depth and height are the main physical quantities that affect falling dynamics, this finding indicates that our model may struggle to learn a fully generalizable law that accounts for the interaction of perspective and gravity.", + "bbox": [ + 495, + 349, + 885, + 410 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/88670abb85fb73081ca571f4fc556131bc94f22fa766b8d2ff19594c1ac79e8b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
SettingL2 (↓)Chamfer (↓)IOU (↑)Time Error (↓)
ID0.0360.0880.1550.091
OOD0.0440.1430.0490.187
", + "bbox": [ + 498, + 422, + 885, + 481 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. Results of our metrics on in-distribution (ID) and out-of-distribution (OOD) depth-height combinations. The values used for depth range from $1 - 5\\mathrm{m}$ (ID range [1,3]) and height values range from 0.5-2.5 (ID range [0.5, 1.5]).", + "bbox": [ + 496, + 489, + 885, + 547 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2. Distributional Analysis", + "text_level": 1, + "bbox": [ + 496, + 571, + 692, + 585 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/173d344ac811372831211d71e1f089dcd0ee90fa7170fbcfc6b6036661f678d4.jpg", + "image_caption": [ + "Figure 9. Demonstration of ambiguity in 2D perspective projections. Each of the three clouds appears the exact same in the camera's image. The right side shows how we perform a scale and translation augmentation to generate deliberately ambiguous data." + ], + "image_footnote": [], + "bbox": [ + 498, + 603, + 651, + 713 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/dcf5758aab2710279523e38fe46165467a3e180eeef0ed0b3050233202412b9e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 602, + 883, + 713 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The evolution of a physical system is not uniquely determined by a single initial image, since the lossy uncertainty of perspective induces a distribution of possible outcomes as shown in Figure 9. 
An ideal video world model should (1) output videos that are faithful to the evolution of some plausible world state and (2) provide accurate coverage across the entire distribution of the world that is possible from", + "bbox": [ + 495, + 799, + 885, + 905 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 155, + 56, + 815, + 71 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "its conditioning signal. In this section, we examine these two facets by studying $p(t|y)$ : the distribution of dropping times possible from an object at coordinate $y$ in the image plane. To do this, we create a simulated dataset that has a much wider distribution $p(t|y)$ than our PSFT dataset. See Appendix F for more details on its construction.", + "bbox": [ + 84, + 85, + 475, + 175 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/73a89a0c5b340af83339e7bdee6111a2850a250f52d7a51435656c3c43642fe2.jpg", + "image_caption": [ + "Figure 10. Examples of model trajectories lifted to 3D. The blue line represents the height of the camera ray passing through the bottom of the dropping object as a function of depth. The set of possible dropping trajectories at a given depth are depicted in gray. The lifted trajectory of the model is depicted in green." + ], + "image_footnote": [], + "bbox": [ + 86, + 186, + 473, + 287 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8862d09f5523d53e73a41fca13423e9ac96d265eb77cc7788176d5f31c7abc8a.jpg", + "image_caption": [ + "Figure 11. Visualizing $p(t|y)$ misalignment for different images. Green shows the ground-truth CDF, orange is the 32-frame quantized version, and blue is the empirical CDF of 128 different samples of dropping times from the model." 
+ ], + "image_footnote": [], + "bbox": [ + 86, + 388, + 472, + 530 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Testing (1): 3D faithfulness of trajectories.", + "text_level": 1, + "bbox": [ + 84, + 611, + 383, + 626 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "After training our model on this new dataset, we test whether its trajectories are consistent with a valid 3D world state. We first obtain an estimated dropping time from generated videos using the procedure described in Section 5.1. Using knowledge of the camera position, focal length, sensor width, and $y$ , we can obtain an implied depth and height of the trajectory. We can then back-project the video trajectory to 3D and analyze whether they constitute physically accurate trajectories. We give further details about this process in Appendix G. As show in Figure 10, we find that our model's lifted trajectories consistently align with the 3D trajectory at the height and depth implied by its dropping time, giving evidence that the model's visual outputs are faithful to some plausible real-world state.", + "bbox": [ + 84, + 633, + 475, + 845 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Testing (2): distributional alignment.", + "text_level": 1, + "bbox": [ + 84, + 852, + 346, + 868 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Going beyond the level of individual trajectories, we study the model's learned conditional distribution $p(t|y)$ . We", + "bbox": [ + 84, + 875, + 473, + 906 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "create 50 different initial images with differing values of $y$ , generate 128 different videos from each, and estimate the dropping time in each video. 
Using the laws of gravity, the laws of perspective, and the assumption of uniform depth sampling in our dataset, we can analytically derive the probability $p(t|y)$ as", + "bbox": [ + 496, + 84, + 885, + 175 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\np (t | y) = \\left\\{ \\begin{array}{l l} \\frac {g t}{\\left(Z _ {\\max } - Z _ {\\min }\\right) \\beta}, & t _ {\\min } \\leq t \\leq t _ {\\max } \\\\ 0, & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 539, + 196, + 885, + 237 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\beta$ is a constant that depends on $f$ , $y$ and the camera height. The derivation is given in Appendix E.", + "bbox": [ + 496, + 256, + 885, + 287 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We then measure goodness-of-fit for each of the 50 experiments using the Kolmogorov-Smirnov (KS) test (Massey Jr, 1951). The null hypothesis of the KS test is that the two distributions being compared are equal, and we consider p-values less than 0.05 as evidence of misalignment. Since our measured times have limited precision and can only take 32 distinct values—due to estimating the contact frame—we approximate the ground truth $p(t|y)$ using a Monte Carlo method. We sample 1000 values from the ground truth distribution and then quantized them into 32 bins corresponding to their frame, which we use as ground truth observations in the KS test. We find that in all 50/50 cases, the p-value from the test is less than 0.05, which provides evidence that the model does not learn the correct distribution of dropping times. 
We visualize the misalignment between the empirical CDF of the model's in Figure 11.", + "bbox": [ + 496, + 294, + 885, + 535 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In summary, while our model's trajectories show promising tendencies to ground themselves to plausible 3D world states, the range of possible outputs from the model does not align with the ground truth distribution.", + "bbox": [ + 496, + 542, + 885, + 603 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 496, + 623, + 614, + 638 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work studies post-training as an avenue for adapting adapting pre-trained video generator into world models. We introduce a post-training strategy that is highly effective in aligning our model. Our work raises interesting insights into the learned distributions of generative models. Qualitatively, large scale image or video generative models appear to excel at generating likely samples from the data distribution, but this alone does not imply that they match the data distribution well in its entirety. As long as a model is able to generate likely samples, global distributional misalignment is not necessarily a problem for content creation. However, this problem becomes critical for world models, where alignment across the entire distribution is necessary for faithful world simulation. 
The insights revealed by our study, made possible by our constrained and tractable setting, indicate that although post-training improves per-sample accuracy, general distributional alignment remains unsolved.", + "bbox": [ + 495, + 648, + 885, + 905 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 155, + 56, + 815, + 71 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgment", + "text_level": 1, + "bbox": [ + 86, + 83, + 236, + 99 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We thank Boyang Zheng, Srivats Poddar, Ellis Brown, Shengbang Tong, Shusheng Yang, Jihan Yang, Daohan Lu, Anjali Gupta and Ziteng Wang for their help with data collection. We thank Jiraphon Yenphraphai for valuable assistance in setting up our simulation code. We thank Runway and Kling AI for providing API credit. SX also acknowledges support from Intel AI SRS, Korean AI Research Hub, Open Path AI Foundation, Amazon Research Award, Google TRC program, and NSF Award IIS-2443404.", + "bbox": [ + 84, + 109, + 475, + 244 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 86, + 263, + 181, + 280 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Agarwal, N., Ali, A., Bala, M., Balaji, Y., Barker, E., Cai, T., Chattopadhyay, P., Chen, Y., Cui, Y., Ding, Y., et al. Cosmos world foundation model platform for physical AI. arXiv preprint arXiv:2501.03575, 2025.", + "Baillargeon, R. Infants' physical world. Current directions in psychological science, 13(3):89-94, 2004.", + "Bansal, H., Lin, Z., Xie, T., Zong, Z., Yarom, M., Bitton, Y., Jiang, C., Sun, Y., Chang, K.-W., and Grover, A. 
Videophy: Evaluating physical commonsense for video generation. arXiv preprint arXiv:2406.03520, 2024.", + "Battaglia, P. W., Hamrick, J. B., and Tenenbaum, J. B. Simulation as an engine of physical scene understanding. Proceedings of the National Academy of Sciences, 110 (45):18327-18332, 2013.", + "Bear, D. M., Wang, E., Mrowca, D., Binder, F. J., Tung, H.-Y. F., Pramod, R., Holdaway, C., Tao, S., Smith, K., Sun, F.-Y., et al. Physion: Evaluating physical prediction from vision in humans and machines. arXiv preprint arXiv:2106.08261, 2021.", + "Community, B. O. Blender - a 3d modelling and rendering package, 2018. URL http://www.blender.org.", + "Coumans, E. et al. Bullet physics engine. Open Source Software: http://bulletphysics.org, 1(3):84, 2010.", + "Craik, K. J. W. The nature of explanation, volume 445. CUP Archive, 1967.", + "Downs, L., Francis, A., Koenig, N., Kinman, B., Hickman, R., Reymann, K., McHugh, T. B., and Vanhoucke, V. Google scanned objects: A high-quality dataset of 3d scanned household items. In 2022 International Conference on Robotics and Automation (ICRA), pp. 2553-2560. IEEE, 2022.", + "Greff, K., Belletti, F., Beyer, L., Doersch, C., Du, Y., Duckworth, D., Fleet, D. J., Gnanapragasam, D., Golemo, F., Herrmann, C., et al. Kubric: A scalable dataset generator. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 3749-3761, 2022." + ], + "bbox": [ + 86, + 287, + 475, + 905 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ha, D. and Schmidhuber, J. Recurrent world models facilitate policy evolution. Advances in neural information processing systems, 31, 2018.", + "Hafner, D., Lillicrap, T., Ba, J., and Norouzi, M. Dream to control: Learning behaviors by latent imagination. arXiv preprint arXiv:1912.01603, 2019.", + "Hafner, D., Pasukonis, J., Ba, J., and Lillicrap, T. Mastering diverse domains through world models. 
arXiv preprint arXiv:2301.04104, 2023.", + "Hamrick, J. B., Battaglia, P. W., Griffiths, T. L., and Tenenbaum, J. B. Inferring mass in complex scenes by mental simulation. Cognition, 157:61-76, 2016.", + "Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., and Hochreiter, S. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017.", + "Huang, Z., He, Y., Yu, J., Zhang, F., Si, C., Jiang, Y., Zhang, Y., Wu, T., Jin, Q., Chanpaisit, N., et al. Vbench: Comprehensive benchmark suite for video generative models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 21807-21818, 2024.", + "Jin, Y., Sun, Z., Li, N., Xu, K., Jiang, H., Zhuang, N., Huang, Q., Song, Y., Mu, Y., and Lin, Z. Pyramidal flow matching for efficient video generative modeling. arXiv preprint arXiv:2410.05954, 2024.", + "Kang, B., Yue, Y., Lu, R., Lin, Z., Zhao, Y., Wang, K., Huang, G., and Feng, J. How far is video generation from world model: A physical law perspective. arXiv preprint arXiv:2411.02385, 2024.", + "Kuaishou. Kling, 2024. URL https://kling.kuaishou.com. Accessed: 2024.", + "Lake, B. M., Ullman, T. D., Tenenbaum, J. B., and Gershman, S. J. Building machines that learn and think like people. Behavioral and brain sciences, 40:e253, 2017.", + "LeCun, Y. A path towards autonomous machine intelligence version 0.9.2, 2022-06-27. Open Review, 62(1):1-62, 2022.", + "Liu, X., Gong, C., and Liu, Q. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022.", + "Luma. Dream machine, 2024. URL https://lumalabs.ai/dream-machine. Accessed: 2024.", + "Massey Jr, F. J. The kolmogorov-smirnov test for goodness of fit. Journal of the American statistical Association, 46 (253):68-78, 1951." 
+ ], + "bbox": [ + 500, + 84, + 887, + 905 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 155, + 56, + 815, + 71 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 480, + 922, + 491, + 934 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Meng, F., Liao, J., Tan, X., Shao, W., Lu, Q., Zhang, K., Cheng, Y., Li, D., Qiao, Y., and Luo, P. Towards world simulator: Crafting physical commonsense-based benchmark for video generation. arXiv preprint arXiv:2410.05363, 2024.", + "Motamed, S., Culp, L., Swersky, K., Jaini, P., and Geirhos, R. Do generative video models learn physical principles from watching videos? arXiv preprint arXiv:2501.09038, 2025.", + "NBC. Coca-Cola causes controversy with ai-made ad, 2025. Accessed: 2025-01-17.", + "OpenAI. Sora, 2024. URL https://sora.com. Accessed: 2024.", + "Prabhudesai, M., Mendonca, R., Qin, Z., Fragkiadaki, K., and Pathak, D. Video diffusion alignment via reward gradients. arXiv preprint arXiv:2407.08737, 2024.", + "Ravi, N., Gabeur, V., Hu, Y.-T., Hu, R., Ryali, C., Ma, T., Khedr, H., Rädle, R., Rolland, C., Gustafson, L., Mintun, E., Pan, J., Alwala, K. V., Carion, N., Wu, C.-Y., Girshick, R., Dollár, P., and Feichtenhofer, C. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024. URL https://arxiv.org/abs/2408.00714.", + "Runway. Gen-3 alpha, 2024. URL https://runwayml.com/research/introducing-gen-3alpha. Accessed: 2024.", + "Runway. AIFF 2025: AI Film Festival, 2025. URL https://aiff.runwayml.com/. Accessed: 2025-01-17.", + "Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., and Chen, X. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016.", + "Spelke, E. S., Breinlinger, K., Macomber, J., and Jacobson, K. Origins of knowledge. 
Psychological review, 99(4): 605, 1992.", + "Srivastava, N., Mansimov, E., and Salakhudinov, R. Unsupervised learning of video representations using lstms. In International conference on machine learning, pp. 843-852. PMLR, 2015.", + "Teed, Z. and Deng, J. Raft: Recurrent all-pairs field transforms for optical flow. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part II 16, pp. 402-419. Springer, 2020.", + "Ullman, T. D., Spelke, E., Battaglia, P., and Tenenbaum, J. B. Mind games: Game engines as an architecture for" + ], + "bbox": [ + 86, + 84, + 478, + 906 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "intuitive physics. Trends in cognitive sciences, 21(9): 649-665, 2017.", + "Unterthiner, T., Van Steenkiste, S., Kurach, K., Marinier, R., Michalski, M., and Gelly, S. Towards accurate generative models of video: A new metric & challenges. arXiv preprint arXiv:1812.01717, 2018.", + "Wu, J., Yildirim, I., Lim, J. J., Freeman, B., and Tenenbaum, J. Galileo: Perceiving physical object properties by integrating a physics engine with deep learning. Advances in neural information processing systems, 28, 2015.", + "Xing, J., Xia, M., Zhang, Y., Chen, H., Yu, W., Liu, H., Wang, X., Wong, T.-T., and Shan, Y. DynamiCrafter: Animating open-domain images with video diffusion priors. arXiv preprint arXiv:2310.12190, 2023.", + "Xue, T., Wu, J., Bouman, K., and Freeman, B. Visual dynamics: Probabilistic future frame synthesis via cross convolutional networks. Advances in neural information processing systems, 29, 2016.", + "Yang, L., Kang, B., Huang, Z., Zhao, Z., Xu, X., Feng, J., and Zhao, H. Depth anything v2. arXiv:2406.09414, 2024a.", + "Yang, M., Du, Y., Ghasemipour, K., Tompson, J., Schuurmans, D., and Abbeel, P. Learning interactive real-world simulators. 
arXiv preprint arXiv:2310.06114, 2023.", + "Yang, S., Walker, J., Parker-Holder, J., Du, Y., Bruce, J., Barreto, A., Abbeel, P., and Schuurmans, D. Video as the new language for real-world decision making. arXiv preprint arXiv:2402.17139, 2024b.", + "Yang, Z., Teng, J., Zheng, W., Ding, M., Huang, S., Xu, J., Yang, Y., Hong, W., Zhang, X., Feng, G., et al. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072, 2024c.", + "Zheng, Z., Peng, X., Yang, T., Shen, C., Li, S., Liu, H., Zhou, Y., Li, T., and You, Y. Open-sora: Democratizing efficient video production for all, March 2024. URL https://github.com/hpcaitech/Open-Sora." + ], + "bbox": [ + 500, + 84, + 885, + 734 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 156, + 56, + 815, + 71 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "A. Discussion of Image-to-Video setting.", + "text_level": 1, + "bbox": [ + 84, + 83, + 421, + 99 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We note that our choice of single-image input, as opposed to multi-frame input, comes with some trade-offs. We choose the image-to-video setting because it is widely supported among many different models, allowing us to make effective comparisons across the current state-of-the-art. However, only conditioning on a single frame introduces significant ambiguity. Due to the loss of information caused by projecting the 3D world through perspective, it may not be possible to directly infer the size of the object or its height. 
In practice, we find our metrics are still reliable signals of task success, but we still study the problem of ambiguity more extensively in Section 5.2.", + "bbox": [ + 84, + 109, + 883, + 200 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "B. Metric details.", + "text_level": 1, + "bbox": [ + 84, + 219, + 233, + 234 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We propose three metrics to assess the accuracy of trajectories, shape fidelity, and object permanence. Each of our metrics compare frames from the ground-truth video with the generated video. Because different models can operate at different fps, we perform fps alignment as part of our evaluation process. To perform fps alignment, we map each frame index of the generated videos to the ground truth using $f_{\\mathrm{gen}}$ and $f_{\\mathrm{gt}}$ , where $f_{\\mathrm{gen}}$ and $f_{\\mathrm{gt}}$ are the fps of generated video and ground truth respectively. For $i$ -th frame in the generated video, we find the corresponding aligned frame index $j$ in the ground truth video:", + "bbox": [ + 84, + 244, + 883, + 332 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nj = \\operatorname {r o u n d} \\left(i \\cdot \\frac {f _ {\\text {g e n}}}{f _ {\\mathrm {g t}}}\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 416, + 332, + 885, + 364 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Through fps alignment, we downsample the ground truth video to match the frame number of the generated video. We denote the downsampled ground truth as $\\{I_i^{\\mathrm{gt}}\\}_{i = 1}^N$ and the generated video as $\\{I_i^{\\mathrm{gen}}\\}_{i = 1}^N$ , where $N$ is the number of frames in the generated video.", + "bbox": [ + 84, + 369, + 885, + 412 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Trajectory L2. For each frame in both the generated video and ground truth, we calculate the centroid of the masked region. 
We then compute $L_{2}$ distance between the centroids of corresponding frames:", + "bbox": [ + 84, + 414, + 885, + 445 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nL _ {2} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\| C _ {i} ^ {\\text {g e n}} - C _ {i} ^ {\\mathrm {g t}} \\| _ {2} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 454, + 885, + 496 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where $C_i^{\\mathrm{gen}}, C_i^{\\mathrm{gt}} \\in \\mathbb{R}^2$ are the centroids of the dropping object in the $i$ -th frame of generated video and the ground truth respectively.", + "bbox": [ + 84, + 507, + 883, + 537 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Chamfer Distance (CD). To assess the shape fidelity of objects, we calculate the Chamfer Distance (CD) between the mask regions of the generated video and ground truth:", + "bbox": [ + 84, + 537, + 883, + 569 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {C D} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left(\\frac {1}{| P _ {i} |} \\sum_ {p \\in P _ {i}} \\min _ {q \\in Q _ {i}} \\| p - q \\| _ {2} + \\frac {1}{| Q _ {i} |} \\sum_ {q \\in Q _ {i}} \\min _ {p \\in P _ {i}} \\| q - p \\| _ {2}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 579, + 712, + 622 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where $P_{i} = \\{p_{j}\\}_{j = 1}^{|P_{i}|}$ and $Q_{i} = \\{q_{j}\\}_{j = 1}^{|Q_{i}|}$ are the sets of mask points in the $i$ -th frame of the generated video and ground truth respectively.", + "bbox": [ + 84, + 633, + 883, + 666 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Intersection over Union (IoU). We use the Intersection over Union (IoU) metric to evaluate object permanence. IoU measures objects' degree of overlap between the generated video and ground truth. 
This is formulated as follows:", + "bbox": [ + 84, + 666, + 883, + 696 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {I o U} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\frac {\\left| M _ {i} ^ {\\text {g e n}} \\cap M _ {i} ^ {\\mathrm {g t}} \\right|}{\\left| M _ {i} ^ {\\text {g e n}} \\cup M _ {i} ^ {\\mathrm {g t}} \\right|} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 707, + 885, + 750 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where $M_{i}^{\\mathrm{gen}}$ , $M_{i}^{\\mathrm{gt}} \\in \\{0,1\\}^{H\\times W}$ are binary segmentation masks of the falling object in the $i$ -th frame of the generated and ground truth videos respectively.", + "bbox": [ + 84, + 758, + 883, + 792 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Time error. When testing on videos generated in simulation, we can provide a timing error. From the dropping height $Y_{0}$ of the ground truth video, which we have access to from the simulator, we can derive $t_{\\mathrm{drop}} = \\sqrt{Y_0\\frac{2}{g}}$ . We then obtain a dropping time from the model's output by estimating the frame of impact as the first frame $F$ whose centroid velocity in the $y$ direction is negative. If $t_{\\mathrm{drop}}$ occurs in between $F$ and $F - 1$ , then we define the time error $E_{\\mathrm{time}}$ as zero. Otherwise, we define the time error as", + "bbox": [ + 84, + 797, + 883, + 876 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nE _ {\\text {t i m e}} = \\min \\left(\\left| \\frac {F - 1}{\\mathrm {f p s}} - t _ {\\text {d r o p}} \\right|, \\left| \\frac {F}{\\mathrm {f p s}} - t _ {\\text {d r o p}} \\right|\\right). 
\\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 876, + 885, + 910 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 155, + 56, + 815, + 71 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 477, + 922, + 493, + 934 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "C. ORO implementation details.", + "text_level": 1, + "bbox": [ + 84, + 83, + 359, + 99 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In our setting, we do not cut the gradient after step $k$ like VADER. The gradient $\\nabla_{\\theta}R(x_0',x_0)$ backpropagates through all diffusion timesteps and updates the model weights $\\theta$ :", + "bbox": [ + 84, + 109, + 885, + 140 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {\\theta} \\left(R \\left(x _ {0} ^ {\\prime}, x _ {0}\\right)\\right) = \\sum_ {t = 0} ^ {T} \\frac {\\partial R \\left(x _ {0} ^ {\\prime} , x _ {0}\\right)}{\\partial x _ {t}} \\cdot \\frac {\\partial x _ {t}}{\\partial \\theta} \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 348, + 148, + 885, + 191 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where $T$ is the total number of diffusion timesteps.", + "bbox": [ + 84, + 200, + 346, + 215 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Segmentation Reward. We utilize SAM 2 (Ravi et al., 2024) to generate segmentation masks across frames for generated video:", + "bbox": [ + 84, + 222, + 885, + 251 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nM ^ {\\text {g e n}} = \\operatorname {S A M} - 2 \\left(x _ {0} ^ {\\prime}\\right) \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 415, + 265, + 885, + 282 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where $M^{\\mathrm{gen}}$ denotes the masks of the falling object in the generated video. 
We obtain ground truth masks $M^{\\mathrm{gt}}$ using Kubric (Greff et al., 2022). To avoid non-differentiable reward, we use Sigmoid to normalize mask logits of generated video instead of converting them to binary masks. We use IoU between $M^{\\mathrm{gen}}$ and $M^{\\mathrm{gt}}$ as reward function:", + "bbox": [ + 84, + 294, + 885, + 339 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nR \\left(x _ {0} ^ {\\prime}, x _ {0}\\right) = \\operatorname {I o U} \\left(M ^ {\\text {g e n}}, M ^ {\\text {g t}}\\right) \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 349, + 885, + 367 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Maximizing objective 1 is equivalent to minimizing the following objective:", + "bbox": [ + 84, + 378, + 591, + 393 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nJ (\\theta) = \\mathbb {E} _ {\\left(x _ {0}, c\\right) \\sim \\mathcal {D}, x _ {0} ^ {\\prime} \\sim p _ {\\theta} \\left(x _ {0} ^ {\\prime} \\mid c\\right)} \\left[ 1 - \\operatorname {I o U} \\left(M ^ {\\text {g e n}}, M ^ {\\text {g t}}\\right) \\right] \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 402, + 885, + 422 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "This objective constrains the position and shape of the generated object in the video, encouraging a greater intersection with the object region in the ground truth video. The model learns to generate more accurate object positions and shapes through training with this objective.", + "bbox": [ + 84, + 431, + 885, + 477 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Optical Flow Reward. 
We utilize RAFT (Teed & Deng, 2020) to generate optical flow for both generated videos and ground truth:", + "bbox": [ + 84, + 484, + 885, + 513 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nV ^ {\\text {g e n}} = \\operatorname {R A F T} \\left(x _ {0} ^ {\\prime}\\right) \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 419, + 513, + 885, + 536 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nV ^ {\\mathrm {g t}} = \\operatorname {R A F T} (x _ {0})\n$$\n", + "text_format": "latex", + "bbox": [ + 428, + 532, + 550, + 547 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where $V^{\\mathrm{gen}}$ , $V^{\\mathrm{gt}}$ denote the optical flows of generated videos and ground truth. We define the reward as follows:", + "bbox": [ + 84, + 554, + 818, + 570 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nR \\left(x _ {0} ^ {\\prime}, x _ {0}\\right) = - \\left| V ^ {\\text {g e n}} - V ^ {\\text {g t}} \\right| \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 580, + 885, + 598 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Maximizing objective 1 is equivalent to minimizing the following objective:", + "bbox": [ + 84, + 607, + 591, + 623 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nJ (\\theta) = \\mathbb {E} _ {\\left(x _ {0}, c\\right) \\sim \\mathcal {D}, x _ {0} ^ {\\prime} \\sim p _ {\\theta} \\left(x _ {0} ^ {\\prime} \\mid c\\right)} \\left[ \\left| V ^ {\\text {g e n}} - V ^ {\\text {g t}} \\right| \\right] \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 633, + 885, + 652 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "This objective constrains the motion of the generated object in the video. 
The model learns to generate more accurate physical motion through training with this objective.", + "bbox": [ + 84, + 661, + 885, + 691 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Depth Reward. We utilize Depth-Anything-V2 (Yang et al., 2024a) to generate optical depth maps for both generated videos and ground truth:", + "bbox": [ + 84, + 698, + 885, + 728 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nD ^ {\\text {g e n}} = \\text {D e p t h - A n y t h i n g - V 2} \\left(x _ {0} ^ {\\prime}\\right) \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 728, + 885, + 750 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nD ^ {\\mathrm {g t}} = \\text {D e p t h - A n y t h i n g - V 2} (x _ {0})\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 747, + 598, + 762 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where $D^{\\mathrm{gen}}$ , $D^{\\mathrm{gt}}$ denote the depth maps of generated videos and ground truth. 
We define the reward as follows:", + "bbox": [ + 84, + 768, + 812, + 785 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nR \\left(x _ {0} ^ {\\prime}, x _ {0}\\right) = - \\left| D ^ {\\text {g e n}} - D ^ {\\mathrm {g t}} \\right| \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 795, + 885, + 811 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Maximizing objective 1 is equivalent to minimizing the following objective:", + "bbox": [ + 84, + 821, + 591, + 837 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nJ (\\theta) = \\mathbb {E} _ {\\left(x _ {0}, c\\right) \\sim \\mathcal {D}, x _ {0} ^ {\\prime} \\sim p _ {\\theta} \\left(x _ {0} ^ {\\prime} \\mid c\\right)} \\left[ \\left| D ^ {\\mathrm {g e n}} - D ^ {\\mathrm {g t}} \\right| \\right] \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 848, + 885, + 867 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "This objective constrains the 3d motion of the generated object in the video. The model learns to generate more accurate 3d physical motion through training with this objective.", + "bbox": [ + 84, + 875, + 885, + 906 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 155, + 56, + 815, + 71 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "D. Coordinate system", + "text_level": 1, + "bbox": [ + 84, + 83, + 272, + 101 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We give a visualization of the coordinate system used in this paper in Figure 12. To compute $y$ , we first leverage a segmentation map and find pixel row index that is just below the object. 
Once this row index is found, $y$ can easily be computed from the camera position, camera sensor size, and image resolution. We note that because our camera is assumed to be in perspective with the $XY$ plane, we can ignore $X$ and $x$ (not shown in figure) in our analyses in Section 5.1 and Section 5.2.", + "bbox": [ + 83, + 109, + 887, + 186 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/afd6a41eb9e3a6bfc87ee7d008d6fc6d00d3b71c75ccf91adf12e6a11f200f01.jpg", + "image_caption": [ + "Figure 12. A visualization of the coordinate system used in this paper (not to scale). The image plane height of the object is denoted as $y$ , its actual height in 3D as $Y$ , and its depth as $Z$ . The camera focal length is denoted as $f$ ." + ], + "image_footnote": [], + "bbox": [ + 86, + 199, + 331, + 407 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/0492c7160061c2167d23b2371de275bf324606389678afb1b4fcc06512b614c9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 375, + 198, + 879, + 411 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "E. Derivation of $p(t|y)$", + "text_level": 1, + "bbox": [ + 84, + 492, + 274, + 510 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In our dataset construction, we assume a uniform distribution for $Z$ , where $Z \\sim \\mathcal{U}(Z_{\\min}, Z_{\\max})$ , where $Z_{\\min} = 2$ and $Z_{\\max} = 18$ . As shown in Figure 12, the dropping height $Y$ is a linear function of $Z$ , i.e. $Y = y + \\beta Z$ for the slope $\\beta$ that can be computed from $y, f$ , the sensor size, and the camera height. This means we can solve for dropping time as $t = \\sqrt{\\frac{2}{g}Y} = \\sqrt{\\frac{2}{g}(y + \\beta Z)}$ . 
Applying the transformation rule for probability density yields", + "bbox": [ + 84, + 518, + 887, + 589 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\np (t | y) = \\left\\{ \\begin{array}{l l} \\frac {g t}{\\left(Z _ {\\max } - Z _ {\\min }\\right) \\beta}, & t _ {\\min } \\leq t \\leq t _ {\\max } \\\\ 0, & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 594, + 885, + 633 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "where $t_{\\mathrm{min}} = \\sqrt{\\frac{2}{g} (y + \\beta Z_{\\mathrm{min}})}$ and $t_{\\mathrm{max}} = \\sqrt{\\frac{2}{g} (y + \\beta Z_{\\mathrm{max}})}$ . Plugging in $Z_{\\mathrm{min}} = 2$ and $Z_{\\mathrm{max}} = 18$ yields Equation (3).", + "bbox": [ + 83, + 641, + 883, + 667 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "F. Ambiguous dataset", + "text_level": 1, + "bbox": [ + 84, + 683, + 272, + 700 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We introduce a new dataset for distributional analysis that broadens $p(t|y)$ , in contrast to the PSFT dataset, which prioritizes realism and has a narrower distribution due to limited object depth variability. To create a dataset with $p(t|y)$ that is sufficiently diverse for meaningful analysis, we first set up the initial scenes as before, but then apply an augmentation where a new depth value is sampled uniformly from [2, 18] and the object is scaled and translated such that it appears the same in the original image, as shown in Figure 9. For simplicity, we limit our scenes to a single dropping object with no other objects on the ground. We also disable shadows, preventing the model from using them as cues to infer depth and height. 
Our dataset contains 5k samples consisting of 1k unique initial scenes each containing 5 different trajectories produced by the augmentation.", + "bbox": [ + 83, + 708, + 887, + 830 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "G. Lifting trajectories to 3D", + "text_level": 1, + "bbox": [ + 84, + 849, + 326, + 867 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To lift trajectories to 3D, we first estimate $t_{\\mathrm{drop}}$ as described in Section 5.1. Using SAM2 to estimate object masks in the generated video, we can obtain a trajectory of the bottom of the object which we denote as $y_0, y_1, \\ldots, y_N$ where", + "bbox": [ + 83, + 875, + 887, + 906 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 155, + 56, + 815, + 71 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "$N = t_{\\mathrm{drop}} \\times \\mathrm{fps}$ . From $t_{\\mathrm{drop}}$ , we can solve for an implied depth $Z = \\frac{\\frac{1}{2}gt^2 - y}{\\beta}$ . We then compute the lifted 3D trajectory as $y_i \\mapsto y_i + \\beta Z$", + "bbox": [ + 84, + 84, + 887, + 119 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "H. PisaBench Details", + "text_level": 1, + "bbox": [ + 86, + 136, + 264, + 152 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this section, we discuss the details of our data collection pipeline and annotations. We present more examples of real-world videos and corresponding annotations in Figure 13.", + "bbox": [ + 84, + 162, + 887, + 193 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "H.1. Data Collection Pipeline", + "text_level": 1, + "bbox": [ + 84, + 209, + 294, + 224 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Collecting Real World Videos. 
We enlist approximately 15 volunteers to participate in the data collection process. We hand out a tripod, tape, and invisible wire for each volunteer. To ensure the quality, diversity, and minimize the ambiguity introduced by the environments, volunteers are provided with detailed guidelines. The key points of the data collection guidelines are shown in Table 3.", + "bbox": [ + 84, + 232, + 887, + 294 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Raw videos processing. For the collected raw videos, we cut each video into multiple clips and crop their sizes. For each video clip, we annotate its starting position in the original long video and ensure that the duration of each segment does not exceed 12 seconds. Regarding the sizes of the videos, we manually crop each video to an aspect ratio of $1:1$ , ensuring that the falling objects remain fully visible within the frame during the cropping process. The processing interface is shown in Figure 14.", + "bbox": [ + 84, + 300, + 887, + 377 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "H.2. Annotation Details", + "text_level": 1, + "bbox": [ + 86, + 393, + 256, + 407 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We present our annotation details in Figure 15. For video captions, we present the word cloud figure in (a). For segmentation masks, we annotate all objects in the first frame using positive and negative points, which are then propagated across frames using the SAM 2 (Ravi et al., 2024) model to produce segmentation masks for all objects throughout the video. The annotation interface is shown in (b).", + "bbox": [ + 84, + 416, + 887, + 477 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In addition to providing the annotated caption \" {object description} falls,\" we also add information to inform off-the-shelf models of the task's context as much as possible. 
To further enhance task comprehension, we append an additional description \"A video that conforms to the laws of physics.\" We also employ negative prompts \"no camera motion\" and \"no slow-motion\" to ensure environmental stability and impose constraints on the generated videos. These prompts explicitly instruct the models to avoid including camera motion or any non-real-time object motion, thereby maintaining consistency with real-world physics.", + "bbox": [ + 84, + 484, + 887, + 575 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "I. Inference Details", + "text_level": 1, + "bbox": [ + 86, + 594, + 251, + 609 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We present the inference configurations of each closed or open model we evaluate in Table 4. For models that do not support generating videos with 1:1 aspect ratio, we pad initial frames with black borders to the resolution supported by these models, and finally remove the black borders from the generated videos.", + "bbox": [ + 84, + 619, + 887, + 667 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "J. More Qualitative Examples", + "text_level": 1, + "bbox": [ + 86, + 685, + 339, + 702 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We present more qualitative examples in Figure 16 - Figure 22. Although in some showcases, models can roughly predict the downward trend, models still struggle to predict plausible shape and motion. The defects in the models can be mainly attributed to the following aspects:", + "bbox": [ + 84, + 710, + 887, + 757 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Trajectory correctness: in most videos, models fail to predict even the basic falling trajectory of objects, as shown in Figure 19 (a), despite this being highly intuitive for humans. 
Even in cases where the falling trajectory is roughly correctly predicted, the models still struggle to accurately predict subsequent events, such as collisions, as illustrated in Figure 16 (f).", + "- Object consistency: in many generated videos, object consistency is poor. Models struggle to infer the appearance of objects from multiple viewpoints in a physically plausible manner, resulting in unnatural appearances, as shown in Figure 16 (a). Additionally, models perform poorly in maintaining object permanence, causing objects to appear blurry, as illustrated in Figure 20 (f). Furthermore, models sometimes introduce new objects into the video, as depicted in" + ], + "bbox": [ + 102, + 773, + 883, + 902 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 155, + 56, + 815, + 71 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Figure 20 (e).", + "bbox": [ + 120, + 85, + 210, + 99 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "- Scene consistency: models struggle to maintain scene consistency, leading to abrupt transitions in many videos. These sudden changes make videos appear unnatural, as shown in Figure 18 (f).", + "bbox": [ + 107, + 109, + 887, + 141 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "K. Simulated Adaption Details", + "text_level": 1, + "bbox": [ + 88, + 159, + 344, + 176 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We use the Kubric (Greff et al., 2022) simulation and rendering engine for creating our simulated videos. Kubric uses PyBullet (Coumans et al., 2010) for running physics simulations and Blender (Community, 2018) for rendering. We set the simulation rate to 240 steps per second and render 2-second videos at 16 fps, resulting in 32 frames per video. 
Each scene consists of objects from the Google Scanned Objects (GSO) dataset (Downs et al., 2022) and uses environmental lighting from HDRI maps provided by Kubric. We use 930 objects and 458 HDRI maps for training and 103 objects and 51 HDRI maps for testing.", + "bbox": [ + 88, + 185, + 887, + 277 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For each video, we randomly choose 1-6 objects to drop. These objects are placed at a height uniformly sampled from $0.5\\mathrm{m}$ to $1.5\\mathrm{m}$ . Below each of these objects, a possibly empty pile of up to 4 objects spawns beneath to create collisions. The objects are placed in a spawn region of size $2\\mathrm{m} \\times 2\\mathrm{m}$ .", + "bbox": [ + 88, + 282, + 887, + 329 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The camera is initially positioned $1\\mathrm{m}$ behind this region, with its height varying uniformly between $0.4\\mathrm{m}$ and $0.6\\mathrm{m}$ . Once all objects are placed, the camera moves back in random increments until all objects are visible within the camera frame. The camera uses a focal length of $35\\mathrm{mm}$ , a sensor width of $32\\mathrm{mm}$ , and an aspect ratio of $1\\times 1$ .", + "bbox": [ + 88, + 335, + 887, + 382 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "L. Limitations", + "text_level": 1, + "bbox": [ + 88, + 401, + 210, + 417 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In this work, we collect and manually annotate a dataset of 361 real-world videos and design three spatial metrics to evaluate the performance of state-of-the-art image-to-video (I2V) models in a fundamental physical scenario: free fall. Our metrics focus solely on spatial positional relationships, excluding object appearance attributes such as color. 
To enable more fine-grained evaluations of appearance characteristics, we aim to develop metrics based on Multimodal Large Language Models (MLLMs) or pixel-level analysis in future work.", + "bbox": [ + 88, + 426, + 887, + 502 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Furthermore, we propose the PSFT and ORO methods to fine-tune the Open-Sora model (Zheng et al., 2024), improving its ability to generate physically plausible videos. Despite these improvements, certain limitations remain, specifically, the generation of blurry objects in some videos. We hope to address these challenges in future research by refining both the dataset and the fine-tuning strategies, aiming to produce videos that better maintain object visuals.", + "bbox": [ + 88, + 508, + 887, + 571 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 155, + 56, + 815, + 70 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 478, + 922, + 493, + 934 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/2c3255e61507df64f2fe2b8fc33da6df4f8074be9bf5d02291a3cf3bb8a8a94a.jpg", + "image_caption": [ + "(a) A white paper roll falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 119, + 883, + 275 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/76661e958af011fe7f1f7ab37c8d6b11f4f98a6f04af442101dbe8308c1127ee.jpg", + "image_caption": [ + "(c) A black bottle falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 297, + 883, + 452 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/f96ae24b9af0730f8094994e180d7d5a6e32804e7b580a5dadd002836de5fced.jpg", + "image_caption": [ + "(b) A transparent bottle falls." 
+ ], + "image_footnote": [], + "bbox": [ + 86, + 476, + 883, + 630 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/14aef21ca7858494c097f6f67d50528c4b5917b1b19db5da86d4a491cd5a520f.jpg", + "image_caption": [ + "(d) A white bottle falls.", + "Figure 13. Examples of real world videos and annotations. We present video frames in the first row and mask annotations in the second row." + ], + "image_footnote": [], + "bbox": [ + 86, + 654, + 883, + 808 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 156, + 56, + 815, + 71 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/1211a8336c5089da8a8c3424e1cc2df81e4f22b093cc82bb6236a03de951a3b1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
AspectRequirements
Camera·The camera must be stabilized using a tripod.\n·The dropping object should remain visible throughout the entire fall.\n·The trajectory of the object should be sufficiently centered in the frame.\n·Ensure the slow-motion setting is configured to 120 fps.\n·Avoid a completely top-down perspective; the frame should include both the floor and the wall for spatial context.\n·It is acceptable to record one long video containing multiple drops at the same location.
Objects·Most objects should be rigid and non-deformable.\n·A limited number of flexible or deformable objects may be included, as such data is also valuable.
Dropping Procedure·Secure the object with a wire using tape, ensuring stability. Multiple tapings may be necessary for proper stabilization.\n·Visibility of the wire in the video is acceptable.\n·No body parts should appear in the frame. If this is challenging, consider having a partner monitor the camera or use screen-sharing software to view the camera feed on a laptop for uninterrupted framing.\n·Record videos in a horizontal orientation to simplify cropping and to help keep the frame free of unnecessary elements.\n·Use a short wire to enhance object stability.\n·The object should remain stationary before being dropped.
Scene Composition·Make the scenes dynamic and engaging. Include interactions with other objects, such as collisions or objects tipping over. Static objects should serve as active elements rather than mere background props.\n·Avoid filming in classroom or laboratory environments.\n·Include a variety of dropping heights.\n·Film in different environments, ensuring at least one setting is outside your apartment.\n·Minimize human shadows in the frame whenever possible.\n·Ensure good lighting and maintain strong contrast between the objects and the back-ground.
", + "bbox": [ + 89, + 92, + 883, + 854 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 3. Key points of real world videos collection guideline. We have detailed requirements for camera, objects, dropping procedure and scene composition to ensure the quality, diversity and minimize ambiguity introduced by environments.", + "bbox": [ + 84, + 863, + 883, + 892 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 156, + 56, + 815, + 71 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/945c3d2b2e7ba5dd24811fc027b02c812dd3fdb62673324359a47eefccc06ed5.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 86, + 87, + 460, + 281 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/4d595693598e6f902081fbae2160c14f17629bf42e1282931e980a4273a07e54.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 491, + 87, + 887, + 281 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/8f522784783a329f5017e6371582ae27e84586d17a8f7bcd9025f339933400e5.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 89, + 348, + 424, + 606 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/733aaf4a714c7ec870de8f5e762d4a545653a15ef5ea24677ad5c2400309aed0.jpg", + "image_caption": [ + "Figure 14. Video processing interface. (a) we annotate starting positions in the original long videos and clip them into multiple clips less than 12 seconds. (b) We drag the cropping box to crop the video size to an aspect ratio of 1:1.", + "(b)", + "Figure 15. Annotation details of real world videos. (a) Word cloud of objects in video captions. Our videos contain a variety of daily life objects. 
(b) Interface for annotating positive and negative points in the first frame. Red and blue dots indicate positive and negative points respectively. We annotate all objects in the midair and ground." + ], + "image_footnote": [], + "bbox": [ + 473, + 345, + 885, + 609 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/5ff8dcc1fc52105fef32fc975178bc49046b998c818452290cac625a89916132.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelResolutionNumber of FramesFPSGuidance ScaleSampling StepsNoise Scheduler
ClosedSora720 × 72015030---
Kling-V1.5960 × 960150301.0--
Kling-V1960 × 960150301.0--
Runway Gen31280 × 76815630---
OpenCogVideoX-5B-I2V720 × 4804886.050DDIM
DynamiCrafter512 × 32090300.750DDIM
Pyramid-Flow1280 × 768120244.010EulerDiscrete
Open-Sora512 × 51290307.030RFLOW
", + "bbox": [ + 86, + 702, + 885, + 873 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 4. Inference details for models we evaluate, where “-” indicates the information is not available.", + "bbox": [ + 183, + 882, + 787, + 896 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 156, + 56, + 815, + 70 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 477, + 922, + 493, + 934 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/c26bfecf28ebcde71af6d60da63253f75d02e0c5503f1425cbff24859ab09444.jpg", + "image_caption": [ + "(a) A brown bottle falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 132, + 883, + 210 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/5f5749c22b33d6469cf285b89aba3d78b780ffbd4338568515ee93f4e1fa544d.jpg", + "image_caption": [ + "(b) A grey bottle falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 241, + 883, + 321 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/1f5a8409a2aefddb489c74e7deb33bffefdf80fdb5c072687ddd0946e16f3e00.jpg", + "image_caption": [ + "(c) A grey paper cup falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 353, + 883, + 431 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/19fe4f4ece7488446d3a1550236f6f8bf176f70d2615cfa7db7c28a0720cf550.jpg", + "image_caption": [ + "(d) A paper cup falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 464, + 883, + 542 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/7ecc93d403dcb06758ac993647a892395af1264eeca956ba139d2929c18192ba.jpg", + "image_caption": [ + "(e) A white bottle falls." 
+ ], + "image_footnote": [], + "bbox": [ + 86, + 574, + 883, + 654 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/c8f534dc359b0c6486ea7d10444e18fb31e91a7d173e50458a7fb898ac132de4.jpg", + "image_caption": [ + "(f) A white box falls.", + "Figure 16. Qualitative examples of Kling-V1 (Kuaishou, 2024). In (a) (b) (c) (f), objects have a tendency to fall. (b) (c) are roughly consistent with the laws of physics. In (a) (f), the shape of the object does not match the first frame. In (d), the paper cup is suspended in midair. In (e), new object is introduced. In (e), the model fails to correctly predict the collision that occurs after the white box falls and the chain of events that follows." + ], + "image_footnote": [], + "bbox": [ + 86, + 686, + 883, + 763 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 156, + 56, + 815, + 71 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 477, + 922, + 495, + 934 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/d24e9d610f63859bf566728e5b51c496af6f6f54f828b2cc2aae2a5186055f4c.jpg", + "image_caption": [ + "(a) A black and grey glove falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 143, + 885, + 224 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/936b8ee3ceeca5f82d0050ae068b79e918e73267eaa429a43c2317f1f2323c25.jpg", + "image_caption": [ + "(b) A black bottle falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 255, + 885, + 335 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/2d23c7b2c224aa44ab3a8eed9dfe17398040ed64c31737411568651ceb6dcf15.jpg", + "image_caption": [ + "(c) A blue and white box falls." 
+ ], + "image_footnote": [], + "bbox": [ + 86, + 366, + 885, + 446 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/20194bd551fbcc50f5676b0a019e1d10041918fadd0264051f4f9a7540545bac.jpg", + "image_caption": [ + "(d) A brown bottle falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 477, + 885, + 556 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/baf79faae3094c985d469cccd5f8af1aef1f088b8c03e4b23c119ec86aebd807.jpg", + "image_caption": [ + "(e) A Coca-Cola can falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 587, + 885, + 667 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/8cb3aa9d253fc5e51c44a557e5629610fa5ad282a6ce25364e557518e7b8d6c7.jpg", + "image_caption": [ + "(f) A pink box falls.", + "Figure 17. Qualitative examples of Runway Gen3 (Runway, 2024). In (b) (e), objects have a tendency to fall. In (a) (e) (f), new objects are introduced. In (b) (d), the shape of the object does not match the first frame. In (c), the box is suspended in midair." + ], + "image_footnote": [], + "bbox": [ + 86, + 699, + 885, + 777 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 156, + 56, + 816, + 71 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 475, + 922, + 496, + 934 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/a79a3d2c12bca59c6526e7216d60bb7580bc8c63576be8552b2f29dfd5c346d2.jpg", + "image_caption": [ + "(a) A black bottle falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 138, + 885, + 218 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/498fa6c0164db5fd88370c112e6dd2c5829dbe74624f6e825ac28219d5f5aa3d.jpg", + "image_caption": [ + "(b) A black helmet falls." 
+ ], + "image_footnote": [], + "bbox": [ + 86, + 247, + 885, + 328 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/8317550ad7aa20a11f3bf97aa716e1da5f6f471ce296a6b0a5da5eba39db5c52.jpg", + "image_caption": [ + "(c) A paper box falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 358, + 885, + 439 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/5259adbc56734c97b69c0f5ac5a0debfc574cf0ee9fab5cae058c157503ada7b.jpg", + "image_caption": [ + "(d) A white bottle falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 469, + 885, + 549 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/fb0eb57bfd7df70674700e7d467e475c936b995b0451b6a6746c321bb30fd4dc.jpg", + "image_caption": [ + "(e) A grey paper cup falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 580, + 885, + 660 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/dad22c2ccf02d28f88996ada3f6458c8f7ba92408c341e39e6da0ed00ee05fbb.jpg", + "image_caption": [ + "(f) A white box falls.", + "Figure 18. Qualitative examples of CogVideoX-5B-I2V (Yang et al., 2024c). In (a) - (f), objects have a tendency to fall. However, in all the videos, there are violations of physics. In (a) (b), the objects are divided into two parts. In (c) (d) (e), the shape of the object does not match the first frame. In (c), the trajectory is not a vertical fall. In (f), scene changes suddenly, which does not match the first frame." 
+ ], + "image_footnote": [], + "bbox": [ + 86, + 691, + 789, + 771 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 156, + 56, + 816, + 71 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 475, + 922, + 493, + 934 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/e39efd16222ce5ec5787e1ab30db7573f7cb53089f14019eb71cfee5130fa917.jpg", + "image_caption": [ + "(a) A black box falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 151, + 885, + 231 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/a17478f38168df434239dbbc27ca455093b6c1e40467cd5487d213bb5d83095d.jpg", + "image_caption": [ + "(b) A card holder falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 262, + 885, + 342 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/f5955939eff061d366f9461ca64daddccce438bc7cf26e0ca0177e4bea14e76c.jpg", + "image_caption": [ + "(c) A white bottle falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 372, + 885, + 454 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/3064ab2bd099f6c34a0b0143af9b37cafbc9b07d4aff556a41357fd47bdbe8d3.jpg", + "image_caption": [ + "(d) A white box falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 484, + 885, + 564 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/f9b6d84ca868eb7b320e5bf66a6830342a093523d4a8c01e2b0d2c6c9b32e833.jpg", + "image_caption": [ + "(e) An orange and white box falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 594, + 885, + 674 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/b68b172758804959f5687238c0b53d284f904543618dda06d81c04419d71a2ce.jpg", + "image_caption": [ + "(f) A shoe falls.", + "Figure 19. Qualitative examples of DynamiCrafter (?). 
In all the videos, objects do not have a tendency to fall, suspended in the midair." + ], + "image_footnote": [], + "bbox": [ + 86, + 705, + 885, + 785 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 156, + 56, + 816, + 71 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 475, + 922, + 495, + 934 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/b39f635b4b2874364c63137dcdc9fc15f4eb68ba57cc4f852a463fac4a5c56f0.jpg", + "image_caption": [ + "(a) A black bottle falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 143, + 885, + 224 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/c3078aa13d861c89ebf70c3db430662cdcd8c83d8880aa9a934a258e5c3598e5.jpg", + "image_caption": [ + "(b) A green and white box falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 255, + 885, + 335 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/47bbef5b7ad9b2e303cbcdd98429261eea928bc17ba376000894eba09e78ab5b.jpg", + "image_caption": [ + "(c) A grey bottle falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 364, + 885, + 445 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/d20954189b98d66653a95abacce5f2c333474ea3a756f9e105766842d1c52aaf.jpg", + "image_caption": [ + "(d) An orange tube falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 477, + 885, + 556 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/52a4ded90059019d5599737a9ccf91373f0a89472a4470044d57fa2d382b574c.jpg", + "image_caption": [ + "(e) A white bottle falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 587, + 885, + 667 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/1bfe20845da001136df0ce41999f5d2267a843826b8f310807bc262f0f0570d5.jpg", + "image_caption": [ + "(f) A plastic box falls.", + "Figure 20. 
Qualitative examples of Pyramid-Flow (Jin et al., 2024). In (b) (d) (e), objects have a tendency to fall. In (a) (b) (e) (f), new objects are introduced. In (c), scene changes, which does not match the first frame.. In (d), the tube becomes blurry." + ], + "image_footnote": [], + "bbox": [ + 86, + 698, + 885, + 777 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 156, + 56, + 816, + 71 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 475, + 922, + 495, + 934 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/161db114927f1a531685c756c1a59ca048d49f95035d117b63e02913d1aeda55.jpg", + "image_caption": [ + "(a) A bottle full of water falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 143, + 885, + 224 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/00dd072f248c8d96ba525c8f9026f6dbe513577c2833c0a39bcc58836058a3ae.jpg", + "image_caption": [ + "(b) A brown bottle falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 255, + 885, + 335 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/20691688d65a9c34668843431f6da68506d385837a8adf32d934a8338cfbb623.jpg", + "image_caption": [ + "(c) A grey paper cup falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 366, + 885, + 445 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/20dc724f7197dd626747f2db293d23a957a1d7672a436a2fd07469a0a937b08a.jpg", + "image_caption": [ + "(d) A paper box falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 477, + 885, + 556 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/5f2dbd497dd96b551059198818fd41a1cc13a1d01d40d7814fd8343102c994a3.jpg", + "image_caption": [ + "(e) A white bottle falls." 
+ ], + "image_footnote": [], + "bbox": [ + 86, + 587, + 885, + 667 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/5794ff6b8e6a1c23e1b350538a38f18063fb742919db3216ae76aa58e6f391f5.jpg", + "image_caption": [ + "(f) A white box falls.", + "Figure 21. Qualitative examples of Open-Sora (Zheng et al., 2024). In all the videos, objects do not have a tendency to fall, suspended in the midair. In (b) (d), scene changes suddenly, which does not match the first frame. In (e), new object is introduced." + ], + "image_footnote": [], + "bbox": [ + 86, + 698, + 885, + 777 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 156, + 56, + 816, + 71 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 475, + 922, + 495, + 934 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/0cc35a21a9c1a24bb99fd5ae428c1b943237dcc4bf44654420f09f88cbbe622f.jpg", + "image_caption": [ + "(a) A brown bottle falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 143, + 885, + 224 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/20453f58b53c0dbbefd7d26b5854ca263406b127771a6983ee453bcedd34a9b8.jpg", + "image_caption": [ + "(b) A grey eraser falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 255, + 885, + 335 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/d0de139686427ffdf841921b45e326470ebe6a127e77cc98801376148a86c3b4.jpg", + "image_caption": [ + "(c) A grey paper cup falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 366, + 885, + 445 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/2c9f209b0aeac72570d8c4fde360b1cf6b4dfcb2b6a0e8d4e853f7b3a78fa4d6.jpg", + "image_caption": [ + "(d) A transparent bottle falls." 
+ ], + "image_footnote": [], + "bbox": [ + 86, + 477, + 885, + 556 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/f52bf9a1a1b538e95d70fbf8e8072fe5c66e0fb960e1c0adc65706d6253840a0.jpg", + "image_caption": [ + "(e) A red wrapping paper falls." + ], + "image_footnote": [], + "bbox": [ + 86, + 587, + 885, + 667 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/a00eb2f6afd73e7890435740d404820471ec3c29a424ac89fa8f08f0f78a9a90.jpg", + "image_caption": [ + "(f) A white bottle falls.", + "Figure 22. Qualitative examples of our method (Open-Sora + PSFT + ORO). In all the videos, objects have a tendency to fall. However, the consistency of objects is still insufficient. In some frames, objects become blurry. Objects sometimes disappear after collision." + ], + "image_footnote": [], + "bbox": [ + 86, + 699, + 885, + 777 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop", + "bbox": [ + 156, + 56, + 816, + 71 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 475, + 922, + 495, + 934 + ], + "page_idx": 24 + } +] \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09595/b59876e9-da8b-438b-ab54-bb4c4d76820f_model.json b/data/2025/2503_09xxx/2503.09595/b59876e9-da8b-438b-ab54-bb4c4d76820f_model.json new file mode 100644 index 0000000000000000000000000000000000000000..813cd0c22a465ed5c0357f02283e82731c23dd00 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/b59876e9-da8b-438b-ab54-bb4c4d76820f_model.json @@ -0,0 +1,4661 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.222, + 0.11, + 0.753, + 0.157 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.199, + 0.798, + 0.217 + ], + "angle": 0, + "content": "Chenyu Li \\(^{*1}\\) Oscar Michel 
\\(^{*1}\\) Xichen Pan \\(^{1}\\) Sainan Liu \\(^{2}\\) Mike Roberts \\(^{2}\\) Saining Xie" + }, + { + "type": "title", + "bbox": [ + 0.242, + 0.243, + 0.321, + 0.259 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.271, + 0.445, + 0.618 + ], + "angle": 0, + "content": "Large-scale pre-trained video generation models excel in content creation but are not reliable as physically accurate world simulators out of the box. This work studies the process of posttraining these models for accurate world modeling through the lens of the simple, yet fundamental, physics task of modeling object freefall. We show state-of-the-art video generation models struggle with this basic task, despite their visually impressive outputs. To remedy this problem, we find that fine-tuning on a relatively small amount of simulated videos is effective in inducing the dropping behavior in the model, and we can further improve results through a novel reward modeling procedure we introduce. Our study also reveals key limitations of post-training in generalization and distribution modeling. Additionally, we release a benchmark for this task that may serve as a useful diagnostic tool for tracking physical accuracy in large-scale video generative model development. Code is available at this repository: https://github.com/vision-x-nyu/pisa-experiments." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.653, + 0.218, + 0.669 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.678, + 0.477, + 0.845 + ], + "angle": 0, + "content": "Over the past year, video generation models have advanced significantly, inspiring visions of a future where these models could serve as realistic world models (Craik, 1967; LeCun, 2022; Hafner et al., 2019; 2023; Ha & Schmidhuber, 2018). 
State-of-the-art video generation models models exhibit impressive results in content creation (OpenAI, 2024; Kuaishou, 2024; Luma, 2024; Runway, 2024) and are already being used in advertising and filmmaking (Runway, 2025; NBC, 2025). These advancements have sparked a line of research that seeks to evolve these models from content creators to world simulators for embodied agents (Yang" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.853, + 0.475, + 0.88 + ], + "angle": 0, + "content": "*Equal contribution, alphabetical order. 1New York University 2Intel Labs." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.244, + 0.888, + 0.336 + ], + "angle": 0, + "content": "et al., 2023; 2024b; Agarwal et al., 2025). However, accurate world modeling is considerably more challenging than creative content creation because looking \"good enough\" is not sufficient: generated pixels must faithfully represent a world state evolving in accordance with the laws of physics and visual perspective." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.342, + 0.889, + 0.541 + ], + "angle": 0, + "content": "We find that although the generations of state-of-the-art models are impressive visually, these models still struggle to generate results that are accurate physically, even though these models are pretrained on internet-scale video data demonstrating a wide variety of complex physical interactions. The failure to ground and align visual generations to the laws of physics suggests that pretraining is not enough and a post-training stage is needed. Much like how pretrained Large Language Models (LLMs) need to be adapted through post-training before they can be useful conversational assistants, pretrained video generative models ought to be adapted through post-training before they can be deployed as physically accurate world simulators." 
+ }, + { + "type": "text", + "bbox": [ + 0.496, + 0.547, + 0.889, + 0.849 + ], + "angle": 0, + "content": "In this work, we rigorously examine the post-training process of video generation models by focusing on the simple yet fundamental physics task of modeling object freefall, which we find is highly challenging for state-of-the-art models. Specifically, we study an image-to-video\\(^{1}\\) (I2V) scenario where the goal is to generate a video of an object falling and potentially colliding with other objects on the ground, starting from an initial image of the object suspended in midair. We chose to study this single task, rather than general physics ability as a whole, because its simplicity allows us to conduct controlled experiments that yield insights into the strengths and limitations of the post-training process, which we believe will become an increasingly important component of research in generative world modeling. Additionally, the simplicity of the dropping task allows it to be implemented in simulation which is desirable because it allows us to easily test the properties of dataset scaling, gives us access to ground truth annotations for evaluation, and gives us the ability to precisely manipulate the simulation environment for controlled experimentation." + }, + { + "type": "page_footnote", + "bbox": [ + 0.497, + 0.858, + 0.888, + 0.885 + ], + "angle": 0, + "content": "1We discuss our decision to formulate this task in the image-to-video setting instead of the video-to-video setting in Appendix A." 
+ }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.26, + 0.058, + 0.707 + ], + "angle": 270, + "content": "arXiv:2503.09595v1 [cs.CV] 12 Mar 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.157, + 0.057, + 0.816, + 0.071 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.082, + 0.48, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.495, + 0.082, + 0.887, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.324, + 0.888, + 0.381 + ], + "angle": 0, + "content": "Figure 1. Our PISA (Physics-Informed Simulation and Alignment) evaluation framework includes a new video dataset, where objects are dropped in a variety of real-world (left) and synthetic (right) scenes. For visualization purposes, we depict object motion by overlaying multiple video frames in each image shown above. Our real-world videos enable us to evaluate the physical accuracy of generated video output, and our synthetic videos enable us to improve accuracy through the use of post-training alignment methods." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.405, + 0.477, + 0.571 + ], + "angle": 0, + "content": "Named after Galileo's famous dropping experiment, we introduce the PISA (Physics-Informed Simulation and Alignment) framework for studying physics post-training in the context of the dropping task. PISA includes new real and simulated video datasets, as shown in Figure 1, containing a diverse set of dropping scenarios. PISA also includes a set of task-specific metrics that focus on measuring physical accuracy. 
Our real-world videos and metrics enable us to evaluate the physical accuracy of generated video output, and our synthetic videos enable us to improve accuracy through a post-training process we introduce." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.579, + 0.478, + 0.837 + ], + "angle": 0, + "content": "Our study reveals that current state-of-the-art video generative models struggle significantly with the task of physically accurate object dropping. Generated objects frequently exhibit impossible behaviors, such as floating midair, defying gravity, or failing to preserve realistic trajectories during freefall. However, we find that simple fine-tuning can be remarkably effective: fine-tuning an open-source model on a small dataset of just a few thousand samples enables it to vastly outperform state-of-the-art models in physical accuracy. We further observe that pretrained models are critical for success; models initialized randomly, without leveraging pretraining on large-scale video datasets, fail to achieve comparable results. We also introduce a novel framework for reward modeling that yields further improvement. We demonstrate that our reward learning system is highly flexible in that different reward functions can be chosen to target different axes of physical improvement." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.843, + 0.479, + 0.906 + ], + "angle": 0, + "content": "Our analysis also reveals key limitations. First, we see that model performance degrades when tasked with scenarios outside the training distribution, such as objects dropping from unseen depths or heights. Additionally, while our post" + }, + { + "type": "text", + "bbox": [ + 0.495, + 0.405, + 0.887, + 0.452 + ], + "angle": 0, + "content": "trained model generates object motion that is 3D-consistent and physically accurate, we observe misalignment between the generated and ground truth dropping time distribution." 
+ }, + { + "type": "text", + "bbox": [ + 0.495, + 0.457, + 0.889, + 0.61 + ], + "angle": 0, + "content": "These findings indicate that post-training is likely to be an essential component of future world modeling systems. The challenges we identify in this relatively simple task are likely to persist when modeling more sophisticated physical phenomena. By introducing the PISA framework and benchmark, we provide a useful diagnostic tool for researchers to test whether models are on the path to acquiring general physical abilities, as well as identify key limitations that researchers should be aware of when integrating new capabilities into their models through post-training." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.629, + 0.64, + 0.644 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.495, + 0.654, + 0.889, + 0.897 + ], + "angle": 0, + "content": "Modeling Intuitive Physics. Intuitive physics refers to the innate or learned human capacity to make quick and accurate judgments about the physical properties and behaviors of objects in the world, such as their motion, stability, or interactions. This ability, present even in infancy (Spelke et al., 1992; Baillargeon, 2004; Battaglia et al., 2013), is crucial for navigating and understanding everyday life. Replicating intuitive physics is a foundational step toward creating systems that can interact effectively and safely in dynamic, real-world environments (Lake et al., 2017). Gravity, as a core component of intuitive physics, plays a pivotal role in both domains. It is one of the most universal and observable physical forces, shaping our expectations about object motion, stability, and interaction (Hamrick et al., 2016; Ullman et al., 2017). 
Many studies in cognitive science (Battaglia et al., 2013) and AI (Wu et al., 2015; Bear et al., 2021) have" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.493, + 0.935 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.156, + 0.057, + 0.816, + 0.073 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.086, + 0.477, + 0.132 + ], + "angle": 0, + "content": "relied on physics engines to evaluate and model intuitive physics. Our work uses the Kubric engine (Greff et al., 2022) to generate training videos." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.138, + 0.478, + 0.426 + ], + "angle": 0, + "content": "Video Generation Models as World Simulators. Video generation has long been an intriguing topic in computer vision, particularly in the context of predicting future frames (Srivastava et al., 2015; Xue et al., 2016). More recently, as large-scale generative models have become prominent, Yang et al. explored how a wide range of real-world dynamics and decision-making processes can be expressed in terms of video modeling (Yang et al., 2024b; 2023). The introduction of the Sora model (OpenAI, 2024) marked a leap in the quality of generated videos and ignited interest in leveraging such models as \"world simulators.\" Over the past year, numerous video generation models have emerged, some open-source (Zheng et al., 2024; Yang et al., 2024c; Jin et al., 2024; Agarwal et al., 2025) and others commercially available (Kuaishou, 2024; Luma, 2024; Runway, 2024; OpenAI, 2024). Related to our work, Kang et al. (Kang et al., 2024) study the extent to which video generation models learn generalizable laws of physics when trained on 2D data from a synthetic environment." 
+ }, + { + "type": "text", + "bbox": [ + 0.087, + 0.433, + 0.478, + 0.735 + ], + "angle": 0, + "content": "Evaluating Video Generation Models. Traditional image-based metrics for generative modeling, such Fréchet inception distance (FID) (Heusel et al., 2017) or inception score (IS) (Salimans et al., 2016), can be incorporated into the video domain, either by applying them on a frame-by-frame basis or by developing video-specific versions, such as Fréchet video distance (FVD) (Unterthiner et al., 2018). Going beyond distribution matching measures, several benchmarks have developed suites of metrics that aim to better evaluate the semantic or visual quality of generated videos. For example, V-Bench (Huang et al., 2024) offers a more granular evaluation by measuring video quality across multiple dimensions, such as with respect to subject consistency or spatial relationships. In physics, some works, such as VideoPhy (Bansal et al., 2024) and PhyGenBench (Meng et al., 2024), evaluate in the T2V setting by utilizing multimodal large language models (MLLM) to generate a VQA-based score. More recently, Cosmos (Agarwal et al., 2025) and Physics-IQ (Motamed et al., 2025), evaluate physics in the image-to-video and video-to-video settings." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.754, + 0.199, + 0.77 + ], + "angle": 0, + "content": "3. PisaBench" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.78, + 0.475, + 0.827 + ], + "angle": 0, + "content": "Our benchmark, PisaBench, examines the ability of video generative models to produce accurate physical phenomena by focusing on a straightforward dropping task." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.842, + 0.338, + 0.858 + ], + "angle": 0, + "content": "3.1. 
Task Definition & Assumptions" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.866, + 0.476, + 0.897 + ], + "angle": 0, + "content": "Our task can be summarized as follows: given an image of an object suspended in midair, generate a video of the object" + }, + { + "type": "image", + "bbox": [ + 0.521, + 0.085, + 0.86, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.535, + 0.344, + 0.848, + 0.358 + ], + "angle": 0, + "content": "Figure 2. The setup for collecting real-world videos." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.383, + 0.887, + 0.49 + ], + "angle": 0, + "content": "falling and colliding with the ground and potentially other objects. Since a video is an incomplete partial observation of the 4D world, we make a number of assumptions to constrain the task space. These assumptions are crucial for ensuring that our metrics are reliable signals for physical accuracy, since they are only approximations of task success computed from a single ground truth and generated video." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.497, + 0.889, + 0.648 + ], + "angle": 0, + "content": "Specifically, we assume that the falling object is completely still in the initial frame, that only the force of gravity is acting on the object while it falls, and that the camera does not move. The first two assumptions are necessary for the image-to-video setting. Since we do not provide multiple frames as input, it is otherwise impossible to establish the initial velocity or acceleration of the falling object without these assumptions. The last assumption is necessary as our metrics derive from the motion of segmentation masks, which would be affected in the presence of camera motion." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.665, + 0.651, + 0.679 + ], + "angle": 0, + "content": "3.2. 
Real World Data" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.696, + 0.697, + 0.817 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.718, + 0.696, + 0.882, + 0.817 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.497, + 0.828, + 0.886, + 0.856 + ], + "angle": 0, + "content": "Figure 3. Statistics of the real-world data: (a) number of objects in each video, (b) the proportions of different scenes in the videos." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.861, + 0.887, + 0.906 + ], + "angle": 0, + "content": "Real World Videos. We collect a set of 361 real-world videos demonstrating the dropping task for evaluation. As is shown in Figure 4, the dataset includes a diverse set" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.157, + 0.057, + 0.816, + 0.073 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "image", + "bbox": [ + 0.092, + 0.086, + 0.473, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.31, + 0.476, + 0.366 + ], + "angle": 0, + "content": "Figure 4. Examples of various objects included in our dataset. For simulation, we utilize the GSO dataset (Downs et al., 2022), while for the real-world dataset, we curate our own set of common household objects." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.398, + 0.476, + 0.595 + ], + "angle": 0, + "content": "of objects with different shapes and sizes, captured across various settings such as offices, kitchens, parks, and more (see Figure 3). Each video begins with an object suspended by an invisible wire in the first frame, which is necessary to enforce the assumption that objects are stationary at the start of the video. 
This assumption is required in our image-to-video setting; otherwise, the initial velocity of an object is ambiguous. We cut the video clips to begin as soon as the wire is released. We record the videos in slow-motion at 120 frames per second (fps) with cellphone cameras mounted on tripods to eliminate camera motion. An example of our video collection setup is shown in Figure 2. Additional details on our collection system are provided in Appendix H."
Further details about the metrics, including" + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.081, + 0.888, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.497, + 0.328, + 0.888, + 0.385 + ], + "angle": 0, + "content": "Figure 5. Example of annotations in real-world data. For segmentation masks, we manually annotate first frame and utilize SAM 2 to produce segmentation masks across frames. For captions, we annotate “{object description} falls.” for all video segments." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.413, + 0.886, + 0.444 + ], + "angle": 0, + "content": "their formulas and our resampling procedure for accounting for differences in fps, is described in Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.451, + 0.888, + 0.512 + ], + "angle": 0, + "content": "Trajectory L2. For each frame in both the generated video and ground truth, we calculate the centroid of the masked region. After doing this, we compute the average \\(L_{2}\\) distance between the centroids of corresponding frames." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.519, + 0.888, + 0.565 + ], + "angle": 0, + "content": "Chamfer Distance (CD). To assess the shape fidelity of objects, we calculate the Chamfer Distance (CD) between the mask regions of the generated video and ground truth." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.572, + 0.888, + 0.633 + ], + "angle": 0, + "content": "Intersection over Union (IoU). We use the Intersection over Union (IoU) metric to evaluate object permanence. The IoU measures objects' degree of overlap between the generated video and ground truth." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.649, + 0.663, + 0.663 + ], + "angle": 0, + "content": "3.4. 
Evaluation Results" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.672, + 0.888, + 0.809 + ], + "angle": 0, + "content": "We evaluate 4 open models including CogVideoX-5B-I2V(Yang et al., 2024c), DynamiCrafter(Xing et al., 2023), Pyramid-Flow(Jin et al., 2024), and Open-Sora-V1.2(Zheng et al., 2024), as well as 4 proprietary models including Sora (OpenAI, 2024), Kling-V1(Kuaishou, 2024), Kling-V1.5(Kuaishou, 2024), and Runway Gen3 (Runway, 2024). We also evaluate OpenSora post-trained through the processes of Supervised Fine-Tuning (PSFT) and Object Reward Optimization (ORO); see Section 4 for details." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.815, + 0.889, + 0.907 + ], + "angle": 0, + "content": "The results of running the baseline models on the benchmark indicate a consistent failure to generate physically accurate dropping behavior, despite the visual realism of their generated frames. Qualitatively, we see common failure cases in Figure 6, such as implausible object deformations, floating, hallucination of new objects, and unrealistic special" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.157, + 0.057, + 0.816, + 0.072 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.086, + 0.887, + 0.305 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.309, + 0.889, + 0.366 + ], + "angle": 0, + "content": "Figure 6. Qualitative comparison of results on real test set (row 1-2), simulated seen test set (row 3-4) and simulated unseen test set (row 5-6). We present the results of popular open-source and commercially available models alongside those of models fine-tuned through our method. 
Existing models often struggle to generate videos depicting objects falling, whereas our PSFT method effectively introduces knowledge of free-fall into the model. ORO enables the model to more accurately learn object motion and shape." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.378, + 0.475, + 0.453 + ], + "angle": 0, + "content": "effects. We further visualize a random subset of generated trajectories on the left of Figure 8. In many cases, the object remains completely static, and sometimes the object even moves upward. When downward motion is present, it is often slow or contains unrealistic horizontal movement." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.472, + 0.294, + 0.49 + ], + "angle": 0, + "content": "4. Physics Post-Training" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.499, + 0.477, + 0.651 + ], + "angle": 0, + "content": "We present a post-training process to address the limitations of current models described in Section 3.4. We utilize simulated videos that demonstrate realistic dropping behavior. Our approach for post-training is inspired by the two-stage pipeline consisting of supervised fine-tuning followed by reward modeling commonly used in LLMs. We find that our pipeline improves performance on both real and simulated evaluations, with greater gains observed in simulation. This is due to the sim-to-real gap, though our approach still shows substantial gains in transferring to real-world data." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.666, + 0.312, + 0.682 + ], + "angle": 0, + "content": "4.1. Simulated Adaptation Data" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.69, + 0.476, + 0.901 + ], + "angle": 0, + "content": "The first stage of our approach involves supervised fine-tuning. We use Kubric (Greff et al., 2022), a simulation and rendering engine designed for scalable video generation, to create simulated videos of objects dropping and colliding with other objects on the ground. 
Each video consists of 1-6 objects dropping onto a (possibly empty) pile of up to 4 objects underneath them. The videos are 2 seconds long, consisting of 32 frames at 16 fps. The objects are sourced from the Google Scanned Objects (GSO) dataset (Downs et al., 2022), which provides true-to-scale 3D models created from real-world scans across diverse categories (examples shown in Figure 4). The camera remains stationary in each video and is oriented parallel to the ground plane. To introduce variability, we randomly sample the camera height"
+ }, + { + "type": "text", + "bbox": [ + 0.496, + 0.8, + 0.887, + 0.906 + ], + "angle": 0, + "content": "We use the pretrained Open-Sora v1.2 (Zheng et al., 2024) model as our base model and fine-tune it on our simulated video dataset. We employ Open-Sora v1.2's rectified flow training objective without modification (Liu et al., 2022). Each fine-tuning experiment is conducted with a batch size of 128 and a learning rate of \\(1\\mathrm{e} - 4\\) on two 80GB NVIDIA A100 GPUs. As shown in Figure 6, fine-tuning with this" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.157, + 0.057, + 0.816, + 0.071 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "table", + "bbox": [ + 0.088, + 0.073, + 0.887, + 0.348 + ], + "angle": 0, + "content": "
MethodRealSim (Seen)Sim (Unseen)
L2 (↓)CD (↓)IoU (↑)L2 (↓)CD (↓)IoU (↑)L2 (↓)CD (↓)IoU (↑)
ProprietarySora (OpenAI, 2024)0.1740.4880.0650.1490.4460.0400.1400.4190.031
Kling-V1 (Kuaishou, 2024)0.1570.4250.0560.1420.4150.0320.1450.4370.028
Kling-V1.5 (Kuaishou, 2024)0.1550.4240.0580.1370.3960.0330.1320.4050.029
Runway Gen3 (Runway, 2024)0.1870.5260.0420.1700.5090.0400.1490.4600.038
OpenCogVideoX-5B-I2V (Yang et al., 2024c)0.1380.3660.0800.1120.3150.0200.1010.2900.020
DynamiCrafter (Xing et al., 2023)0.1870.5040.0210.1570.4850.0390.1360.4300.033
Pyramid-Flow (Jin et al., 2024)0.1750.4850.0620.1260.3520.0590.1300.3810.048
Open-Sora (Zheng et al., 2024)0.1750.5020.0690.1390.4090.0360.1300.3680.034
OursOpen-Sora + PSFT (base)0.0760.1880.1390.0360.0880.1650.0280.0580.129
base + ORO (Seg)0.0750.1830.1420.0330.0760.1700.0320.0630.145
base + ORO (Flow)0.0670.1640.1360.0260.0620.1220.0220.0450.071
base + ORO (Depth)0.0670.1590.1290.0310.0720.1240.0220.0460.096
" + }, + { + "type": "table_caption", + "bbox": [ + 0.084, + 0.357, + 0.889, + 0.429 + ], + "angle": 0, + "content": "Table 1. PisaBench Evaluation Results. This table compares the performance of four proprietary models, four open models, and the models fine-tuned with PSFT and \\(\\mathrm{PSFT + ORO}\\) on our real-world and simulated test set which is decomposed into seen and unseen object splits. Across all metrics, our PSFT models outperform all other baselines, including proprietary models like Sora. Reward modeling further enhances results, with segmentation rewards improving the shape-based IoU metric and optical rewards and depth rewards enhancing the motion-based L2 and CD metrics. This suggests that rewards can be flexibly adjusted to target specific aspects of performance." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.452, + 0.477, + 0.709 + ], + "angle": 0, + "content": "data alone is sufficient to induce realistic dropping behavior in the model. Quantitatively, our PSFT model substantially improves on both our simulated and real-world benchmark, as shown in Table 1. Dataset size. We conduct an ablation study on the number of training samples to understand the amount of data required for optimal performance on our benchmark. We create random subsets from 500 to 20,000 samples and train our model for 5,000 gradient steps on each subset. Notably, as shown in Figure 7, only 5,000 samples are needed to achieve optimal results. Effect of pretraining. Additionally, we investigate the impact of Open-Sora's pre-training on adaptation. We randomly initialize the Open-Sora's denoising network while keeping the pre-trained initialization of the compressor network and train the model on a dataset of 5k training samples. As shown in Figure 8, the learned knowledge from Open-Sora's pretraining plays a critical role in our task." 
+ }, + { + "type": "text", + "bbox": [ + 0.084, + 0.716, + 0.475, + 0.792 + ], + "angle": 0, + "content": "Overall, using PSFT on only 5k samples is sufficient to push Open-Sora's performance past all other evaluated models, including state-of-the-art commercial video generators, by a wide margin. This is made possible by leveraging the knowledge from the sufficiently pre-trained base model." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.808, + 0.376, + 0.823 + ], + "angle": 0, + "content": "4.3. Object Reward Optimization (ORO)" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.832, + 0.476, + 0.892 + ], + "angle": 0, + "content": "In the second stage, we propose Object Reward Optimization (ORO) to use reward gradients to guide the video generation model toward generating videos where the object's motion and shape more closely align with the ground truth." + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.453, + 0.885, + 0.583 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.496, + 0.598, + 0.887, + 0.682 + ], + "angle": 0, + "content": "Figure 8. On the left, we plot random trajectories from the baseline models in Table 1. On the right, we show random trajectories from our fine-tuned model. The baseline trajectories exhibit unrealistic behavior, and most of them stay completely static. On the right, we see the trajectories consistently falling downward with collision and rolling behavior being modeled after the point of contact." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.693, + 0.889, + 0.8 + ], + "angle": 0, + "content": "We follow the VADER framework from (Prabhudesai et al., 2024) and introduce three reward models. The differences between our approach and VADER include: (1) our reward model utilizes both generated videos and ground truth instead of generated videos and conditioning. (2) gradients propagate through all denoising time steps in fine-tuning. 
Consequently, the VADER objective is modified as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.555, + 0.81, + 0.887, + 0.83 + ], + "angle": 0, + "content": "\\[\nJ (\\theta) = \\mathbb {E} _ {\\left(x _ {0}, c\\right) \\sim \\mathcal {D}, x _ {0} ^ {\\prime} \\sim p _ {\\theta} \\left(x _ {0} ^ {\\prime} \\mid c\\right)} \\left[ R \\left(x _ {0} ^ {\\prime}, x _ {0}\\right) \\right] \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.838, + 0.886, + 0.884 + ], + "angle": 0, + "content": "where \\(\\mathcal{D}\\) is the ground truth dataset, \\(p_{\\theta}(.)\\) is a given video diffusion model, \\(x_0^{\\prime}, x_0 \\in \\mathbb{R}^{H \\times W \\times 3}\\) are generated video and ground truth, and \\(c \\in \\mathbb{R}^{H \\times W \\times 3}\\) is the initial image." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.891, + 0.887, + 0.906 + ], + "angle": 0, + "content": "Segmentation Reward. We utilize SAM 2 (Ravi et al.," + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.156, + 0.057, + 0.816, + 0.072 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.085, + 0.475, + 0.148 + ], + "angle": 0, + "content": "2024) to generate segmentation masks across frames for generated videos. We define segmentation reward as the IoU between the dropping object's mask in generated video and the mask from the ground truth simulated segmentation." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.153, + 0.476, + 0.215 + ], + "angle": 0, + "content": "Optical Flow Reward. We utilize RAFT (Teed & Deng, 2020) to generate generated video's optical flow \\( V^{\\mathrm{gen}} \\) and ground truth's optical flow \\( V^{\\mathrm{gt}} \\). 
We define the optical flow reward as \\( R(x_0', x_0) = -|V^{\\mathrm{gen}} - V^{\\mathrm{gt}}| \\)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.221, + 0.476, + 0.282 + ], + "angle": 0, + "content": "Depth Reward. We utilize Depth-Anything-V2 (Yang et al., 2024a) to generate generated video's depth map \\( D^{\\mathrm{gen}} \\) and ground truth's depth map \\( D^{\\mathrm{gt}} \\). We define the optical flow reward as \\( R(x_0', x_0) = -|D^{\\mathrm{gen}} - D^{\\mathrm{gt}}| \\)." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.289, + 0.457, + 0.305 + ], + "angle": 0, + "content": "Details on implementation can be found in Appendix C." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.312, + 0.476, + 0.418 + ], + "angle": 0, + "content": "We begin from the checkpoint of the first stage, which is trained on 5,000 samples trained over 5,000 gradient steps. We then fine-tune the model with ORO on the simulated dataset, using a batch size of 1 and two 80GB NVIDIA A100 GPUs for each fine-tuning experiment. We set a learning rate of \\(1\\mathrm{e} - 6\\) for segmentation reward and depth reward and \\(1\\mathrm{e} - 5\\) for optical flow." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.425, + 0.476, + 0.531 + ], + "angle": 0, + "content": "As shown in Table 1, incorporating ORO in reward modeling further improves performance. Additionally, each reward function enhances the aspect of physicality that aligns with its intended purpose—segmentation rewards improve shape accuracy, while flow rewards and depth rewards improve motion accuracy. This demonstrates the process is both modular and interpretable." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.55, + 0.421, + 0.568 + ], + "angle": 0, + "content": "5. 
Assessing Learned Physical Behavior" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.576, + 0.476, + 0.698 + ], + "angle": 0, + "content": "Having introduced our post-training approaches in Section 4, we probe into the model's understanding of the interaction between gravity and perspective—the two laws that determine the dynamics of our videos. We first test if the learned physical behavior of our model can generalize to dropping heights and depths beyond its training distribution. Then, we study the ability of the model to learn the probability distribution induced by the uncertainty of perspective." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.714, + 0.437, + 0.73 + ], + "angle": 0, + "content": "5.1. Generalization to Unseen Depths and Heights" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.737, + 0.476, + 0.829 + ], + "angle": 0, + "content": "Depth and height are the main factors that affect the dynamics of a falling object in our videos. We can see this by combining the laws of gravity with perspective under our camera assumptions to model the object's image \\(y\\) coordinate as a function of time (further details on our coordinate system are described in Appendix G):" + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.839, + 0.476, + 0.873 + ], + "angle": 0, + "content": "\\[\ny (t) = \\frac {f}{Z} \\left(Y _ {0} - \\frac {1}{2} g t ^ {2}\\right). \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.891, + 0.476, + 0.906 + ], + "angle": 0, + "content": "From Equation (2), we see that the random variables that af" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.085, + 0.885, + 0.131 + ], + "angle": 0, + "content": "fect object motion are \\( Z \\) (depth) and \\( Y \\) (height) (the camera focal length \\( f \\) is fixed). Thus, we are interested in testing generalization on unseen values of \\( Y \\) and \\( Z \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.496, + 0.138, + 0.887, + 0.275 + ], + "angle": 0, + "content": "We create a simulated test set in which a single object is dropped from varying depths and heights, using objects and backgrounds unseen during training. We uniformly sample depth and height values (in meters) from the Cartesian product of the ranges [1, 5] and [0.5, 2.5], respectively. The camera height is fixed at \\(0.5m\\), and depth-height pairs outside the camera viewing frustum are discarded. A sample is in-distribution (ID) if its dropping depth and height both fall in the range [1, 3] and [0.5, 1.5]." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.282, + 0.887, + 0.343 + ], + "angle": 0, + "content": "Since we have access to the ground truth dropping time in simulation, we also employ a dropping time error, a metric we describe in Appendix B. Our analysis in Table 2 shows that performance degrades for out-of-distribution scenarios." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.35, + 0.887, + 0.411 + ], + "angle": 0, + "content": "Since depth and height are the main physical quantities that affect falling dynamics, this finding indicates that our model may struggle to learn a fully generalizable law that accounts for the interaction of perspective and gravity." + }, + { + "type": "table", + "bbox": [ + 0.499, + 0.423, + 0.887, + 0.482 + ], + "angle": 0, + "content": "
SettingL2 (↓)Chamfer (↓)IOU (↑)Time Error (↓)
ID0.0360.0880.1550.091
OOD0.0440.1430.0490.187
" + }, + { + "type": "table_caption", + "bbox": [ + 0.497, + 0.491, + 0.887, + 0.548 + ], + "angle": 0, + "content": "Table 2. Results of our metrics on in-distribution (ID) and out-of-distribution (OOD) depth-height combinations. The values used for depth range from \\(1 - 5\\mathrm{m}\\) (ID range [1,3]) and height values range from 0.5-2.5 (ID range [0.5, 1.5])." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.572, + 0.693, + 0.587 + ], + "angle": 0, + "content": "5.2. Distributional Analysis" + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.604, + 0.653, + 0.714 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.659, + 0.603, + 0.885, + 0.714 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.496, + 0.729, + 0.887, + 0.786 + ], + "angle": 0, + "content": "Figure 9. Demonstration of ambiguity in 2D perspective projections. Each of the three clouds appears the exact same in the camera's image. The right side shows how we perform a scale and translation augmentation to generate deliberately ambiguous data." + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.8, + 0.887, + 0.906 + ], + "angle": 0, + "content": "The evolution of a physical system is not uniquely determined by a single initial image, since the lossy uncertainty of perspective induces a distribution of possible outcomes as shown in Figure 9. 
An ideal video world model should (1) output videos that are faithful to the evolution of some plausible world state and (2) provide accurate coverage across the entire distribution of the world that is possible from" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.923, + 0.492, + 0.935 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.156, + 0.057, + 0.816, + 0.073 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.086, + 0.477, + 0.176 + ], + "angle": 0, + "content": "its conditioning signal. In this section, we examine these two facets by studying \\( p(t|y) \\): the distribution of dropping times possible from an object at coordinate \\( y \\) in the image plane. To do this, we create a simulated dataset that has a much wider distribution \\( p(t|y) \\) than our PSFT dataset. See Appendix F for more details on its construction." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.187, + 0.475, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.303, + 0.476, + 0.373 + ], + "angle": 0, + "content": "Figure 10. Examples of model trajectories lifted to 3D. The blue line represents the height of the camera ray passing through the bottom of the dropping object as a function of depth. The set of possible dropping trajectories at a given depth are depicted in gray. The lifted trajectory of the model is depicted in green." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.389, + 0.473, + 0.531 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.549, + 0.476, + 0.605 + ], + "angle": 0, + "content": "Figure 11. Visualizing \\( p(t|y) \\) misalignment for different images. 
Green shows the ground-truth CDF, orange is the 32-frame quantized version, and blue is the empirical CDF of 128 different samples of dropping times from the model." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.612, + 0.384, + 0.627 + ], + "angle": 0, + "content": "Testing (1): 3D faithfulness of trajectories." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.635, + 0.476, + 0.846 + ], + "angle": 0, + "content": "After training our model on this new dataset, we test whether its trajectories are consistent with a valid 3D world state. We first obtain an estimated dropping time from generated videos using the procedure described in Section 5.1. Using knowledge of the camera position, focal length, sensor width, and \\( y \\), we can obtain an implied depth and height of the trajectory. We can then back-project the video trajectory to 3D and analyze whether they constitute physically accurate trajectories. We give further details about this process in Appendix G. As shown in Figure 10, we find that our model's lifted trajectories consistently align with the 3D trajectory at the height and depth implied by its dropping time, giving evidence that the model's visual outputs are faithful to some plausible real-world state." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.853, + 0.347, + 0.869 + ], + "angle": 0, + "content": "Testing (2): distributional alignment." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.876, + 0.475, + 0.907 + ], + "angle": 0, + "content": "Going beyond the level of individual trajectories, we study the model's learned conditional distribution \\( p(t|y) \\). We" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.085, + 0.886, + 0.176 + ], + "angle": 0, + "content": "create 50 different initial images with differing values of \\( y \\), generate 128 different videos from each, and estimate the dropping time in each video. 
Using the laws of gravity, the laws of perspective, and the assumption of uniform depth sampling in our dataset, we can analytically derive the probability \\( p(t|y) \\) as" + }, + { + "type": "equation", + "bbox": [ + 0.54, + 0.197, + 0.887, + 0.238 + ], + "angle": 0, + "content": "\\[\np (t | y) = \\left\\{ \\begin{array}{l l} \\frac {g t}{\\left(Z _ {\\max } - Z _ {\\min }\\right) \\beta}, & t _ {\\min } \\leq t \\leq t _ {\\max } \\\\ 0, & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.257, + 0.886, + 0.288 + ], + "angle": 0, + "content": "where \\(\\beta\\) is a constant that depends on \\(f\\), \\(y\\) and the camera height. The derivation is given in Appendix E." + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.295, + 0.887, + 0.536 + ], + "angle": 0, + "content": "We then measure goodness-of-fit for each of the 50 experiments using the Kolmogorov-Smirnov (KS) test (Massey Jr, 1951). The null hypothesis of the KS test is that the two distributions being compared are equal, and we consider p-values less than 0.05 as evidence of misalignment. Since our measured times have limited precision and can only take 32 distinct values—due to estimating the contact frame—we approximate the ground truth \\(p(t|y)\\) using a Monte Carlo method. We sample 1000 values from the ground truth distribution and then quantized them into 32 bins corresponding to their frame, which we use as ground truth observations in the KS test. We find that in all 50/50 cases, the p-value from the test is less than 0.05, which provides evidence that the model does not learn the correct distribution of dropping times. We visualize the misalignment between the empirical CDF of the model's in Figure 11." 
+ }, + { + "type": "text", + "bbox": [ + 0.497, + 0.544, + 0.887, + 0.604 + ], + "angle": 0, + "content": "In summary, while our model's trajectories show promising tendencies to ground themselves to plausible 3D world states, the range of possible outputs from the model does not align with the ground truth distribution." + }, + { + "type": "title", + "bbox": [ + 0.498, + 0.624, + 0.616, + 0.639 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.65, + 0.887, + 0.906 + ], + "angle": 0, + "content": "This work studies post-training as an avenue for adapting a pre-trained video generator into world models. We introduce a post-training strategy that is highly effective in aligning our model. Our work raises interesting insights into the learned distributions of generative models. Qualitatively, large scale image or video generative models appear to excel at generating likely samples from the data distribution, but this alone does not imply that they match the data distribution well in its entirety. As long as a model is able to generate likely samples, global distributional misalignment is not necessarily a problem for content creation. However, this problem becomes critical for world models, where alignment across the entire distribution is necessary for faithful world simulation. The insights revealed by our study, made possible by our constrained and tractable setting, indicate that although post-training improves per-sample accuracy, general distributional alignment remains unsolved." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.156, + 0.057, + 0.816, + 0.072 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.084, + 0.238, + 0.101 + ], + "angle": 0, + "content": "Acknowledgment" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.11, + 0.477, + 0.246 + ], + "angle": 0, + "content": "We thank Boyang Zheng, Srivats Poddar, Ellis Brown, Shengbang Tong, Shusheng Yang, Jihan Yang, Daohan Lu, Anjali Gupta and Ziteng Wang for their help with data collection. We thank Jiraphon Yenphraphai for valuable assistance in setting up our simulation code. We thank Runway and Kling AI for providing API credit. SX also acknowledges support from Intel AI SRS, Korean AI Research Hub, Open Path AI Foundation, Amazon Research Award, Google TRC program, and NSF Award IIS-2443404." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.265, + 0.182, + 0.281 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.289, + 0.476, + 0.35 + ], + "angle": 0, + "content": "Agarwal, N., Ali, A., Bala, M., Balaji, Y., Barker, E., Cai, T., Chattopadhyay, P., Chen, Y., Cui, Y., Ding, Y., et al. Cosmos world foundation model platform for physical AI. arXiv preprint arXiv:2501.03575, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.357, + 0.474, + 0.388 + ], + "angle": 0, + "content": "Baillargeon, R. Infants' physical world. Current directions in psychological science, 13(3):89-94, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.396, + 0.476, + 0.457 + ], + "angle": 0, + "content": "Bansal, H., Lin, Z., Xie, T., Zong, Z., Yarom, M., Bitton, Y., Jiang, C., Sun, Y., Chang, K.-W., and Grover, A. Videophy: Evaluating physical commonsense for video generation. 
arXiv preprint arXiv:2406.03520, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.465, + 0.476, + 0.525 + ], + "angle": 0, + "content": "Battaglia, P. W., Hamrick, J. B., and Tenenbaum, J. B. Simulation as an engine of physical scene understanding. Proceedings of the National Academy of Sciences, 110 (45):18327-18332, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.533, + 0.476, + 0.609 + ], + "angle": 0, + "content": "Bear, D. M., Wang, E., Mrowca, D., Binder, F. J., Tung, H.-Y. F., Pramod, R., Holdaway, C., Tao, S., Smith, K., Sun, F.-Y., et al. Physion: Evaluating physical prediction from vision in humans and machines. arXiv preprint arXiv:2106.08261, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.617, + 0.473, + 0.647 + ], + "angle": 0, + "content": "Community, B. O. Blender - a 3d modelling and rendering package, 2018. URL http://www.blender.org." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.655, + 0.473, + 0.686 + ], + "angle": 0, + "content": "Coumans, E. et al. Bullet physics engine. Open Source Software: http://bulletphysics.org, 1(3):84, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.693, + 0.473, + 0.723 + ], + "angle": 0, + "content": "Craik, K. J. W. The nature of explanation, volume 445. CUP Archive, 1967." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.732, + 0.476, + 0.821 + ], + "angle": 0, + "content": "Downs, L., Francis, A., Koenig, N., Kinman, B., Hickman, R., Reymann, K., McHugh, T. B., and Vanhoucke, V. Google scanned objects: A high-quality dataset of 3d scanned household items. In 2022 International Conference on Robotics and Automation (ICRA), pp. 2553-2560. IEEE, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.83, + 0.476, + 0.906 + ], + "angle": 0, + "content": "Greff, K., Belletti, F., Beyer, L., Doersch, C., Du, Y., Duckworth, D., Fleet, D. J., Gnanapragasam, D., Golemo, F., Herrmann, C., et al. Kubric: A scalable dataset generator. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 3749-3761, 2022." + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.289, + 0.476, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.085, + 0.888, + 0.131 + ], + "angle": 0, + "content": "Ha, D. and Schmidhuber, J. Recurrent world models facilitate policy evolution. Advances in neural information processing systems, 31, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.142, + 0.886, + 0.187 + ], + "angle": 0, + "content": "Hafner, D., Lillicrap, T., Ba, J., and Norouzi, M. Dream to control: Learning behaviors by latent imagination. arXiv preprint arXiv:1912.01603, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.198, + 0.886, + 0.242 + ], + "angle": 0, + "content": "Hafner, D., Pasukonis, J., Ba, J., and Lillicrap, T. Mastering diverse domains through world models. arXiv preprint arXiv:2301.04104, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.254, + 0.887, + 0.299 + ], + "angle": 0, + "content": "Hamrick, J. B., Battaglia, P. W., Griffiths, T. L., and Tenenbaum, J. B. Inferring mass in complex scenes by mental simulation. Cognition, 157:61-76, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.31, + 0.886, + 0.371 + ], + "angle": 0, + "content": "Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., and Hochreiter, S. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.381, + 0.887, + 0.457 + ], + "angle": 0, + "content": "Huang, Z., He, Y., Yu, J., Zhang, F., Si, C., Jiang, Y., Zhang, Y., Wu, T., Jin, Q., Chanpaisit, N., et al. Vbench: Comprehensive benchmark suite for video generative models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 21807-21818, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.468, + 0.887, + 0.528 + ], + "angle": 0, + "content": "Jin, Y., Sun, Z., Li, N., Xu, K., Jiang, H., Zhuang, N., Huang, Q., Song, Y., Mu, Y., and Lin, Z. Pyramidal flow matching for efficient video generative modeling. arXiv preprint arXiv:2410.05954, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.539, + 0.887, + 0.599 + ], + "angle": 0, + "content": "Kang, B., Yue, Y., Lu, R., Lin, Z., Zhao, Y., Wang, K., Huang, G., and Feng, J. How far is video generation from world model: A physical law perspective. arXiv preprint arXiv:2411.02385, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.61, + 0.888, + 0.64 + ], + "angle": 0, + "content": "Kuaishou. Kling, 2024. URL https://kling.kuaishou.com. Accessed: 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.651, + 0.887, + 0.697 + ], + "angle": 0, + "content": "Lake, B. M., Ullman, T. D., Tenenbaum, J. B., and Gershman, S. J. Building machines that learn and think like people. Behavioral and brain sciences, 40:e253, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.707, + 0.887, + 0.751 + ], + "angle": 0, + "content": "LeCun, Y. A path towards autonomous machine intelligence version 0.9.2, 2022-06-27. Open Review, 62(1):1-62, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.763, + 0.887, + 0.809 + ], + "angle": 0, + "content": "Liu, X., Gong, C., and Liu, Q. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.82, + 0.887, + 0.85 + ], + "angle": 0, + "content": "Luma. Dream machine, 2024. URL https://lumalabs.ai/dream-machine. Accessed: 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.861, + 0.887, + 0.906 + ], + "angle": 0, + "content": "Massey Jr, F. J. The kolmogorov-smirnov test for goodness of fit. Journal of the American statistical Association, 46 (253):68-78, 1951." 
+ }, + { + "type": "list", + "bbox": [ + 0.5, + 0.085, + 0.888, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.924, + 0.492, + 0.935 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.157, + 0.057, + 0.816, + 0.072 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.085, + 0.479, + 0.162 + ], + "angle": 0, + "content": "Meng, F., Liao, J., Tan, X., Shao, W., Lu, Q., Zhang, K., Cheng, Y., Li, D., Qiao, Y., and Luo, P. Towards world simulator: Crafting physical commonsense-based benchmark for video generation. arXiv preprint arXiv:2410.05363, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.171, + 0.479, + 0.232 + ], + "angle": 0, + "content": "Motamed, S., Culp, L., Swersky, K., Jaini, P., and Geirhos, R. Do generative video models learn physical principles from watching videos? arXiv preprint arXiv:2501.09038, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.242, + 0.478, + 0.273 + ], + "angle": 0, + "content": "NBC. Coca-Cola causes controversy with ai-made ad, 2025. Accessed: 2025-01-17." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.283, + 0.478, + 0.313 + ], + "angle": 0, + "content": "OpenAI. Sora, 2024. URL https://sora.com. Accessed: 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.323, + 0.477, + 0.37 + ], + "angle": 0, + "content": "Prabhudesai, M., Mendonca, R., Qin, Z., Fragkiadaki, K., and Pathak, D. Video diffusion alignment via reward gradients. arXiv preprint arXiv:2407.08737, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.379, + 0.478, + 0.485 + ], + "angle": 0, + "content": "Ravi, N., Gabeur, V., Hu, Y.-T., Hu, R., Ryali, C., Ma, T., Khedr, H., Rädle, R., Rolland, C., Gustafson, L., Mintun, E., Pan, J., Alwala, K. 
V., Carion, N., Wu, C.-Y., Girshick, R., Dollár, P., and Feichtenhofer, C. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024. URL https://arxiv.org/abs/2408.00714." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.496, + 0.478, + 0.54 + ], + "angle": 0, + "content": "Runway. Gen-3 alpha, 2024. URL https://runwayml.com/research/introducing-gen-3alpha. Accessed: 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.551, + 0.477, + 0.582 + ], + "angle": 0, + "content": "Runway. AIFF 2025: AI Film Festival, 2025. URL https://aiff.runwayml.com/. Accessed: 2025-01-17." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.592, + 0.477, + 0.653 + ], + "angle": 0, + "content": "Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., and Chen, X. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.663, + 0.477, + 0.708 + ], + "angle": 0, + "content": "Spelke, E. S., Breinlinger, K., Macomber, J., and Jacobson, K. Origins of knowledge. Psychological review, 99(4): 605, 1992." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.718, + 0.478, + 0.779 + ], + "angle": 0, + "content": "Srivastava, N., Mansimov, E., and Salakhudinov, R. Unsupervised learning of video representations using lstms. In International conference on machine learning, pp. 843-852. PMLR, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.79, + 0.478, + 0.865 + ], + "angle": 0, + "content": "Teed, Z. and Deng, J. Raft: Recurrent all-pairs field transforms for optical flow. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part II 16, pp. 402-419. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.876, + 0.478, + 0.907 + ], + "angle": 0, + "content": "Ullman, T. D., Spelke, E., Battaglia, P., and Tenenbaum, J. B. 
Mind games: Game engines as an architecture for" + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.085, + 0.479, + 0.907 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.085, + 0.886, + 0.115 + ], + "angle": 0, + "content": "intuitive physics. Trends in cognitive sciences, 21(9): 649-665, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.126, + 0.887, + 0.186 + ], + "angle": 0, + "content": "Unterthiner, T., Van Steenkiste, S., Kurach, K., Marinier, R., Michalski, M., and Gelly, S. Towards accurate generative models of video: A new metric & challenges. arXiv preprint arXiv:1812.01717, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.196, + 0.887, + 0.257 + ], + "angle": 0, + "content": "Wu, J., Yildirim, I., Lim, J. J., Freeman, B., and Tenenbaum, J. Galileo: Perceiving physical object properties by integrating a physics engine with deep learning. Advances in neural information processing systems, 28, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.266, + 0.887, + 0.327 + ], + "angle": 0, + "content": "Xing, J., Xia, M., Zhang, Y., Chen, H., Yu, W., Liu, H., Wang, X., Wong, T.-T., and Shan, Y. Dynamicrafter: Animating open-domain images with video diffusion priors. arXiv preprint arXiv:2310.12190, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.336, + 0.887, + 0.398 + ], + "angle": 0, + "content": "Xue, T., Wu, J., Bouman, K., and Freeman, B. Visual dynamics: Probabilistic future frame synthesis via cross convolutional networks. Advances in neural information processing systems, 29, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.407, + 0.887, + 0.452 + ], + "angle": 0, + "content": "Yang, L., Kang, B., Huang, Z., Zhao, Z., Xu, X., Feng, J., and Zhao, H. Depth anything v2. arXiv:2406.09414, 2024a." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.462, + 0.887, + 0.509 + ], + "angle": 0, + "content": "Yang, M., Du, Y., Ghasemipour, K., Tompson, J., Schuurmans, D., and Abbeel, P. Learning interactive real-world simulators. arXiv preprint arXiv:2310.06114, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.518, + 0.887, + 0.579 + ], + "angle": 0, + "content": "Yang, S., Walker, J., Parker-Holder, J., Du, Y., Bruce, J., Barreto, A., Abbeel, P., and Schuurmans, D. Video as the new language for real-world decision making. arXiv preprint arXiv:2402.17139, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.589, + 0.887, + 0.663 + ], + "angle": 0, + "content": "Yang, Z., Teng, J., Zheng, W., Ding, M., Huang, S., Xu, J., Yang, Y., Hong, W., Zhang, X., Feng, G., et al. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.5, + 0.673, + 0.887, + 0.735 + ], + "angle": 0, + "content": "Zheng, Z., Peng, X., Yang, T., Shen, C., Li, S., Liu, H., Zhou, Y., Li, T., and You, Y. Open-sora: Democratizing efficient video production for all, March 2024. URL https://github.com/hpcaitech/Open-Sora." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.085, + 0.887, + 0.735 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.156, + 0.057, + 0.816, + 0.072 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.084, + 0.423, + 0.101 + ], + "angle": 0, + "content": "A. Discussion of Image-to-Video setting." 
+ }, + { + "type": "text", + "bbox": [ + 0.085, + 0.11, + 0.885, + 0.201 + ], + "angle": 0, + "content": "We note that our choice of single-image input, as opposed to multi-frame input, comes with some trade-offs. We choose the image-to-video setting because it is widely supported among many different models, allowing us to make effective comparisons across the current state-of-the-art. However, only conditioning on a single frame introduces significant ambiguity. Due to the loss of information caused by projecting the 3D world through perspective, it may not be possible to directly infer the size of the object or its height. In practice, we find our metrics are still reliable signals of task success, but we still study the problem of ambiguity more extensively in Section 5.2." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.22, + 0.235, + 0.236 + ], + "angle": 0, + "content": "B. Metric details." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.246, + 0.885, + 0.333 + ], + "angle": 0, + "content": "We propose three metrics to assess the accuracy of trajectories, shape fidelity, and object permanence. Each of our metrics compare frames from the ground-truth video with the generated video. Because different models can operate at different fps, we perform fps alignment as part of our evaluation process. To perform fps alignment, we map each frame index of the generated videos to the ground truth using \\( f_{\\mathrm{gen}} \\) and \\( f_{\\mathrm{gt}} \\), where \\( f_{\\mathrm{gen}} \\) and \\( f_{\\mathrm{gt}} \\) are the fps of generated video and ground truth respectively. 
For \\( i \\)-th frame in the generated video, we find the corresponding aligned frame index \\( j \\) in the ground truth video:" + }, + { + "type": "equation", + "bbox": [ + 0.417, + 0.333, + 0.887, + 0.365 + ], + "angle": 0, + "content": "\\[\nj = \\operatorname {r o u n d} \\left(i \\cdot \\frac {f _ {\\text {g e n}}}{f _ {\\mathrm {g t}}}\\right) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.37, + 0.886, + 0.414 + ], + "angle": 0, + "content": "Through fps alignment, we downsample the ground truth video to match the frame number of the generated video. We denote the downsampled ground truth as \\(\\{I_i^{\\mathrm{gt}}\\}_{i = 1}^N\\) and the generated video as \\(\\{I_i^{\\mathrm{gen}}\\}_{i = 1}^N\\), where \\(N\\) is the number of frames in the generated video." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.415, + 0.886, + 0.446 + ], + "angle": 0, + "content": "Trajectory L2. For each frame in both the generated video and ground truth, we calculate the centroid of the masked region. We then compute \\(L_{2}\\) distance between the centroids of corresponding frames:" + }, + { + "type": "equation", + "bbox": [ + 0.39, + 0.455, + 0.887, + 0.497 + ], + "angle": 0, + "content": "\\[\nL _ {2} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\| C _ {i} ^ {\\text {g e n}} - C _ {i} ^ {\\mathrm {g t}} \\| _ {2} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.508, + 0.885, + 0.539 + ], + "angle": 0, + "content": "where \\( C_i^{\\mathrm{gen}}, C_i^{\\mathrm{gt}} \\in \\mathbb{R}^2 \\) are the centroids of the dropping object in the \\( i \\)-th frame of generated video and the ground truth respectively." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.539, + 0.885, + 0.57 + ], + "angle": 0, + "content": "Chamfer Distance (CD). 
To assess the shape fidelity of objects, we calculate the Chamfer Distance (CD) between the mask regions of the generated video and ground truth:" + }, + { + "type": "equation", + "bbox": [ + 0.256, + 0.58, + 0.713, + 0.623 + ], + "angle": 0, + "content": "\\[\n\\mathrm {C D} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left(\\frac {1}{| P _ {i} |} \\sum_ {p \\in P _ {i}} \\min _ {q \\in Q _ {i}} \\| p - q \\| _ {2} + \\frac {1}{| Q _ {i} |} \\sum_ {q \\in Q _ {i}} \\min _ {p \\in P _ {i}} \\| q - p \\| _ {2}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.634, + 0.885, + 0.667 + ], + "angle": 0, + "content": "where \\( P_{i} = \\{p_{j}\\}_{j = 1}^{|P_{i}|} \\) and \\( Q_{i} = \\{q_{j}\\}_{j = 1}^{|Q_{i}|} \\) are the sets of mask points in the \\( i \\)-th frame of the generated video and ground truth respectively." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.667, + 0.885, + 0.698 + ], + "angle": 0, + "content": "Intersection over Union (IoU). We use the Intersection over Union (IoU) metric to evaluate object permanence. IoU measures objects' degree of overlap between the generated video and ground truth. This is formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.708, + 0.887, + 0.75 + ], + "angle": 0, + "content": "\\[\n\\mathrm {I o U} = \\frac {1}{| N |} \\sum_ {i = 1} ^ {N} \\frac {\\left| M _ {i} ^ {\\text {g e n}} \\cap M _ {i} ^ {\\mathrm {g t}} \\right|}{\\left| M _ {i} ^ {\\text {g e n}} \\cup M _ {i} ^ {\\mathrm {g t}} \\right|} \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.759, + 0.885, + 0.793 + ], + "angle": 0, + "content": "where \\( M_{i}^{\\mathrm{gen}} \\), \\( M_{i}^{\\mathrm{gt}} \\in \\{0,1\\}^{H\\times W} \\) are binary segmentation masks of the falling object in the \\( i \\)-th frame of the generated and ground truth videos respectively." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.799, + 0.885, + 0.877 + ], + "angle": 0, + "content": "Time error. 
When testing on videos generated in simulation, we can provide a timing error. From the dropping height \\( Y_{0} \\) of the ground truth video, which we have access to from the simulator, we can derive \\( t_{\\mathrm{drop}} = \\sqrt{Y_0\\frac{2}{g}} \\). We then obtain a dropping time from the model's output by estimating the frame of impact as the first frame \\( F \\) whose centroid velocity in the \\( y \\) direction is negative. If \\( t_{\\mathrm{drop}} \\) occurs in between \\( F \\) and \\( F - 1 \\), then we define the time error \\( E_{\\mathrm{time}} \\) as zero. Otherwise, we define the time error as" + }, + { + "type": "equation", + "bbox": [ + 0.327, + 0.877, + 0.887, + 0.911 + ], + "angle": 0, + "content": "\\[\nE _ {\\text {t i m e}} = \\min \\left(\\left| \\frac {F - 1}{\\mathrm {f p s}} - t _ {\\text {d r o p}} \\right|, \\left| \\frac {F}{\\mathrm {f p s}} - t _ {\\text {d r o p}} \\right|\\right). \\tag {7}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.495, + 0.935 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.156, + 0.057, + 0.816, + 0.073 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.084, + 0.36, + 0.101 + ], + "angle": 0, + "content": "C. ORO implementation details." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.11, + 0.887, + 0.141 + ], + "angle": 0, + "content": "In our setting, we do not cut the gradient after step \\(k\\) like VADER. 
The gradient \\(\\nabla_{\\theta}R(x_0',x_0)\\) backpropagates through all diffusion timesteps and update the model weights \\(\\theta\\):" + }, + { + "type": "equation", + "bbox": [ + 0.349, + 0.15, + 0.887, + 0.192 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {\\theta} \\left(R \\left(x _ {0} ^ {\\prime}, x _ {0}\\right)\\right) = \\sum_ {t = 0} ^ {T} \\frac {\\partial R \\left(x _ {0} ^ {\\prime} , x _ {0}\\right)}{\\partial x _ {t}} \\cdot \\frac {\\partial x _ {t}}{\\partial \\theta} \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.201, + 0.348, + 0.216 + ], + "angle": 0, + "content": "where \\(T\\) is the total diffusion timesteps." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.223, + 0.887, + 0.252 + ], + "angle": 0, + "content": "Segmentation Reward. We utilize SAM 2 (Ravi et al., 2024) to generate segmentation masks across frames for generated video:" + }, + { + "type": "equation", + "bbox": [ + 0.416, + 0.266, + 0.887, + 0.283 + ], + "angle": 0, + "content": "\\[\nM ^ {\\text {g e n}} = \\operatorname {S A M} - 2 \\left(x _ {0}\\right) \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.295, + 0.887, + 0.34 + ], + "angle": 0, + "content": "where \\( M^{\\mathrm{gen}} \\) denotes the masks of the falling object in the generated video. We obtain ground truth masks \\( M^{\\mathrm{gt}} \\) using Kubric (Greff et al., 2022). To avoid non-differentiable reward, we use Sigmoid to normalize mask logits of generated video instead of converting them to binary masks. 
We use IoU between \\( M^{\\mathrm{gen}} \\) and \\( M^{\\mathrm{gt}} \\) as reward function:" + }, + { + "type": "equation", + "bbox": [ + 0.385, + 0.351, + 0.887, + 0.368 + ], + "angle": 0, + "content": "\\[\nR \\left(x _ {0} ^ {\\prime}, x _ {0}\\right) = \\operatorname {I o U} \\left(M ^ {\\text {g e n}}, M ^ {\\text {g t}}\\right) \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.379, + 0.592, + 0.394 + ], + "angle": 0, + "content": "Maximizing objective 1 is equivalent to minimizing the following objective:" + }, + { + "type": "equation", + "bbox": [ + 0.314, + 0.404, + 0.887, + 0.423 + ], + "angle": 0, + "content": "\\[\nJ (\\theta) = \\mathbb {E} _ {\\left(x _ {0}, c\\right) \\sim \\mathcal {D}, x _ {0} ^ {\\prime} \\sim p _ {\\theta} \\left(x _ {0} ^ {\\prime} \\mid c\\right)} \\left[ 1 - \\operatorname {I o U} \\left(M ^ {\\text {g e n}}, M ^ {\\text {g t}}\\right) \\right] \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.432, + 0.887, + 0.478 + ], + "angle": 0, + "content": "This objective constrains the position and shape of the generated object in the video, encouraging a greater intersection with the object region in the ground truth video. The model learns to generate more accurate object positions and shapes through training with this objective." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.485, + 0.887, + 0.514 + ], + "angle": 0, + "content": "Optical Flow Reward. 
We utilize RAFT (Teed & Deng, 2020) to generate optical flow for both generated videos and ground truth:" + }, + { + "type": "equation", + "bbox": [ + 0.42, + 0.514, + 0.887, + 0.537 + ], + "angle": 0, + "content": "\\[\nV ^ {\\text {g e n}} = \\operatorname {R A F T} \\left(x _ {0} ^ {\\prime}\\right) \\tag {12}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.429, + 0.533, + 0.551, + 0.549 + ], + "angle": 0, + "content": "\\[\nV ^ {\\mathrm {g t}} = \\operatorname {R A F T} (x _ {0})\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.555, + 0.82, + 0.571 + ], + "angle": 0, + "content": "where \\( V^{\\mathrm{gen}} \\), \\( V^{\\mathrm{gt}} \\) denote the optical flows of generated videos and ground truth. We define the reward as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.392, + 0.581, + 0.887, + 0.599 + ], + "angle": 0, + "content": "\\[\nR \\left(x _ {0} ^ {\\prime}, x _ {0}\\right) = - \\left| V ^ {\\text {g e n}} - V ^ {\\text {g t}} \\right| \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.608, + 0.592, + 0.624 + ], + "angle": 0, + "content": "Maximizing objective 1 is equivalent to minimizing the following objective:" + }, + { + "type": "equation", + "bbox": [ + 0.341, + 0.634, + 0.887, + 0.653 + ], + "angle": 0, + "content": "\\[\nJ (\\theta) = \\mathbb {E} _ {\\left(x _ {0}, c\\right) \\sim \\mathcal {D}, x _ {0} ^ {\\prime} \\sim p _ {\\theta} \\left(x _ {0} ^ {\\prime} \\mid c\\right)} \\left[ \\left| V ^ {\\text {g e n}} - V ^ {\\text {g t}} \\right| \\right] \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.662, + 0.887, + 0.692 + ], + "angle": 0, + "content": "This objective constrains the motion of the generated object in the video. The model learns to generate more accurate physical motion through training with this objective." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.699, + 0.887, + 0.729 + ], + "angle": 0, + "content": "Depth Reward. 
We utilize Depth-Anything-V2 (Yang et al., 2024a) to generate optical depth maps for both generated videos and ground truth:" + }, + { + "type": "equation", + "bbox": [ + 0.371, + 0.729, + 0.887, + 0.751 + ], + "angle": 0, + "content": "\\[\nD ^ {\\text {g e n}} = \\text {D e p t h - A n y t h i n g - V 2} \\left(x _ {0} ^ {\\prime}\\right) \\tag {15}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.748, + 0.599, + 0.763 + ], + "angle": 0, + "content": "\\[\nD ^ {\\mathrm {g t}} = \\text {D e p t h - A n y t h i n g - V 2} (x _ {0})\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.77, + 0.813, + 0.786 + ], + "angle": 0, + "content": "where \\( D^{\\mathrm{gen}} \\), \\( D^{\\mathrm{gt}} \\) denote the depth maps of generated videos and ground truth. We define the reward as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.391, + 0.796, + 0.887, + 0.813 + ], + "angle": 0, + "content": "\\[\nR \\left(x _ {0} ^ {\\prime}, x _ {0}\\right) = - \\left| D ^ {\\text {g e n}} - D ^ {\\mathrm {g t}} \\right| \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.823, + 0.592, + 0.838 + ], + "angle": 0, + "content": "Maximizing objective 1 is equivalent to minimizing the following objective:" + }, + { + "type": "equation", + "bbox": [ + 0.34, + 0.849, + 0.887, + 0.868 + ], + "angle": 0, + "content": "\\[\nJ (\\theta) = \\mathbb {E} _ {\\left(x _ {0}, c\\right) \\sim \\mathcal {D}, x _ {0} ^ {\\prime} \\sim p _ {\\theta} \\left(x _ {0} ^ {\\prime} \\mid c\\right)} \\left[ \\left| D ^ {\\mathrm {g e n}} - D ^ {\\mathrm {g t}} \\right| \\right] \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.876, + 0.887, + 0.907 + ], + "angle": 0, + "content": "This objective constrains the 3d motion of the generated object in the video. The model learns to generate more accurate 3d physical motion through training with this objective." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.156, + 0.057, + 0.816, + 0.073 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.084, + 0.273, + 0.102 + ], + "angle": 0, + "content": "D. Coordinate system" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.11, + 0.888, + 0.187 + ], + "angle": 0, + "content": "We give a visualization of the coordinate system used in this paper in Figure 12. To compute \\( y \\), we first leverage a segmentation map and find pixel row index that is just below the object. Once this row index is found, \\( y \\) can easily be computed from the camera position, camera sensor size, and image resolution. We note that because our camera is assumed to be in perspective with the \\( XY \\) plane, we can ignore \\( X \\) and \\( x \\) (not shown in figure) in our analyses in Section 5.1 and Section 5.2." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.2, + 0.332, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.377, + 0.199, + 0.88, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.43, + 0.89, + 0.461 + ], + "angle": 0, + "content": "Figure 12. A visualization of the coordinate system used in this paper (not to scale). The image plane height of the object is denoted as \\(y\\), its actual height in 3D as \\(Y\\), and its depth as \\(Z\\). The camera focal length is denoted as \\(f\\)." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.493, + 0.276, + 0.511 + ], + "angle": 0, + "content": "E. 
Derivation of \\( p(t|y) \\)" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.519, + 0.888, + 0.59 + ], + "angle": 0, + "content": "In our dataset construction, we assume a uniform distribution for \\( Z \\), where \\( Z \\sim \\mathcal{U}(Z_{\\min}, Z_{\\max}) \\), where \\( Z_{\\min} = 2 \\) and \\( Z_{\\max} = 18 \\). As shown in Figure 12, the dropping height \\( Y \\) is a linear function of \\( Z \\), i.e. \\( Y = y + \\beta Z \\) for the slope \\( \\beta \\) that can be computed from \\( y, f \\), the sensor size, and the camera height. This means we can solve for dropping time as \\( t = \\sqrt{\\frac{2}{g}Y} = \\sqrt{\\frac{2}{g}(y + \\beta Z)} \\). Applying the transformation rule for probability density yields" + }, + { + "type": "equation", + "bbox": [ + 0.333, + 0.595, + 0.887, + 0.635 + ], + "angle": 0, + "content": "\\[\np (t | y) = \\left\\{ \\begin{array}{l l} \\frac {g t}{\\left(Z _ {\\max } - Z _ {\\min }\\right) \\beta}, & t _ {\\min } \\leq t \\leq t _ {\\max } \\\\ 0, & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.642, + 0.885, + 0.669 + ], + "angle": 0, + "content": "where \\( t_{\\mathrm{min}} = \\sqrt{\\frac{2}{g} (y + \\beta Z_{\\mathrm{min}})} \\) and \\( t_{\\mathrm{max}} = \\sqrt{\\frac{2}{g} (y + \\beta Z_{\\mathrm{max}})} \\). Plugging in \\( Z_{\\mathrm{min}} = 2 \\) and \\( Z_{\\mathrm{max}} = 18 \\) yields Equation (3)." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.684, + 0.273, + 0.701 + ], + "angle": 0, + "content": "F. Ambiguous dataset" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.709, + 0.888, + 0.831 + ], + "angle": 0, + "content": "We introduce a new dataset for distributional analysis that broadens \\( p(t|y) \\), in contrast to the PSFT dataset, which prioritizes realism and has a narrower distribution due to limited object depth variability. 
To create a dataset with \\( p(t|y) \\) that is sufficiently diverse for meaningful analysis, we first set up the initial scenes as before, but then apply an augmentation where a new depth values is sampled uniformly from [2, 18] and the object is scaled and translated such that it appears the same in the original image, as shown in Figure 9. For simplicity, we limit our scenes to a single dropping object with no other objects on the ground. We also disable shadows, preventing the model from using them as cues to infer depth and height. Our dataset contains 5k samples consisting of 1k unique initial scenes each containing 5 different trajectories produced by the augmentation." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.85, + 0.327, + 0.868 + ], + "angle": 0, + "content": "G. Lifting trajectories to 3D" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.876, + 0.888, + 0.907 + ], + "angle": 0, + "content": "To lift trajectories to 3D, we first estimate \\( t_{\\mathrm{drop}} \\) as described in Section 5.1. Using SAM2 to estimate object masks in the generated video, we can obtain a trajectory of the bottom of the object which we denote as \\( y_0, y_1, \\ldots, y_N \\) where" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.156, + 0.057, + 0.816, + 0.072 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.085, + 0.888, + 0.12 + ], + "angle": 0, + "content": "\\(N = t_{\\mathrm{drop}} \\times \\mathrm{fps}\\). From \\(t_{\\mathrm{drop}}\\), we can solve for an implied depth \\(Z = \\frac{\\frac{1}{2}gt^2 - y}{\\beta}\\). 
We then compute the lifted 3D trajectory as \\(y_i \\mapsto y_i + \\beta Z\\)" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.137, + 0.266, + 0.153 + ], + "angle": 0, + "content": "H. PisaBench Details" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.163, + 0.888, + 0.194 + ], + "angle": 0, + "content": "In this section, we discuss the details of our data collection pipeline and annotations. We present more examples of real-world videos and corresponding annotations in Figure 13." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.21, + 0.295, + 0.226 + ], + "angle": 0, + "content": "H.1. Data Collection Pipeline" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.233, + 0.888, + 0.295 + ], + "angle": 0, + "content": "Collecting Real World Videos. We enlist approximately 15 volunteers to participate in the data collection process. We hand out a tripod, tape, and invisible wire for each volunteer. To ensure the quality, diversity, and minimize the ambiguity introduced by the environments, volunteers are provided with detailed guidelines. The key points of the data collection guidelines are shown in Table 3." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.301, + 0.888, + 0.378 + ], + "angle": 0, + "content": "Raw videos processing. For the collected raw videos, we cut each video into multiple clips and crop their sizes. For each video clip, we annotate its starting position in the original long video and ensure that the duration of each segment does not exceed 12 seconds. Regarding the sizes of the videos, we manually crop each video to an aspect ratio of \\(1:1\\), ensuring that the falling objects remain fully visible within the frame during the cropping process. The processing interface is shown in Figure 14." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.394, + 0.257, + 0.408 + ], + "angle": 0, + "content": "H.2. 
Annotation Details" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.417, + 0.888, + 0.478 + ], + "angle": 0, + "content": "We present our annotation details in Figure 15. For video captions, we present the word cloud figure in (a). For segmentation masks, we annotate all objects in the first frame using positive and negative points, which are then propagated across frames using the SAM 2 (Ravi et al., 2024) model to produce segmentation masks for all objects throughout the video. The annotation interface is shown in (b)." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.485, + 0.888, + 0.577 + ], + "angle": 0, + "content": "In addition to providing the annotated caption \" {object description} falls,\" we also add information to inform off-the-shelf models of the task's context as much as possible. To further enhance task comprehension, we append an additional description \"A video that conforms to the laws of physics.\" We also employ negative prompts \"no camera motion\" and \"no slow-motion\" to ensure environmental stability and impose constraints on the generated videos. These prompts explicitly instruct the models to avoid including camera motion or any non-real-time object motion, thereby maintaining consistency with real-world physics." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.595, + 0.252, + 0.611 + ], + "angle": 0, + "content": "I. Inference Details" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.621, + 0.888, + 0.668 + ], + "angle": 0, + "content": "We present the inference configurations of each closed or open model we evaluate in Table 4. For models that do not support generating videos with 1:1 aspect ratio, we pad initial frames with black borders to the resolution supported by these models, and finally remove the black borders from the generated videos." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.686, + 0.341, + 0.703 + ], + "angle": 0, + "content": "J. 
More Qualitative Examples" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.712, + 0.888, + 0.758 + ], + "angle": 0, + "content": "We present more qualitative examples in Figure 16 - Figure 22. Although in some showcases, models can roughly predict the downward trend, models still struggle to predict plausible shape and motion. The defects in the models can be mainly attributed to the following aspects:" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.775, + 0.884, + 0.833 + ], + "angle": 0, + "content": "- Trajectory correctness: in most videos, models fail to predict even the basic falling trajectory of objects, as shown in Figure 19 (a), despite this being highly intuitive for humans. Even in cases where the falling trajectory is roughly correctly predicted, the models still struggle to accurately predict subsequent events, such as collisions, as illustrated in Figure 16 (f)." + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.846, + 0.884, + 0.903 + ], + "angle": 0, + "content": "- Object consistency: in many generated videos, object consistency is poor. Models struggle to infer the appearance of objects from multiple viewpoints in a physically plausible manner, resulting in unnatural appearances, as shown in Figure 16 (a). Additionally, models perform poorly in maintaining object permanence, causing objects to appear blurry, as illustrated in Figure 20 (f). 
Furthermore, models sometimes introduce new objects into the video, as depicted in" + }, + { + "type": "list", + "bbox": [ + 0.104, + 0.775, + 0.884, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.156, + 0.058, + 0.816, + 0.071 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "image_caption", + "bbox": [ + 0.121, + 0.086, + 0.212, + 0.101 + ], + "angle": 0, + "content": "Figure 20 (e)." + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.111, + 0.888, + 0.142 + ], + "angle": 0, + "content": "- Scene consistency: models struggle to maintain scene consistency, leading to abrupt transitions in many videos. These sudden changes make videos appear unnatural, as shown in Figure 18 (f)." + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.16, + 0.346, + 0.178 + ], + "angle": 0, + "content": "K. Simulated Adaption Details" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.186, + 0.888, + 0.278 + ], + "angle": 0, + "content": "We use the Kubric (Greff et al., 2022) simulation and rendering engine for creating our simulated videos. Kubric uses PyBullet (Coumans et al., 2010) for running physics simulations and Blender (Community, 2018) for rendering. We set the simulation rate to 240 steps per second and render 2-second videos at 16 fps, resulting in 32 frames per video. Each scene consists of objects from the Google Scanned Objects (GSO) dataset (Downs et al., 2022) and uses environmental lighting from HDRI maps provided by Kubric. We use 930 objects and 458 HDRI maps for training and 103 objects and 51 HDRI maps for testing." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.284, + 0.888, + 0.33 + ], + "angle": 0, + "content": "For each video, we randomly choose 1-6 objects to drop. 
These objects are placed at a height uniformly sampled from \\(0.5\\mathrm{m}\\) to \\(1.5\\mathrm{m}\\). Below each of these objects, a possibly empty pile of up to 4 objects spawns beneath to create collisions. The objects are placed in a spawn region of size \\(2\\mathrm{m} \\times 2\\mathrm{m}\\)." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.336, + 0.888, + 0.383 + ], + "angle": 0, + "content": "The camera is initially positioned \\(1\\mathrm{m}\\) behind this region, with its height varying uniformly between \\(0.4\\mathrm{m}\\) and \\(0.6\\mathrm{m}\\). Once all objects are placed, the camera moves back in random increments until all objects are visible within the camera frame. The camera uses a focal length of \\(35\\mathrm{mm}\\), a sensor width of \\(32\\mathrm{mm}\\), and an aspect ratio of \\(1\\times 1\\)." + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.402, + 0.212, + 0.418 + ], + "angle": 0, + "content": "L. Limitations" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.427, + 0.888, + 0.503 + ], + "angle": 0, + "content": "In this work, we collect and manually annotate a dataset of 361 real-world videos and design three spatial metrics to evaluate the performance of state-of-the-art image-to-video (I2V) models in a fundamental physical scenario: free fall. Our metrics focus solely on spatial positional relationships, excluding object appearance attributes such as color. To enable more fine-grained evaluations of appearance characteristics, we aim to develop metrics based on Multimodal Large Language Models (MLLMs) or pixel-level analysis in future work." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.51, + 0.888, + 0.572 + ], + "angle": 0, + "content": "Furthermore, we propose the PSFT and ORO methods to fine-tune the Open-Sora model (Zheng et al., 2024), improving its ability to generate physically plausible videos. 
Despite these improvements, certain limitations remain, specifically, the generation of blurry objects in some videos. We hope to address these challenges in future research by refining both the dataset and the fine-tuning strategies, aiming to produce videos that better maintain object visuals." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.923, + 0.495, + 0.935 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.157, + 0.057, + 0.816, + 0.072 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.12, + 0.885, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.364, + 0.278, + 0.611, + 0.296 + ], + "angle": 0, + "content": "(a) A white paper roll falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.298, + 0.885, + 0.453 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.364, + 0.456, + 0.575, + 0.473 + ], + "angle": 0, + "content": "(c) A black bottle falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.477, + 0.885, + 0.631 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.364, + 0.634, + 0.631, + 0.651 + ], + "angle": 0, + "content": "(b) A transparent bottle falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.655, + 0.885, + 0.809 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.364, + 0.812, + 0.576, + 0.829 + ], + "angle": 0, + "content": "(d) A white bottle falls." + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.852, + 0.885, + 0.879 + ], + "angle": 0, + "content": "Figure 13. Examples of real world videos and annotations. We present video frames in the first row and mask annotations in the second row." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.157, + 0.057, + 0.816, + 0.072 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "table", + "bbox": [ + 0.091, + 0.093, + 0.884, + 0.855 + ], + "angle": 0, + "content": "
AspectRequirements
Camera·The camera must be stabilized using a tripod.\n·The dropping object should remain visible throughout the entire fall.\n·The trajectory of the object should be sufficiently centered in the frame.\n·Ensure the slow-motion setting is configured to 120 fps.\n·Avoid a completely top-down perspective; the frame should include both the floor and the wall for spatial context.\n·It is acceptable to record one long video containing multiple drops at the same location.
Objects·Most objects should be rigid and non-deformable.\n·A limited number of flexible or deformable objects may be included, as such data is also valuable.
Dropping Procedure·Secure the object with a wire using tape, ensuring stability. Multiple tapings may be necessary for proper stabilization.\n·Visibility of the wire in the video is acceptable.\n·No body parts should appear in the frame. If this is challenging, consider having a partner monitor the camera or use screen-sharing software to view the camera feed on a laptop for uninterrupted framing.\n·Record videos in a horizontal orientation to simplify cropping and to help keep the frame free of unnecessary elements.\n·Use a short wire to enhance object stability.\n·The object should remain stationary before being dropped.
Scene Composition·Make the scenes dynamic and engaging. Include interactions with other objects, such as collisions or objects tipping over. Static objects should serve as active elements rather than mere background props.\n·Avoid filming in classroom or laboratory environments.\n·Include a variety of dropping heights.\n·Film in different environments, ensuring at least one setting is outside your apartment.\n·Minimize human shadows in the frame whenever possible.\n·Ensure good lighting and maintain strong contrast between the objects and the back-ground.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.085, + 0.864, + 0.885, + 0.893 + ], + "angle": 0, + "content": "Table 3. Key points of real world videos collection guideline. We have detailed requirements for camera, objects, dropping procedure and scene composition to ensure the quality, diversity and minimize ambiguity introduced by environments." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.157, + 0.057, + 0.816, + 0.071 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.088, + 0.462, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.262, + 0.284, + 0.279, + 0.295 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.493, + 0.088, + 0.888, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.68, + 0.284, + 0.698, + 0.295 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.309, + 0.886, + 0.339 + ], + "angle": 0, + "content": "Figure 14. Video processing interface. (a) we annotate starting positions in the original long videos and clip them into multiple clips less than 12 seconds. (b) We drag the cropping box to crop the video size to an aspect ratio of 1:1." 
+ }, + { + "type": "image", + "bbox": [ + 0.091, + 0.349, + 0.426, + 0.607 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.245, + 0.622, + 0.264, + 0.635 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.475, + 0.347, + 0.886, + 0.611 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.667, + 0.622, + 0.688, + 0.635 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.652, + 0.888, + 0.695 + ], + "angle": 0, + "content": "Figure 15. Annotation details of real world videos. (a) Word cloud of objects in video captions. Our videos contain a variety of daily life objects. (b) Interface for annotating positive and negative points in the first frame. Red and blue dots indicate positive and negative points respectively. We annotate all objects in the midair and ground." + }, + { + "type": "table", + "bbox": [ + 0.087, + 0.703, + 0.887, + 0.874 + ], + "angle": 0, + "content": "
ModelResolutionNumber of FramesFPSGuidance ScaleSampling StepsNoise Scheduler
ClosedSora720 × 72015030---
Kling-V1.5960 × 960150301.0--
Kling-V1960 × 960150301.0--
Runway Gen31280 × 76815630---
OpenCogVideoX-5B-I2V720 × 4804886.050DDIM
DynamiCrafter512 × 32090300.750DDIM
Pyramid-Flow1280 × 768120244.010EulerDiscrete
Open-Sora512 × 51290307.030RFLOW
" + }, + { + "type": "table_caption", + "bbox": [ + 0.184, + 0.883, + 0.789, + 0.897 + ], + "angle": 0, + "content": "Table 4. Inference details for models we evaluate, where “-” indicates the information is not available." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.924, + 0.495, + 0.935 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.157, + 0.057, + 0.816, + 0.072 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.133, + 0.885, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.353, + 0.219, + 0.571, + 0.236 + ], + "angle": 0, + "content": "(a) A brown bottle falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.242, + 0.885, + 0.323 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.353, + 0.33, + 0.553, + 0.348 + ], + "angle": 0, + "content": "(b) A grey bottle falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.354, + 0.885, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.353, + 0.441, + 0.593, + 0.459 + ], + "angle": 0, + "content": "(c) A grey paper cup falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.465, + 0.885, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.353, + 0.551, + 0.547, + 0.57 + ], + "angle": 0, + "content": "(d) A paper cup falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.575, + 0.885, + 0.655 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.353, + 0.662, + 0.562, + 0.679 + ], + "angle": 0, + "content": "(e) A white bottle falls." 
+ }, + { + "type": "image", + "bbox": [ + 0.088, + 0.687, + 0.885, + 0.765 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.353, + 0.773, + 0.539, + 0.79 + ], + "angle": 0, + "content": "(f) A white box falls." + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.813, + 0.886, + 0.868 + ], + "angle": 0, + "content": "Figure 16. Qualitative examples of Kling-V1 (Kuaishou, 2024). In (a) (b) (c) (f), objects have a tendency to fall. (b) (c) are roughly consistent with the laws of physics. In (a) (f), the shape of the object does not match the first frame. In (d), the paper cup is suspended in midair. In (e), new object is introduced. In (e), the model fails to correctly predict the collision that occurs after the white box falls and the chain of events that follows." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.157, + 0.057, + 0.817, + 0.072 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.145, + 0.887, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.231, + 0.645, + 0.251 + ], + "angle": 0, + "content": "(a) A black and grey glove falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.256, + 0.887, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.342, + 0.563, + 0.36 + ], + "angle": 0, + "content": "(b) A black bottle falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.367, + 0.887, + 0.447 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.453, + 0.629, + 0.471 + ], + "angle": 0, + "content": "(c) A blue and white box falls." 
+ }, + { + "type": "image", + "bbox": [ + 0.088, + 0.478, + 0.887, + 0.557 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.564, + 0.57, + 0.581 + ], + "angle": 0, + "content": "(d) A brown bottle falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.588, + 0.887, + 0.668 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.674, + 0.593, + 0.692 + ], + "angle": 0, + "content": "(e) A Coca-Cola can falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.7, + 0.887, + 0.778 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.785, + 0.529, + 0.804 + ], + "angle": 0, + "content": "(f) A pink box falls." + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.825, + 0.886, + 0.855 + ], + "angle": 0, + "content": "Figure 17. Qualitative examples of Runway Gen3 (Runway, 2024). In (b) (e), objects have a tendency to fall. In (a) (e) (f), new objects are introduced. In (b) (d), the shape of the object does not match the first frame. In (c), the box is suspended in midair." + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.923, + 0.497, + 0.935 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.157, + 0.057, + 0.817, + 0.072 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.139, + 0.887, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.224, + 0.562, + 0.242 + ], + "angle": 0, + "content": "(a) A black bottle falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.248, + 0.887, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.335, + 0.572, + 0.353 + ], + "angle": 0, + "content": "(b) A black helmet falls." 
+ }, + { + "type": "image", + "bbox": [ + 0.088, + 0.359, + 0.887, + 0.44 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.446, + 0.546, + 0.464 + ], + "angle": 0, + "content": "(c) A paper box falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.47, + 0.887, + 0.55 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.556, + 0.563, + 0.574 + ], + "angle": 0, + "content": "(d) A white bottle falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.581, + 0.887, + 0.661 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.667, + 0.593, + 0.686 + ], + "angle": 0, + "content": "(e) A grey paper cup falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.692, + 0.79, + 0.772 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.777, + 0.54, + 0.795 + ], + "angle": 0, + "content": "(f) A white box falls." + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.818, + 0.888, + 0.861 + ], + "angle": 0, + "content": "Figure 18. Qualitative examples of CogVideoX-5B-I2V (Yang et al., 2024c). In (a) - (f), objects have a tendency to fall. However, in all the videos, there are violations of physics. In (a) (b), the objects are divided into two parts. In (c) (d) (e), the shape of the object does not match the first frame. In (c), the trajectory is not a vertical fall. In (f), scene changes suddenly, which does not match the first frame." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.923, + 0.495, + 0.935 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.157, + 0.057, + 0.817, + 0.072 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.152, + 0.887, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.238, + 0.543, + 0.255 + ], + "angle": 0, + "content": "(a) A black box falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.263, + 0.887, + 0.343 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.349, + 0.56, + 0.367 + ], + "angle": 0, + "content": "(b) A card holder falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.373, + 0.887, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.46, + 0.563, + 0.478 + ], + "angle": 0, + "content": "(c) A white bottle falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.485, + 0.887, + 0.565 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.571, + 0.543, + 0.588 + ], + "angle": 0, + "content": "(d) A white box falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.595, + 0.887, + 0.675 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.681, + 0.663, + 0.7 + ], + "angle": 0, + "content": "(e) An orange and white box falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.706, + 0.887, + 0.786 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.792, + 0.493, + 0.809 + ], + "angle": 0, + "content": "(f) A shoe falls." + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.832, + 0.884, + 0.847 + ], + "angle": 0, + "content": "Figure 19. 
Qualitative examples of DynamiCrafter (?). In all the videos, objects do not have a tendency to fall, suspended in the midair." + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.924, + 0.496, + 0.935 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.157, + 0.057, + 0.817, + 0.072 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.145, + 0.887, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.231, + 0.562, + 0.249 + ], + "angle": 0, + "content": "(a) A black bottle falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.256, + 0.887, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.342, + 0.642, + 0.36 + ], + "angle": 0, + "content": "(b) A green and white box falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.366, + 0.887, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.453, + 0.553, + 0.471 + ], + "angle": 0, + "content": "(c) A grey bottle falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.478, + 0.887, + 0.557 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.564, + 0.576, + 0.581 + ], + "angle": 0, + "content": "(d) An orange tube falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.588, + 0.887, + 0.668 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.674, + 0.562, + 0.691 + ], + "angle": 0, + "content": "(e) A white bottle falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.699, + 0.887, + 0.778 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.785, + 0.551, + 0.803 + ], + "angle": 0, + "content": "(f) A plastic box falls." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.825, + 0.886, + 0.854 + ], + "angle": 0, + "content": "Figure 20. Qualitative examples of Pyramid-Flow (Jin et al., 2024). In (b) (d) (e), objects have a tendency to fall. In (a) (b) (e) (f), new objects are introduced. In (c), scene changes, which does not match the first frame.. In (d), the tube becomes blurry." + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.157, + 0.057, + 0.817, + 0.072 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.145, + 0.887, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.231, + 0.625, + 0.249 + ], + "angle": 0, + "content": "(a) A bottle full of water falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.256, + 0.887, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.342, + 0.571, + 0.359 + ], + "angle": 0, + "content": "(b) A brown bottle falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.367, + 0.887, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.453, + 0.593, + 0.471 + ], + "angle": 0, + "content": "(c) A grey paper cup falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.478, + 0.887, + 0.558 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.564, + 0.547, + 0.581 + ], + "angle": 0, + "content": "(d) A paper box falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.588, + 0.887, + 0.668 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.674, + 0.563, + 0.691 + ], + "angle": 0, + "content": "(e) A white bottle falls." 
+ }, + { + "type": "image", + "bbox": [ + 0.088, + 0.699, + 0.887, + 0.778 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.785, + 0.54, + 0.802 + ], + "angle": 0, + "content": "(f) A white box falls." + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.825, + 0.886, + 0.854 + ], + "angle": 0, + "content": "Figure 21. Qualitative examples of Open-Sora (Zheng et al., 2024). In all the videos, objects do not have a tendency to fall, suspended in the midair. In (b) (d), scene changes suddenly, which does not match the first frame. In (e), new object is introduced." + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.157, + 0.057, + 0.817, + 0.072 + ], + "angle": 0, + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.145, + 0.887, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.231, + 0.572, + 0.249 + ], + "angle": 0, + "content": "(a) A brown bottle falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.256, + 0.887, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.342, + 0.56, + 0.361 + ], + "angle": 0, + "content": "(b) A grey eraser falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.367, + 0.887, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.453, + 0.593, + 0.471 + ], + "angle": 0, + "content": "(c) A grey paper cup falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.478, + 0.887, + 0.557 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.564, + 0.62, + 0.581 + ], + "angle": 0, + "content": "(d) A transparent bottle falls." 
+ }, + { + "type": "image", + "bbox": [ + 0.088, + 0.588, + 0.887, + 0.668 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.674, + 0.633, + 0.692 + ], + "angle": 0, + "content": "(e) A red wrapping paper falls." + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.7, + 0.887, + 0.778 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.351, + 0.785, + 0.559, + 0.802 + ], + "angle": 0, + "content": "(f) A white bottle falls." + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.825, + 0.888, + 0.855 + ], + "angle": 0, + "content": "Figure 22. Qualitative examples of our method (Open-Sora + PSFT + ORO). In all the videos, objects have a tendency to fall. However, the consistency of objects is still insufficient. In some frames, objects become blurry. Objects sometimes disappear after collision." + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.923, + 0.496, + 0.935 + ], + "angle": 0, + "content": "25" + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09595/b59876e9-da8b-438b-ab54-bb4c4d76820f_origin.pdf b/data/2025/2503_09xxx/2503.09595/b59876e9-da8b-438b-ab54-bb4c4d76820f_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..cffcf3b64564c9074a96ff42715dd189745d2b3c --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/b59876e9-da8b-438b-ab54-bb4c4d76820f_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:287aec489ea1baedef98e3de9553fcdad82e948603de837f80a643ee4ad17796 +size 33119990 diff --git a/data/2025/2503_09xxx/2503.09595/full.md b/data/2025/2503_09xxx/2503.09595/full.md new file mode 100644 index 0000000000000000000000000000000000000000..f688f3cb8ff9b94034388337448c469d64d9daef --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/full.md @@ -0,0 +1,667 @@ +# PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop + +Chenyu Li $^{*1}$ 
Oscar Michel $^{*1}$ Xichen Pan $^{1}$ Sainan Liu $^{2}$ Mike Roberts $^{2}$ Saining Xie + +# Abstract + +Large-scale pre-trained video generation models excel in content creation but are not reliable as physically accurate world simulators out of the box. This work studies the process of posttraining these models for accurate world modeling through the lens of the simple, yet fundamental, physics task of modeling object freefall. We show state-of-the-art video generation models struggle with this basic task, despite their visually impressive outputs. To remedy this problem, we find that fine-tuning on a relatively small amount of simulated videos is effective in inducing the dropping behavior in the model, and we can further improve results through a novel reward modeling procedure we introduce. Our study also reveals key limitations of post-training in generalization and distribution modeling. Additionally, we release a benchmark for this task that may serve as a useful diagnostic tool for tracking physical accuracy in large-scale video generative model development. Code is available at this repository: https://github.com/vision-x-nyu/pisa-experiments. + +# 1. Introduction + +Over the past year, video generation models have advanced significantly, inspiring visions of a future where these models could serve as realistic world models (Craik, 1967; LeCun, 2022; Hafner et al., 2019; 2023; Ha & Schmidhuber, 2018). State-of-the-art video generation models exhibit impressive results in content creation (OpenAI, 2024; Kuaishou, 2024; Luma, 2024; Runway, 2024) and are already being used in advertising and filmmaking (Runway, 2025; NBC, 2025). These advancements have sparked a line of research that seeks to evolve these models from content creators to world simulators for embodied agents (Yang + +*Equal contribution, alphabetical order. 1New York University 2Intel Labs. + +et al., 2023; 2024b; Agarwal et al., 2025). 
However, accurate world modeling is considerably more challenging than creative content creation because looking "good enough" is not sufficient: generated pixels must faithfully represent a world state evolving in accordance with the laws of physics and visual perspective. + +We find that although the generations of state-of-the-art models are impressive visually, these models still struggle to generate results that are accurate physically, even though these models are pretrained on internet-scale video data demonstrating a wide variety of complex physical interactions. The failure to ground and align visual generations to the laws of physics suggests that pretraining is not enough and a post-training stage is needed. Much like how pretrained Large Language Models (LLMs) need to be adapted through post-training before they can be useful conversational assistants, pretrained video generative models ought to be adapted through post-training before they can be deployed as physically accurate world simulators. + +In this work, we rigorously examine the post-training process of video generation models by focusing on the simple yet fundamental physics task of modeling object freefall, which we find is highly challenging for state-of-the-art models. Specifically, we study an image-to-video $^{1}$ (I2V) scenario where the goal is to generate a video of an object falling and potentially colliding with other objects on the ground, starting from an initial image of the object suspended in midair. We chose to study this single task, rather than general physics ability as a whole, because its simplicity allows us to conduct controlled experiments that yield insights into the strengths and limitations of the post-training process, which we believe will become an increasingly important component of research in generative world modeling. 
Additionally, the simplicity of the dropping task allows it to be implemented in simulation which is desirable because it allows us to easily test the properties of dataset scaling, gives us access to ground truth annotations for evaluation, and gives us the ability to precisely manipulate the simulation environment for controlled experimentation. + +![](images/19a56cf5e00b438278d859ed1f2a1e950888d74b178b001eab56b1dd9862790e.jpg) +Figure 1. Our PISA (Physics-Informed Simulation and Alignment) evaluation framework includes a new video dataset, where objects are dropped in a variety of real-world (left) and synthetic (right) scenes. For visualization purposes, we depict object motion by overlaying multiple video frames in each image shown above. Our real-world videos enable us to evaluate the physical accuracy of generated video output, and our synthetic videos enable us to improve accuracy through the use of post-training alignment methods. + +![](images/00bcbfd531744498cbd9c4457e5710d02b6c2db7bb28bac2414f98156285e998.jpg) + +Named after Galileo's famous dropping experiment, we introduce the PISA (Physics-Informed Simulation and Alignment) framework for studying physics post-training in the context of the dropping task. PISA includes new real and simulated video datasets, as shown in Figure 1, containing a diverse set of dropping scenarios. PISA also includes a set of task-specific metrics that focus on measuring physical accuracy. Our real-world videos and metrics enable us to evaluate the physical accuracy of generated video output, and our synthetic videos enable us to improve accuracy through a post-training process we introduce. + +Our study reveals that current state-of-the-art video generative models struggle significantly with the task of physically accurate object dropping. Generated objects frequently exhibit impossible behaviors, such as floating midair, defying gravity, or failing to preserve realistic trajectories during freefall. 
However, we find that simple fine-tuning can be remarkably effective: fine-tuning an open-source model on a small dataset of just a few thousand samples enables it to vastly outperform state-of-the-art models in physical accuracy. We further observe that pretrained models are critical for success; models initialized randomly, without leveraging pretraining on large-scale video datasets, fail to achieve comparable results. We also introduce a novel framework for reward modeling that yields further improvement. We demonstrate that our reward learning system is highly flexible in that different reward functions can be chosen to target different axes of physical improvement. + +Our analysis also reveals key limitations. First, we see that model performance degrades when tasked with scenarios outside the training distribution, such as objects dropping from unseen depths or heights. Additionally, while our post + +trained model generates object motion that is 3D-consistent and physically accurate, we observe misalignment between the generated and ground truth dropping time distribution. + +These findings indicate that post-training is likely to be an essential component of future world modeling systems. The challenges we identify in this relatively simple task are likely to persist when modeling more sophisticated physical phenomena. By introducing the PISA framework and benchmark, we provide a useful diagnostic tool for researchers to test whether models are on the path to acquiring general physical abilities, as well as identify key limitations that researchers should be aware of when integrating new capabilities into their models through post-training. + +# 2. Related Work + +Modeling Intuitive Physics. Intuitive physics refers to the innate or learned human capacity to make quick and accurate judgments about the physical properties and behaviors of objects in the world, such as their motion, stability, or interactions. 
This ability, present even in infancy (Spelke et al., 1992; Baillargeon, 2004; Battaglia et al., 2013), is crucial for navigating and understanding everyday life. Replicating intuitive physics is a foundational step toward creating systems that can interact effectively and safely in dynamic, real-world environments (Lake et al., 2017). Gravity, as a core component of intuitive physics, plays a pivotal role in both domains. It is one of the most universal and observable physical forces, shaping our expectations about object motion, stability, and interaction (Hamrick et al., 2016; Ullman et al., 2017). Many studies in cognitive science (Battaglia et al., 2013) and AI (Wu et al., 2015; Bear et al., 2021) have + +relied on physics engines to evaluate and model intuitive physics. Our work uses the Kubric engine (Greff et al., 2022) to generate training videos. + +Video Generation Models as World Simulators. Video generation has long been an intriguing topic in computer vision, particularly in the context of predicting future frames (Srivastava et al., 2015; Xue et al., 2016). More recently, as large-scale generative models have become prominent, Yang et al. explored how a wide range of real-world dynamics and decision-making processes can be expressed in terms of video modeling (Yang et al., 2024b; 2023). The introduction of the Sora model (OpenAI, 2024) marked a leap in the quality of generated videos and ignited interest in leveraging such models as "world simulators." Over the past year, numerous video generation models have emerged, some open-source (Zheng et al., 2024; Yang et al., 2024c; Jin et al., 2024; Agarwal et al., 2025) and others commercially available (Kuaishou, 2024; Luma, 2024; Runway, 2024; OpenAI, 2024). Related to our work, Kang et al. (Kang et al., 2024) study the extent to which video generation models learn generalizable laws of physics when trained on 2D data from a synthetic environment. + +Evaluating Video Generation Models. 
Traditional image-based metrics for generative modeling, such as Fréchet inception distance (FID) (Heusel et al., 2017) or inception score (IS) (Salimans et al., 2016), can be incorporated into the video domain, either by applying them on a frame-by-frame basis or by developing video-specific versions, such as Fréchet video distance (FVD) (Unterthiner et al., 2018). Going beyond distribution matching measures, several benchmarks have developed suites of metrics that aim to better evaluate the semantic or visual quality of generated videos. For example, V-Bench (Huang et al., 2024) offers a more granular evaluation by measuring video quality across multiple dimensions, such as with respect to subject consistency or spatial relationships. In physics, some works, such as VideoPhy (Bansal et al., 2024) and PhyGenBench (Meng et al., 2024), evaluate in the T2V setting by utilizing multimodal large language models (MLLM) to generate a VQA-based score. More recently, Cosmos (Agarwal et al., 2025) and Physics-IQ (Motamed et al., 2025) evaluate physics in the image-to-video and video-to-video settings. + +# 3. PisaBench + +Our benchmark, PisaBench, examines the ability of video generative models to produce accurate physical phenomena by focusing on a straightforward dropping task. + +# 3.1. Task Definition & Assumptions + +Our task can be summarized as follows: given an image of an object suspended in midair, generate a video of the object + +![](images/1ac3de96698eccbe7ffd3f2cdeee9e6722a7a59125ffbd3af395c8b98701c5c7.jpg) +Figure 2. The setup for collecting real-world videos. + +falling and colliding with the ground and potentially other objects. Since a video is an incomplete partial observation of the 4D world, we make a number of assumptions to constrain the task space. 
These assumptions are crucial for ensuring that our metrics are reliable signals for physical accuracy, since they are only approximations of task success computed from a single ground truth and generated video. + +Specifically, we assume that the falling object is completely still in the initial frame, that only the force of gravity is acting on the object while it falls, and that the camera does not move. The first two assumptions are necessary for the image-to-video setting. Since we do not provide multiple frames as input, it is otherwise impossible to establish the initial velocity or acceleration of the falling object without these assumptions. The last assumption is necessary as our metrics derive from the motion of segmentation masks, which would be affected in the presence of camera motion. + +# 3.2. Real World Data + +![](images/75bc9f270274a876e0f2d9bb2c4307c38aafd436f796800aeee64fbd5f2d9b29.jpg) +Figure 3. Statistics of the real-world data: (a) number of objects in each video, (b) the proportions of different scenes in the videos. + +![](images/2a038e4df7899a3c6a7af3f168c48dd30504eb434167e140afe024da4ee58ef5.jpg) + +Real World Videos. We collect a set of 361 real-world videos demonstrating the dropping task for evaluation. As is shown in Figure 4, the dataset includes a diverse set + +![](images/7d4ceaab34fce33596b3dd1e9d4ba7a5b2fc93095d7bd082617fa0ffe00a3c57.jpg) +Figure 4. Examples of various objects included in our dataset. For simulation, we utilize the GSO dataset (Downs et al., 2022), while for the real-world dataset, we curate our own set of common household objects. + +of objects with different shapes and sizes, captured across various settings such as offices, kitchens, parks, and more (see Figure 3). Each video begins with an object suspended by an invisible wire in the first frame, which is necessary to enforce the assumption that objects are stationary at the start of the video. 
This assumption is required in our image-to-video setting; otherwise, the initial velocity of an object is ambiguous. We cut the video clips to begin as soon as the wire is released. We record the videos in slow-motion at 120 frames per second (fps) with cellphone cameras mounted on tripods to eliminate camera motion. An example of our video collection setup is shown in Figure 2. Additional details on our collection system are provided in Appendix H. + +Simulated Test Videos. Since our post-training process uses a dataset of simulated videos, we also create a simulation test-set of 60 videos for understanding sim2real transfer. We create two splits of 30 videos each: one featuring objects and backgrounds seen during training, and the other featuring unseen objects and backgrounds. See Section 4.1 for details on how our simulated data is created. + +Annotations. As is shown in Figure 5, we annotate each video with a caption and segmentation masks estimated from the SAM 2 (Ravi et al., 2024) video segmentation model. We create a descriptive caption for each object in the format of “{object description} falls.” This caption is used to provide context to the task when text input is supported. + +# 3.3. Metrics + +We propose three metrics to assess the accuracy of trajectories, shape fidelity, and object permanence. Each of our metrics compares frames from the ground-truth video with the generated video. Further details about the metrics, including + +![](images/fdf0c1240713a0f765f0563462d434ced74c2fc826cdce4f827d1a1a50939cc7.jpg) +Figure 5. Example of annotations in real-world data. For segmentation masks, we manually annotate first frame and utilize SAM 2 to produce segmentation masks across frames. For captions, we annotate “{object description} falls.” for all video segments. + +their formulas and our resampling procedure for accounting for differences in fps, are described in Appendix B. + +Trajectory L2. 
For each frame in both the generated video and ground truth, we calculate the centroid of the masked region. After doing this, we compute the average $L_{2}$ distance between the centroids of corresponding frames. + +Chamfer Distance (CD). To assess the shape fidelity of objects, we calculate the Chamfer Distance (CD) between the mask regions of the generated video and ground truth. + +Intersection over Union (IoU). We use the Intersection over Union (IoU) metric to evaluate object permanence. The IoU measures objects' degree of overlap between the generated video and ground truth. + +# 3.4. Evaluation Results + +We evaluate 4 open models including CogVideoX-5B-I2V(Yang et al., 2024c), DynamiCrafter(Xing et al., 2023), Pyramid-Flow(Jin et al., 2024), and Open-Sora-V1.2(Zheng et al., 2024), as well as 4 proprietary models including Sora (OpenAI, 2024), Kling-V1(Kuaishou, 2024), Kling-V1.5(Kuaishou, 2024), and Runway Gen3 (Runway, 2024). We also evaluate OpenSora post-trained through the processes of Supervised Fine-Tuning (PSFT) and Object Reward Optimization (ORO); see Section 4 for details. + +The results of running the baseline models on the benchmark indicate a consistent failure to generate physically accurate dropping behavior, despite the visual realism of their generated frames. Qualitatively, we see common failure cases in Figure 6, such as implausible object deformations, floating, hallucination of new objects, and unrealistic special + +![](images/342a3b4296640d6e8ed1d72350bfb1237311d4fb53b1dc49759aa08ed93e6617.jpg) +Figure 6. Qualitative comparison of results on real test set (row 1-2), simulated seen test set (row 3-4) and simulated unseen test set (row 5-6). We present the results of popular open-source and commercially available models alongside those of models fine-tuned through our method. Existing models often struggle to generate videos depicting objects falling, whereas our PSFT method effectively introduces knowledge of free-fall into the model. 
ORO enables the model to more accurately learn object motion and shape. + +effects. We further visualize a random subset of generated trajectories on the left of Figure 8. In many cases, the object remains completely static, and sometimes the object even moves upward. When downward motion is present, it is often slow or contains unrealistic horizontal movement. + +# 4. Physics Post-Training + +We present a post-training process to address the limitations of current models described in Section 3.4. We utilize simulated videos that demonstrate realistic dropping behavior. Our approach for post-training is inspired by the two-stage pipeline consisting of supervised fine-tuning followed by reward modeling commonly used in LLMs. We find that our pipeline improves performance on both real and simulated evaluations, with greater gains observed in simulation. This is due to the sim-to-real gap, though our approach still shows substantial gains in transferring to real-world data. + +# 4.1. Simulated Adaptation Data + +The first stage of our approach involves supervised fine-tuning. We use Kubric (Greff et al., 2022), a simulation and rendering engine designed for scalable video generation, to create simulated videos of objects dropping and colliding with other objects on the ground. Each video consists of 1-6 dropping objects onto a (possibly empty) pile of up to 4 objects underneath them. The videos are 2 seconds long, consisting of 32 frames at 16 fps. The objects are sourced from the Google Scanned Objects (GSO) dataset (Downs et al., 2022), which provides true-to-scale 3D models created from real-world scans across diverse categories (examples shown in Figure 4). The camera remains stationary in each video and is oriented parallel to the ground plane. 
To introduce variability, we randomly sample the camera height + +between 0.4 and 0.6 meters and position objects between 1 and 3 meters away from the camera, which corresponds to the distributions observed in the real-world dataset. More information about the dataset can be found in Appendix K. + +# 4.2. Physics Supervised Fine-Tuning (PSFT). + +![](images/9cb6168eacd785e4f741b3dbc66836cc1b13221d84cacac490669a7f161086a6.jpg) + +![](images/6adf02f646ee4da228177a9bb3d6b9810b033aeaac6c9f68056cd271eb9c47c2.jpg) + +![](images/91f342955c4abb85297c05b6fbf080e1ae798b04587e39e4104a2985d585e4d7.jpg) +Figure 7. Plots (a), (b), and (c) demonstrate that our metrics tend to improve with further training and that leveraging a pre-trained video diffusion model enhances performance compared to random initialization. In plot (d), the size of the training dataset varies in each training run (each consisting of 5k steps). With only 5k samples, we can achieve optimal results. + +![](images/a7f340f4c1bc1798d01d2bcfa3aac741d7c979c6b80afc4910bba5d647c78286.jpg) + +We use the pretrained Open-Sora v1.2 (Zheng et al., 2024) model as our base model and fine-tune it on our simulated video dataset. We employ Open-Sora v1.2's rectified flow training objective without modification (Liu et al., 2022). Each fine-tuning experiment is conducted with a batch size of 128 and a learning rate of $1\mathrm{e} - 4$ on two 80GB NVIDIA A100 GPUs. As shown in Figure 6, fine-tuning with this + +
MethodRealSim (Seen)Sim (Unseen)
L2 (↓)CD (↓)IoU (↑)L2 (↓)CD (↓)IoU (↑)L2 (↓)CD (↓)IoU (↑)
ProprietarySora (OpenAI, 2024)0.1740.4880.0650.1490.4460.0400.1400.4190.031
Kling-V1 (Kuaishou, 2024)0.1570.4250.0560.1420.4150.0320.1450.4370.028
Kling-V1.5 (Kuaishou, 2024)0.1550.4240.0580.1370.3960.0330.1320.4050.029
Runway Gen3 (Runway, 2024)0.1870.5260.0420.1700.5090.0400.1490.4600.038
OpenCogVideoX-5B-I2V (Yang et al., 2024c)0.1380.3660.0800.1120.3150.0200.1010.2900.020
DynamiCrafter (Xing et al., 2023)0.1870.5040.0210.1570.4850.0390.1360.4300.033
Pyramid-Flow (Jin et al., 2024)0.1750.4850.0620.1260.3520.0590.1300.3810.048
Open-Sora (Zheng et al., 2024)0.1750.5020.0690.1390.4090.0360.1300.3680.034
OursOpen-Sora + PSFT (base)0.0760.1880.1390.0360.0880.1650.0280.0580.129
base + ORO (Seg)0.0750.1830.1420.0330.0760.1700.0320.0630.145
base + ORO (Flow)0.0670.1640.1360.0260.0620.1220.0220.0450.071
base + ORO (Depth)0.0670.1590.1290.0310.0720.1240.0220.0460.096
+ +Table 1. PisaBench Evaluation Results. This table compares the performance of four proprietary models, four open models, and the models fine-tuned with PSFT and $\mathrm{PSFT + ORO}$ on our real-world and simulated test set which is decomposed into seen and unseen object splits. Across all metrics, our PSFT models outperform all other baselines, including proprietary models like Sora. Reward modeling further enhances results, with segmentation rewards improving the shape-based IoU metric and optical rewards and depth rewards enhancing the motion-based L2 and CD metrics. This suggests that rewards can be flexibly adjusted to target specific aspects of performance. + +data alone is sufficient to induce realistic dropping behavior in the model. Quantitatively, our PSFT model substantially improves on both our simulated and real-world benchmark, as shown in Table 1. Dataset size. We conduct an ablation study on the number of training samples to understand the amount of data required for optimal performance on our benchmark. We create random subsets from 500 to 20,000 samples and train our model for 5,000 gradient steps on each subset. Notably, as shown in Figure 7, only 5,000 samples are needed to achieve optimal results. Effect of pretraining. Additionally, we investigate the impact of Open-Sora's pre-training on adaptation. We randomly initialize the Open-Sora's denoising network while keeping the pre-trained initialization of the compressor network and train the model on a dataset of 5k training samples. As shown in Figure 8, the learned knowledge from Open-Sora's pretraining plays a critical role in our task. + +Overall, using PSFT on only 5k samples is sufficient to push Open-Sora's performance past all other evaluated models, including state-of-the-art commercial video generators, by a wide margin. This is made possible by leveraging the knowledge from the sufficiently pre-trained base model. + +# 4.3. 
Object Reward Optimization (ORO) + +In the second stage, we propose Object Reward Optimization (ORO) to use reward gradients to guide the video generation model toward generating videos where the object's motion and shape more closely align with the ground truth. + +![](images/2b0b1b1edbe72353a808756a9abec58b083266dae8f49a22cf820119f4e51b9d.jpg) +Figure 8. On the left, we plot random trajectories from the baseline models in Table 1. On the right, we show random trajectories from our fine-tuned model. The baseline trajectories exhibit unrealistic behavior, and most of them stay completely static. On the right, we see the trajectories consistently falling downward with collision and rolling behavior being modeled after the point of contact. + +We follow the VADER framework from (Prabhudesai et al., 2024) and introduce three reward models. The differences between our approach and VADER include: (1) our reward model utilizes both generated videos and ground truth instead of generated videos and conditioning. (2) gradients propagate through all denoising time steps in fine-tuning. Consequently, the VADER objective is modified as follows: + +$$ +J (\theta) = \mathbb {E} _ {\left(x _ {0}, c\right) \sim \mathcal {D}, x _ {0} ^ {\prime} \sim p _ {\theta} \left(x _ {0} ^ {\prime} \mid c\right)} \left[ R \left(x _ {0} ^ {\prime}, x _ {0}\right) \right] \tag {1} +$$ + +where $\mathcal{D}$ is the ground truth dataset, $p_{\theta}(.)$ is a given video diffusion model, $x_0^{\prime}, x_0 \in \mathbb{R}^{H \times W \times 3}$ are generated video and ground truth, and $c \in \mathbb{R}^{H \times W \times 3}$ is the initial image. + +Segmentation Reward. We utilize SAM 2 (Ravi et al., + +2024) to generate segmentation masks across frames for generated videos. We define segmentation reward as the IoU between the dropping object's mask in generated video and the mask from the ground truth simulated segmentation. + +Optical Flow Reward. 
We utilize RAFT (Teed & Deng, 2020) to generate generated video's optical flow $V^{\mathrm{gen}}$ and ground truth's optical flow $V^{\mathrm{gt}}$ . We define the optical flow reward as $R(x_0', x_0) = -|V^{\mathrm{gen}} - V^{\mathrm{gt}}|$ . + +Depth Reward. We utilize Depth-Anything-V2 (Yang et al., 2024a) to generate generated video's depth map $D^{\mathrm{gen}}$ and ground truth's depth map $D^{\mathrm{gt}}$ . We define the depth reward as $R(x_0', x_0) = -|D^{\mathrm{gen}} - D^{\mathrm{gt}}|$ . + +Details on implementation can be found in Appendix C. + +We begin from the checkpoint of the first stage, which is trained on 5,000 samples trained over 5,000 gradient steps. We then fine-tune the model with ORO on the simulated dataset, using a batch size of 1 and two 80GB NVIDIA A100 GPUs for each fine-tuning experiment. We set a learning rate of $1\mathrm{e} - 6$ for segmentation reward and depth reward and $1\mathrm{e} - 5$ for optical flow. + +As shown in Table 1, incorporating ORO in reward modeling further improves performance. Additionally, each reward function enhances the aspect of physicality that aligns with its intended purpose—segmentation rewards improve shape accuracy, while flow rewards and depth rewards improve motion accuracy. This demonstrates the process is both modular and interpretable. + +# 5. Assessing Learned Physical Behavior + +Having introduced our post-training approaches in Section 4, we probe into the model's understanding of the interaction between gravity and perspective—the two laws that determine the dynamics of our videos. We first test if the learned physical behavior of our model can generalize to dropping heights and depths beyond its training distribution. Then, we study the ability of the model to learn the probability distribution induced by the uncertainty of perspective. + +# 5.1. 
Generalization to Unseen Depths and Heights + +Depth and height are the main factors that affect the dynamics of a falling object in our videos. We can see this by combining the laws of gravity with perspective under our camera assumptions to model the object's image $y$ coordinate as a function of time (further details on our coordinate system are described in Appendix G): + +$$ +y (t) = \frac {f}{Z} \left(Y _ {0} - \frac {1}{2} g t ^ {2}\right). \tag {2} +$$ + +From Equation (2), we see that the random variables that af + +fect object motion are $Z$ (depth) and $Y$ (height) (the camera focal length $f$ is fixed). Thus, we are interested in testing generalization on unseen values of $Y$ and $Z$ . + +We create a simulated test set in which a single object is dropped from varying depths and heights, using objects and backgrounds unseen during training. We uniformly sample depth and height values (in meters) from the Cartesian product of the ranges [1, 5] and [0.5, 2.5], respectively. The camera height is fixed at $0.5m$ , and depth-height pairs outside the camera viewing frustum are discarded. A sample is in-distribution (ID) if its dropping depth and height both fall in the range [1, 3] and [0.5, 1.5]. + +Since we have access to the ground truth dropping time in simulation, we also employ a dropping time error, a metric we describe in Appendix B. Our analysis in Table 2 shows that performance degrades for out-of-distribution scenarios. + +Since depth and height are the main physical quantities that affect falling dynamics, this finding indicates that our model may struggle to learn a fully generalizable law that accounts for the interaction of perspective and gravity. + +
SettingL2 (↓)Chamfer (↓)IOU (↑)Time Error (↓)
ID0.0360.0880.1550.091
OOD0.0440.1430.0490.187
+ +Table 2. Results of our metrics on in-distribution (ID) and out-of-distribution (OOD) depth-height combinations. The values used for depth range from $1 - 5\mathrm{m}$ (ID range [1,3]) and height values range from 0.5-2.5 (ID range [0.5, 1.5]). + +# 5.2. Distributional Analysis + +![](images/173d344ac811372831211d71e1f089dcd0ee90fa7170fbcfc6b6036661f678d4.jpg) +Figure 9. Demonstration of ambiguity in 2D perspective projections. Each of the three clouds appears the exact same in the camera's image. The right side shows how we perform a scale and translation augmentation to generate deliberately ambiguous data. + +![](images/dcf5758aab2710279523e38fe46165467a3e180eeef0ed0b3050233202412b9e.jpg) + +The evolution of a physical system is not uniquely determined by a single initial image, since the lossy uncertainty of perspective induces a distribution of possible outcomes as shown in Figure 9. An ideal video world model should (1) output videos that are faithful to the evolution of some plausible world state and (2) provide accurate coverage across the entire distribution of the world that is possible from + +its conditioning signal. In this section, we examine these two facets by studying $p(t|y)$ : the distribution of dropping times possible from an object at coordinate $y$ in the image plane. To do this, we create a simulated dataset that has a much wider distribution $p(t|y)$ than our PSFT dataset. See Appendix F for more details on its construction. + +![](images/73a89a0c5b340af83339e7bdee6111a2850a250f52d7a51435656c3c43642fe2.jpg) +Figure 10. Examples of model trajectories lifted to 3D. The blue line represents the height of the camera ray passing through the bottom of the dropping object as a function of depth. The set of possible dropping trajectories at a given depth are depicted in gray. The lifted trajectory of the model is depicted in green. + +![](images/8862d09f5523d53e73a41fca13423e9ac96d265eb77cc7788176d5f31c7abc8a.jpg) +Figure 11. 
Visualizing $p(t|y)$ misalignment for different images. Green shows the ground-truth CDF, orange is the 32-frame quantized version, and blue is the empirical CDF of 128 different samples of dropping times from the model. + +# Testing (1): 3D faithfulness of trajectories. + +After training our model on this new dataset, we test whether its trajectories are consistent with a valid 3D world state. We first obtain an estimated dropping time from generated videos using the procedure described in Section 5.1. Using knowledge of the camera position, focal length, sensor width, and $y$ , we can obtain an implied depth and height of the trajectory. We can then back-project the video trajectory to 3D and analyze whether they constitute physically accurate trajectories. We give further details about this process in Appendix G. As shown in Figure 10, we find that our model's lifted trajectories consistently align with the 3D trajectory at the height and depth implied by its dropping time, giving evidence that the model's visual outputs are faithful to some plausible real-world state. + +# Testing (2): distributional alignment. + +Going beyond the level of individual trajectories, we study the model's learned conditional distribution $p(t|y)$ . We + +create 50 different initial images with differing values of $y$ , generate 128 different videos from each, and estimate the dropping time in each video. Using the laws of gravity, the laws of perspective, and the assumption of uniform depth sampling in our dataset, we can analytically derive the probability $p(t|y)$ as + +$$
p (t | y) = \left\{ \begin{array}{l l} \frac {g t}{\left(Z _ {\max } - Z _ {\min }\right) \beta}, & t _ {\min } \leq t \leq t _ {\max } \\ 0, & \text {o t h e r w i s e} \end{array} \right. \tag {3}
$$ + +where $\beta$ is a constant that depends on $f$ , $y$ and the camera height. The derivation is given in Appendix E. 
We then measure goodness-of-fit for each of the 50 experiments using the Kolmogorov-Smirnov (KS) test (Massey Jr, 1951). The null hypothesis of the KS test is that the two distributions being compared are equal, and we consider p-values less than 0.05 as evidence of misalignment. Since our measured times have limited precision and can only take 32 distinct values—due to estimating the contact frame—we approximate the ground truth $p(t|y)$ using a Monte Carlo method. We sample 1000 values from the ground truth distribution and then quantize them into 32 bins corresponding to their frame, which we use as ground truth observations in the KS test. We find that in all 50/50 cases, the p-value from the test is less than 0.05, which provides evidence that the model does not learn the correct distribution of dropping times. We visualize the misalignment between the empirical CDF of the model's samples and the ground-truth CDF in Figure 11. + +In summary, while our model's trajectories show promising tendencies to ground themselves to plausible 3D world states, the range of possible outputs from the model does not align with the ground truth distribution. + +# 6. Conclusion + +This work studies post-training as an avenue for adapting a pre-trained video generator into world models. We introduce a post-training strategy that is highly effective in aligning our model. Our work raises interesting insights into the learned distributions of generative models. Qualitatively, large scale image or video generative models appear to excel at generating likely samples from the data distribution, but this alone does not imply that they match the data distribution well in its entirety. As long as a model is able to generate likely samples, global distributional misalignment is not necessarily a problem for content creation. However, this problem becomes critical for world models, where alignment across the entire distribution is necessary for faithful world simulation. 
The insights revealed by our study, made possible by our constrained and tractable setting, indicate that although post-training improves per-sample accuracy, general distributional alignment remains unsolved. + +# Acknowledgment + +We thank Boyang Zheng, Srivats Poddar, Ellis Brown, Shengbang Tong, Shusheng Yang, Jihan Yang, Daohan Lu, Anjali Gupta and Ziteng Wang for their help with data collection. We thank Jiraphon Yenphraphai for valuable assistance in setting up our simulation code. We thank Runway and Kling AI for providing API credit. SX also acknowledges support from Intel AI SRS, Korean AI Research Hub, Open Path AI Foundation, Amazon Research Award, Google TRC program, and NSF Award IIS-2443404. + +# References + +Agarwal, N., Ali, A., Bala, M., Balaji, Y., Barker, E., Cai, T., Chattopadhyay, P., Chen, Y., Cui, Y., Ding, Y., et al. Cosmos world foundation model platform for physical AI. arXiv preprint arXiv:2501.03575, 2025. +Baillargeon, R. Infants' physical world. Current directions in psychological science, 13(3):89-94, 2004. +Bansal, H., Lin, Z., Xie, T., Zong, Z., Yarom, M., Bitton, Y., Jiang, C., Sun, Y., Chang, K.-W., and Grover, A. Videophy: Evaluating physical commonsense for video generation. arXiv preprint arXiv:2406.03520, 2024. +Battaglia, P. W., Hamrick, J. B., and Tenenbaum, J. B. Simulation as an engine of physical scene understanding. Proceedings of the National Academy of Sciences, 110 (45):18327-18332, 2013. +Bear, D. M., Wang, E., Mrowca, D., Binder, F. J., Tung, H.-Y. F., Pramod, R., Holdaway, C., Tao, S., Smith, K., Sun, F.-Y., et al. Physion: Evaluating physical prediction from vision in humans and machines. arXiv preprint arXiv:2106.08261, 2021. +Community, B. O. Blender - a 3d modelling and rendering package, 2018. URL http://www.blender.org. +Coumans, E. et al. Bullet physics engine. Open Source Software: http://bulletphysics.org, 1(3):84, 2010. +Craik, K. J. W. The nature of explanation, volume 445. CUP Archive, 1967. 
+Downs, L., Francis, A., Koenig, N., Kinman, B., Hickman, R., Reymann, K., McHugh, T. B., and Vanhoucke, V. Google scanned objects: A high-quality dataset of 3d scanned household items. In 2022 International Conference on Robotics and Automation (ICRA), pp. 2553-2560. IEEE, 2022. +Greff, K., Belletti, F., Beyer, L., Doersch, C., Du, Y., Duckworth, D., Fleet, D. J., Gnanapragasam, D., Golemo, F., Herrmann, C., et al. Kubric: A scalable dataset generator. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 3749-3761, 2022. + +Ha, D. and Schmidhuber, J. Recurrent world models facilitate policy evolution. Advances in neural information processing systems, 31, 2018. +Hafner, D., Lillicrap, T., Ba, J., and Norouzi, M. Dream to control: Learning behaviors by latent imagination. arXiv preprint arXiv:1912.01603, 2019. +Hafner, D., Pasukonis, J., Ba, J., and Lillicrap, T. Mastering diverse domains through world models. arXiv preprint arXiv:2301.04104, 2023. +Hamrick, J. B., Battaglia, P. W., Griffiths, T. L., and Tenenbaum, J. B. Inferring mass in complex scenes by mental simulation. Cognition, 157:61-76, 2016. +Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., and Hochreiter, S. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. +Huang, Z., He, Y., Yu, J., Zhang, F., Si, C., Jiang, Y., Zhang, Y., Wu, T., Jin, Q., Chanpaisit, N., et al. Vbench: Comprehensive benchmark suite for video generative models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 21807-21818, 2024. +Jin, Y., Sun, Z., Li, N., Xu, K., Jiang, H., Zhuang, N., Huang, Q., Song, Y., Mu, Y., and Lin, Z. Pyramidal flow matching for efficient video generative modeling. arXiv preprint arXiv:2410.05954, 2024. +Kang, B., Yue, Y., Lu, R., Lin, Z., Zhao, Y., Wang, K., Huang, G., and Feng, J. 
How far is video generation from world model: A physical law perspective. arXiv preprint arXiv:2411.02385, 2024. +Kuaishou. Kling, 2024. URL https://kling.kuaishou.com. Accessed: 2024. +Lake, B. M., Ullman, T. D., Tenenbaum, J. B., and Gershman, S. J. Building machines that learn and think like people. Behavioral and brain sciences, 40:e253, 2017. +LeCun, Y. A path towards autonomous machine intelligence version 0.9.2, 2022-06-27. Open Review, 62(1):1-62, 2022. +Liu, X., Gong, C., and Liu, Q. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022. +Luma. Dream machine, 2024. URL https://lumalabs.ai/dream-machine. Accessed: 2024. +Massey Jr, F. J. The kolmogorov-smirnov test for goodness of fit. Journal of the American statistical Association, 46 (253):68-78, 1951. + +Meng, F., Liao, J., Tan, X., Shao, W., Lu, Q., Zhang, K., Cheng, Y., Li, D., Qiao, Y., and Luo, P. Towards world simulator: Crafting physical commonsense-based benchmark for video generation. arXiv preprint arXiv:2410.05363, 2024. +Motamed, S., Culp, L., Swersky, K., Jaini, P., and Geirhos, R. Do generative video models learn physical principles from watching videos? arXiv preprint arXiv:2501.09038, 2025. +NBC. Coca-Cola causes controversy with ai-made ad, 2025. Accessed: 2025-01-17. +OpenAI. Sora, 2024. URL https://sora.com. Accessed: 2024. +Prabhudesai, M., Mendonca, R., Qin, Z., Fragkiadaki, K., and Pathak, D. Video diffusion alignment via reward gradients. arXiv preprint arXiv:2407.08737, 2024. +Ravi, N., Gabeur, V., Hu, Y.-T., Hu, R., Ryali, C., Ma, T., Khedr, H., Rädle, R., Rolland, C., Gustafson, L., Mintun, E., Pan, J., Alwala, K. V., Carion, N., Wu, C.-Y., Girshick, R., Dollár, P., and Feichtenhofer, C. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024. URL https://arxiv.org/abs/2408.00714. +Runway. Gen-3 alpha, 2024. URL https://runwayml.com/research/introducing-gen-3alpha. Accessed: 2024. 
+Runway. AIFF 2025: AI Film Festival, 2025. URL https://aiff.runwayml.com/. Accessed: 2025-01-17. +Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., and Chen, X. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. +Spelke, E. S., Breinlinger, K., Macomber, J., and Jacobson, K. Origins of knowledge. Psychological review, 99(4): 605, 1992. +Srivastava, N., Mansimov, E., and Salakhudinov, R. Unsupervised learning of video representations using lstms. In International conference on machine learning, pp. 843-852. PMLR, 2015. +Teed, Z. and Deng, J. Raft: Recurrent all-pairs field transforms for optical flow. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part II 16, pp. 402-419. Springer, 2020. +Ullman, T. D., Spelke, E., Battaglia, P., and Tenenbaum, J. B. Mind games: Game engines as an architecture for + +intuitive physics. Trends in cognitive sciences, 21(9): 649-665, 2017. +Unterthiner, T., Van Steenkiste, S., Kurach, K., Marinier, R., Michalski, M., and Gelly, S. Towards accurate generative models of video: A new metric & challenges. arXiv preprint arXiv:1812.01717, 2018. +Wu, J., Yildirim, I., Lim, J. J., Freeman, B., and Tenenbaum, J. Galileo: Perceiving physical object properties by integrating a physics engine with deep learning. Advances in neural information processing systems, 28, 2015. +Xing, J., Xia, M., Zhang, Y., Chen, H., Yu, W., Liu, H., Wang, X., Wong, T.-T., and Shan, Y. Dynamiccafter: Animating open-domain images with video diffusion priors. arXiv preprint arXiv:2310.12190, 2023. +Xue, T., Wu, J., Bouman, K., and Freeman, B. Visual dynamics: Probabilistic future frame synthesis via cross convolutional networks. Advances in neural information processing systems, 29, 2016. +Yang, L., Kang, B., Huang, Z., Zhao, Z., Xu, X., Feng, J., and Zhao, H. Depth anything v2. arXiv:2406.09414, 2024a. 
+Yang, M., Du, Y., Ghasemipour, K., Tompson, J., Schuurmans, D., and Abbeel, P. Learning interactive real-world simulators. arXiv preprint arXiv:2310.06114, 2023. +Yang, S., Walker, J., Parker-Holder, J., Du, Y., Bruce, J., Barreto, A., Abbeel, P., and Schuurmans, D. Video as the new language for real-world decision making. arXiv preprint arXiv:2402.17139, 2024b. +Yang, Z., Teng, J., Zheng, W., Ding, M., Huang, S., Xu, J., Yang, Y., Hong, W., Zhang, X., Feng, G., et al. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072, 2024c. +Zheng, Z., Peng, X., Yang, T., Shen, C., Li, S., Liu, H., Zhou, Y., Li, T., and You, Y. Open-sora: Democratizing efficient video production for all, March 2024. URL https://github.com/hpcaitech/Open-Sora. + +# A. Discussion of Image-to-Video setting. + +We note that our choice of single-image input, as opposed to multi-frame input, comes with some trade-offs. We choose the image-to-video setting because it is widely supported among many different models, allowing us to make effective comparisons across the current state-of-the-art. However, only conditioning on a single frame introduces significant ambiguity. Due to the loss of information caused by projecting the 3D world through perspective, it may not be possible to directly infer the size of the object or its height. In practice, we find our metrics are still reliable signals of task success, but we still study the problem of ambiguity more extensively in Section 5.2. + +# B. Metric details. + +We propose three metrics to assess the accuracy of trajectories, shape fidelity, and object permanence. Each of our metrics compare frames from the ground-truth video with the generated video. Because different models can operate at different fps, we perform fps alignment as part of our evaluation process. 
To perform fps alignment, we map each frame index of the generated videos to the ground truth using $f_{\mathrm{gen}}$ and $f_{\mathrm{gt}}$ , where $f_{\mathrm{gen}}$ and $f_{\mathrm{gt}}$ are the fps of generated video and ground truth respectively. For $i$ -th frame in the generated video, we find the corresponding aligned frame index $j$ in the ground truth video: + +$$
j = \operatorname {r o u n d} \left(i \cdot \frac {f _ {\mathrm {g t}}}{f _ {\text {g e n}}}\right) \tag {4}
$$ + +Through fps alignment, we downsample the ground truth video to match the frame number of the generated video. We denote the downsampled ground truth as $\{I_i^{\mathrm{gt}}\}_{i = 1}^N$ and the generated video as $\{I_i^{\mathrm{gen}}\}_{i = 1}^N$ , where $N$ is the number of frames in the generated video. + +Trajectory L2. For each frame in both the generated video and ground truth, we calculate the centroid of the masked region. We then compute $L_{2}$ distance between the centroids of corresponding frames: + +$$
L _ {2} = \frac {1}{N} \sum_ {i = 1} ^ {N} \| C _ {i} ^ {\text {g e n}} - C _ {i} ^ {\mathrm {g t}} \| _ {2} \tag {5}
$$ + +where $C_i^{\mathrm{gen}}, C_i^{\mathrm{gt}} \in \mathbb{R}^2$ are the centroids of the dropping object in the $i$ -th frame of generated video and the ground truth respectively. + +Chamfer Distance (CD). To assess the shape fidelity of objects, we calculate the Chamfer Distance (CD) between the mask regions of the generated video and ground truth: + +$$
\mathrm {C D} = \frac {1}{N} \sum_ {i = 1} ^ {N} \left(\frac {1}{| P _ {i} |} \sum_ {p \in P _ {i}} \min _ {q \in Q _ {i}} \| p - q \| _ {2} + \frac {1}{| Q _ {i} |} \sum_ {q \in Q _ {i}} \min _ {p \in P _ {i}} \| q - p \| _ {2}\right)
$$ + +where $P_{i} = \{p_{j}\}_{j = 1}^{|P_{i}|}$ and $Q_{i} = \{q_{j}\}_{j = 1}^{|Q_{i}|}$ are the sets of mask points in the $i$ -th frame of the generated video and ground truth respectively. + +Intersection over Union (IoU). 
We use the Intersection over Union (IoU) metric to evaluate object permanence. IoU measures objects' degree of overlap between the generated video and ground truth. This is formulated as follows: + +$$ +\mathrm {I o U} = \frac {1}{| N |} \sum_ {i = 1} ^ {N} \frac {\left| M _ {i} ^ {\text {g e n}} \cap M _ {i} ^ {\mathrm {g t}} \right|}{\left| M _ {i} ^ {\text {g e n}} \cup M _ {i} ^ {\mathrm {g t}} \right|} \tag {6} +$$ + +where $M_{i}^{\mathrm{gen}}$ , $M_{i}^{\mathrm{gt}} \in \{0,1\}^{H\times W}$ are binary segmentation masks of the falling object in the $i$ -th frame of the generated and ground truth videos respectively. + +Time error. When testing on videos generated in simulation, we can provide a timing error. From the dropping height $Y_{0}$ of the ground truth video, which we have access to from the simulator, we can derive $t_{\mathrm{drop}} = \sqrt{Y_0\frac{2}{g}}$ . We then obtain a dropping time from the model's output by estimating the frame of impact as the first frame $F$ whose centroid velocity in the $y$ direction is negative. If $t_{\mathrm{drop}}$ occurs in between $F$ and $F - 1$ , then we define the time error $E_{\mathrm{time}}$ as zero. Otherwise, we define the time error as + +$$ +E _ {\text {t i m e}} = \min \left(\left| \frac {F - 1}{\mathrm {f p s}} - t _ {\text {d r o p}} \right|, \left| \frac {F}{\mathrm {f p s}} - t _ {\text {d r o p}} \right|\right). \tag {7} +$$ + +# C. ORO implementation details. + +In our setting, we do not cut the gradient after step $k$ like VADER. The gradient $\nabla_{\theta}R(x_0',x_0)$ backpropagates through all diffusion timesteps and update the model weights $\theta$ : + +$$ +\nabla_ {\theta} \left(R \left(x _ {0} ^ {\prime}, x _ {0}\right)\right) = \sum_ {t = 0} ^ {T} \frac {\partial R \left(x _ {0} ^ {\prime} , x _ {0}\right)}{\partial x _ {t}} \cdot \frac {\partial x _ {t}}{\partial \theta} \tag {8} +$$ + +where $T$ is the total diffusion timesteps. + +Segmentation Reward. 
We utilize SAM 2 (Ravi et al., 2024) to generate segmentation masks across frames for generated video: + +$$ +M ^ {\text {g e n}} = \operatorname {S A M} - 2 \left(x _ {0}\right) \tag {9} +$$ + +where $M^{\mathrm{gen}}$ denotes the masks of the falling object in the generated video. We obtain ground truth masks $M^{\mathrm{gt}}$ using Kubric (Greff et al., 2022). To avoid non-differentiable reward, we use Sigmoid to normalize mask logits of generated video instead of converting them to binary masks. We use IoU between $M^{\mathrm{gen}}$ and $M^{\mathrm{gt}}$ as reward function: + +$$ +R \left(x _ {0} ^ {\prime}, x _ {0}\right) = \operatorname {I o U} \left(M ^ {\text {g e n}}, M ^ {\text {g t}}\right) \tag {10} +$$ + +Maximizing objective 1 is equivalent to minimizing the following objective: + +$$ +J (\theta) = \mathbb {E} _ {\left(x _ {0}, c\right) \sim \mathcal {D}, x _ {0} ^ {\prime} \sim p _ {\theta} \left(x _ {0} ^ {\prime} \mid c\right)} \left[ 1 - \operatorname {I o U} \left(M ^ {\text {g e n}}, M ^ {\text {g t}}\right) \right] \tag {11} +$$ + +This objective constrains the position and shape of the generated object in the video, encouraging a greater intersection with the object region in the ground truth video. The model learns to generate more accurate object positions and shapes through training with this objective. + +Optical Flow Reward. We utilize RAFT (Teed & Deng, 2020) to generate optical flow for both generated videos and ground truth: + +$$ +V ^ {\text {g e n}} = \operatorname {R A F T} \left(x _ {0} ^ {\prime}\right) \tag {12} +$$ + +$$ +V ^ {\mathrm {g t}} = \operatorname {R A F T} (x _ {0}) +$$ + +where $V^{\mathrm{gen}}$ , $V^{\mathrm{gt}}$ denote the optical flows of generated videos and ground truth. 
We define the reward as follows: + +$$
R \left(x _ {0} ^ {\prime}, x _ {0}\right) = - \left| V ^ {\text {g e n}} - V ^ {\text {g t}} \right| \tag {13}
$$ + +Maximizing objective 1 is equivalent to minimizing the following objective: + +$$
J (\theta) = \mathbb {E} _ {\left(x _ {0}, c\right) \sim \mathcal {D}, x _ {0} ^ {\prime} \sim p _ {\theta} \left(x _ {0} ^ {\prime} \mid c\right)} \left[ \left| V ^ {\text {g e n}} - V ^ {\text {g t}} \right| \right] \tag {14}
$$ + +This objective constrains the motion of the generated object in the video. The model learns to generate more accurate physical motion through training with this objective. + +Depth Reward. We utilize Depth-Anything-V2 (Yang et al., 2024a) to generate depth maps for both generated videos and ground truth: + +$$
D ^ {\text {g e n}} = \text {D e p t h - A n y t h i n g - V 2} \left(x _ {0} ^ {\prime}\right) \tag {15}
$$ + +$$
D ^ {\mathrm {g t}} = \text {D e p t h - A n y t h i n g - V 2} (x _ {0})
$$ + +where $D^{\mathrm{gen}}$ , $D^{\mathrm{gt}}$ denote the depth maps of generated videos and ground truth. We define the reward as follows: + +$$
R \left(x _ {0} ^ {\prime}, x _ {0}\right) = - \left| D ^ {\text {g e n}} - D ^ {\mathrm {g t}} \right| \tag {16}
$$ + +Maximizing objective 1 is equivalent to minimizing the following objective: + +$$
J (\theta) = \mathbb {E} _ {\left(x _ {0}, c\right) \sim \mathcal {D}, x _ {0} ^ {\prime} \sim p _ {\theta} \left(x _ {0} ^ {\prime} \mid c\right)} \left[ \left| D ^ {\mathrm {g e n}} - D ^ {\mathrm {g t}} \right| \right] \tag {17}
$$ + +This objective constrains the 3D motion of the generated object in the video. The model learns to generate more accurate 3D physical motion through training with this objective. + +# D. Coordinate system + +We give a visualization of the coordinate system used in this paper in Figure 12. To compute $y$ , we first leverage a segmentation map and find the pixel row index that is just below the object. 
Once this row index is found, $y$ can easily be computed from the camera position, camera sensor size, and image resolution. We note that because our camera is assumed to be in perspective with the $XY$ plane, we can ignore $X$ and $x$ (not shown in figure) in our analyses in Section 5.1 and Section 5.2. + +![](images/afd6a41eb9e3a6bfc87ee7d008d6fc6d00d3b71c75ccf91adf12e6a11f200f01.jpg) +Figure 12. A visualization of the coordinate system used in this paper (not to scale). The image plane height of the object is denoted as $y$ , its actual height in 3D as $Y$ , and its depth as $Z$ . The camera focal length is denoted as $f$ . + +![](images/0492c7160061c2167d23b2371de275bf324606389678afb1b4fcc06512b614c9.jpg) + +# E. Derivation of $p(t|y)$ + +In our dataset construction, we assume a uniform distribution for $Z$ , where $Z \sim \mathcal{U}(Z_{\min}, Z_{\max})$ , where $Z_{\min} = 2$ and $Z_{\max} = 18$ . As shown in Figure 12, the dropping height $Y$ is a linear function of $Z$ , i.e. $Y = y + \beta Z$ for the slope $\beta$ that can be computed from $y, f$ , the sensor size, and the camera height. This means we can solve for dropping time as $t = \sqrt{\frac{2}{g}Y} = \sqrt{\frac{2}{g}(y + \beta Z)}$ . Applying the transformation rule for probability density yields + +$$ +p (t | y) = \left\{ \begin{array}{l l} \frac {g t}{\left(Z _ {\max } - Z _ {\min }\right) \beta}, & t _ {\min } \leq t \leq t _ {\max } \\ 0, & \text {o t h e r w i s e} \end{array} \right. \tag {18} +$$ + +where $t_{\mathrm{min}} = \sqrt{\frac{2}{g} (y + \beta Z_{\mathrm{min}})}$ and $t_{\mathrm{max}} = \sqrt{\frac{2}{g} (y + \beta Z_{\mathrm{max}})}$ . Plugging in $Z_{\mathrm{min}} = 2$ and $Z_{\mathrm{max}} = 18$ yields Equation (3). + +# F. Ambiguous dataset + +We introduce a new dataset for distributional analysis that broadens $p(t|y)$ , in contrast to the PSFT dataset, which prioritizes realism and has a narrower distribution due to limited object depth variability. 
To create a dataset with $p(t|y)$ that is sufficiently diverse for meaningful analysis, we first set up the initial scenes as before, but then apply an augmentation where a new depth value is sampled uniformly from [2, 18] and the object is scaled and translated such that it appears the same in the original image, as shown in Figure 9. For simplicity, we limit our scenes to a single dropping object with no other objects on the ground. We also disable shadows, preventing the model from using them as cues to infer depth and height. Our dataset contains 5k samples consisting of 1k unique initial scenes each containing 5 different trajectories produced by the augmentation. + +# G. Lifting trajectories to 3D + +To lift trajectories to 3D, we first estimate $t_{\mathrm{drop}}$ as described in Section 5.1. Using SAM2 to estimate object masks in the generated video, we can obtain a trajectory of the bottom of the object which we denote as $y_0, y_1, \ldots, y_N$ where + +$N = t_{\mathrm{drop}} \times \mathrm{fps}$ . From $t_{\mathrm{drop}}$ , we can solve for an implied depth $Z = \frac{\frac{1}{2}gt^2 - y}{\beta}$ . We then compute the lifted 3D trajectory as $y_i \mapsto y_i + \beta Z$. + +# H. PisaBench Details + +In this section, we discuss the details of our data collection pipeline and annotations. We present more examples of real-world videos and corresponding annotations in Figure 13. + +# H.1. Data Collection Pipeline + +Collecting Real World Videos. We enlist approximately 15 volunteers to participate in the data collection process. We hand out a tripod, tape, and invisible wire for each volunteer. To ensure quality and diversity, and to minimize the ambiguity introduced by the environments, volunteers are provided with detailed guidelines. The key points of the data collection guidelines are shown in Table 3. + +Raw video processing. For the collected raw videos, we cut each video into multiple clips and crop their sizes. 
For each video clip, we annotate its starting position in the original long video and ensure that the duration of each segment does not exceed 12 seconds. Regarding the sizes of the videos, we manually crop each video to an aspect ratio of $1:1$ , ensuring that the falling objects remain fully visible within the frame during the cropping process. The processing interface is shown in Figure 14. + +# H.2. Annotation Details + +We present our annotation details in Figure 15. For video captions, we present the word cloud figure in (a). For segmentation masks, we annotate all objects in the first frame using positive and negative points, which are then propagated across frames using the SAM 2 (Ravi et al., 2024) model to produce segmentation masks for all objects throughout the video. The annotation interface is shown in (b). + +In addition to providing the annotated caption " {object description} falls," we also add information to inform off-the-shelf models of the task's context as much as possible. To further enhance task comprehension, we append an additional description "A video that conforms to the laws of physics." We also employ negative prompts "no camera motion" and "no slow-motion" to ensure environmental stability and impose constraints on the generated videos. These prompts explicitly instruct the models to avoid including camera motion or any non-real-time object motion, thereby maintaining consistency with real-world physics. + +# I. Inference Details + +We present the inference configurations of each closed or open model we evaluate in Table 4. For models that do not support generating videos with 1:1 aspect ratio, we pad initial frames with black borders to the resolution supported by these models, and finally remove the black borders from the generated videos. + +# J. More Qualitative Examples + +We present more qualitative examples in Figure 16 - Figure 22. 
Although in some showcases, models can roughly predict the downward trend, models still struggle to predict plausible shape and motion. The defects in the models can be mainly attributed to the following aspects: + +- Trajectory correctness: in most videos, models fail to predict even the basic falling trajectory of objects, as shown in Figure 19 (a), despite this being highly intuitive for humans. Even in cases where the falling trajectory is roughly correctly predicted, the models still struggle to accurately predict subsequent events, such as collisions, as illustrated in Figure 16 (f). +- Object consistency: in many generated videos, object consistency is poor. Models struggle to infer the appearance of objects from multiple viewpoints in a physically plausible manner, resulting in unnatural appearances, as shown in Figure 16 (a). Additionally, models perform poorly in maintaining object permanence, causing objects to appear blurry, as illustrated in Figure 20 (f). Furthermore, models sometimes introduce new objects into the video, as depicted in + +Figure 20 (e). + +- Scene consistency: models struggle to maintain scene consistency, leading to abrupt transitions in many videos. These sudden changes make videos appear unnatural, as shown in Figure 18 (f). + +# K. Simulated Adaption Details + +We use the Kubric (Greff et al., 2022) simulation and rendering engine for creating our simulated videos. Kubric uses PyBullet (Coumans et al., 2010) for running physics simulations and Blender (Community, 2018) for rendering. We set the simulation rate to 240 steps per second and render 2-second videos at 16 fps, resulting in 32 frames per video. Each scene consists of objects from the Google Scanned Objects (GSO) dataset (Downs et al., 2022) and uses environmental lighting from HDRI maps provided by Kubric. We use 930 objects and 458 HDRI maps for training and 103 objects and 51 HDRI maps for testing. + +For each video, we randomly choose 1-6 objects to drop. 
These objects are placed at a height uniformly sampled from $0.5\mathrm{m}$ to $1.5\mathrm{m}$ . Below each of these objects, a possibly empty pile of up to 4 objects spawns beneath to create collisions. The objects are placed in a spawn region of size $2\mathrm{m} \times 2\mathrm{m}$ . + +The camera is initially positioned $1\mathrm{m}$ behind this region, with its height varying uniformly between $0.4\mathrm{m}$ and $0.6\mathrm{m}$ . Once all objects are placed, the camera moves back in random increments until all objects are visible within the camera frame. The camera uses a focal length of $35\mathrm{mm}$ , a sensor width of $32\mathrm{mm}$ , and an aspect ratio of $1\times 1$ . + +# L. Limitations + +In this work, we collect and manually annotate a dataset of 361 real-world videos and design three spatial metrics to evaluate the performance of state-of-the-art image-to-video (I2V) models in a fundamental physical scenario: free fall. Our metrics focus solely on spatial positional relationships, excluding object appearance attributes such as color. To enable more fine-grained evaluations of appearance characteristics, we aim to develop metrics based on Multimodal Large Language Models (MLLMs) or pixel-level analysis in future work. + +Furthermore, we propose the PSFT and ORO methods to fine-tune the Open-Sora model (Zheng et al., 2024), improving its ability to generate physically plausible videos. Despite these improvements, certain limitations remain, specifically, the generation of blurry objects in some videos. We hope to address these challenges in future research by refining both the dataset and the fine-tuning strategies, aiming to produce videos that better maintain object visuals. + +![](images/2c3255e61507df64f2fe2b8fc33da6df4f8074be9bf5d02291a3cf3bb8a8a94a.jpg) +(a) A white paper roll falls. + +![](images/76661e958af011fe7f1f7ab37c8d6b11f4f98a6f04af442101dbe8308c1127ee.jpg) +(c) A black bottle falls. 
+ +![](images/f96ae24b9af0730f8094994e180d7d5a6e32804e7b580a5dadd002836de5fced.jpg) +(b) A transparent bottle falls. + +![](images/14aef21ca7858494c097f6f67d50528c4b5917b1b19db5da86d4a491cd5a520f.jpg) +(d) A white bottle falls. +Figure 13. Examples of real world videos and annotations. We present video frames in the first row and mask annotations in the second row. + +
AspectRequirements
Camera·The camera must be stabilized using a tripod. +·The dropping object should remain visible throughout the entire fall. +·The trajectory of the object should be sufficiently centered in the frame. +·Ensure the slow-motion setting is configured to 120 fps. +·Avoid a completely top-down perspective; the frame should include both the floor and the wall for spatial context. +·It is acceptable to record one long video containing multiple drops at the same location.
Objects·Most objects should be rigid and non-deformable. +·A limited number of flexible or deformable objects may be included, as such data is also valuable.
Dropping Procedure·Secure the object with a wire using tape, ensuring stability. Multiple tapings may be necessary for proper stabilization. +·Visibility of the wire in the video is acceptable. +·No body parts should appear in the frame. If this is challenging, consider having a partner monitor the camera or use screen-sharing software to view the camera feed on a laptop for uninterrupted framing. +·Record videos in a horizontal orientation to simplify cropping and to help keep the frame free of unnecessary elements. +·Use a short wire to enhance object stability. +·The object should remain stationary before being dropped.
Scene Composition·Make the scenes dynamic and engaging. Include interactions with other objects, such as collisions or objects tipping over. Static objects should serve as active elements rather than mere background props. +·Avoid filming in classroom or laboratory environments. +·Include a variety of dropping heights. +·Film in different environments, ensuring at least one setting is outside your apartment. +·Minimize human shadows in the frame whenever possible. +·Ensure good lighting and maintain strong contrast between the objects and the back-ground.
+ +Table 3. Key points of real world videos collection guideline. We have detailed requirements for camera, objects, dropping procedure and scene composition to ensure the quality, diversity and minimize ambiguity introduced by environments. + +![](images/945c3d2b2e7ba5dd24811fc027b02c812dd3fdb62673324359a47eefccc06ed5.jpg) +(a) + +![](images/4d595693598e6f902081fbae2160c14f17629bf42e1282931e980a4273a07e54.jpg) +(b) + +![](images/8f522784783a329f5017e6371582ae27e84586d17a8f7bcd9025f339933400e5.jpg) +(a) + +![](images/733aaf4a714c7ec870de8f5e762d4a545653a15ef5ea24677ad5c2400309aed0.jpg) +Figure 14. Video processing interface. (a) we annotate starting positions in the original long videos and clip them into multiple clips less than 12 seconds. (b) We drag the cropping box to crop the video size to an aspect ratio of 1:1. +(b) +Figure 15. Annotation details of real world videos. (a) Word cloud of objects in video captions. Our videos contain a variety of daily life objects. (b) Interface for annotating positive and negative points in the first frame. Red and blue dots indicate positive and negative points respectively. We annotate all objects in the midair and ground. + +
ModelResolutionNumber of FramesFPSGuidance ScaleSampling StepsNoise Scheduler
ClosedSora720 × 72015030---
Kling-V1.5960 × 960150301.0--
Kling-V1960 × 960150301.0--
Runway Gen31280 × 76815630---
OpenCogVideoX-5B-I2V720 × 4804886.050DDIM
DynamiCrafter512 × 32090300.750DDIM
Pyramid-Flow1280 × 768120244.010EulerDiscrete
Open-Sora512 × 51290307.030RFLOW
+ +Table 4. Inference details for models we evaluate, where “-” indicates the information is not available. + +![](images/c26bfecf28ebcde71af6d60da63253f75d02e0c5503f1425cbff24859ab09444.jpg) +(a) A brown bottle falls. + +![](images/5f5749c22b33d6469cf285b89aba3d78b780ffbd4338568515ee93f4e1fa544d.jpg) +(b) A grey bottle falls. + +![](images/1f5a8409a2aefddb489c74e7deb33bffefdf80fdb5c072687ddd0946e16f3e00.jpg) +(c) A grey paper cup falls. + +![](images/19fe4f4ece7488446d3a1550236f6f8bf176f70d2615cfa7db7c28a0720cf550.jpg) +(d) A paper cup falls. + +![](images/7ecc93d403dcb06758ac993647a892395af1264eeca956ba139d2929c18192ba.jpg) +(e) A white bottle falls. + +![](images/c8f534dc359b0c6486ea7d10444e18fb31e91a7d173e50458a7fb898ac132de4.jpg) +(f) A white box falls. +Figure 16. Qualitative examples of Kling-V1 (Kuaishou, 2024). In (a) (b) (c) (f), objects have a tendency to fall. (b) (c) are roughly consistent with the laws of physics. In (a) (f), the shape of the object does not match the first frame. In (d), the paper cup is suspended in midair. In (e), new object is introduced. In (e), the model fails to correctly predict the collision that occurs after the white box falls and the chain of events that follows. + +![](images/d24e9d610f63859bf566728e5b51c496af6f6f54f828b2cc2aae2a5186055f4c.jpg) +(a) A black and grey glove falls. + +![](images/936b8ee3ceeca5f82d0050ae068b79e918e73267eaa429a43c2317f1f2323c25.jpg) +(b) A black bottle falls. + +![](images/2d23c7b2c224aa44ab3a8eed9dfe17398040ed64c31737411568651ceb6dcf15.jpg) +(c) A blue and white box falls. + +![](images/20194bd551fbcc50f5676b0a019e1d10041918fadd0264051f4f9a7540545bac.jpg) +(d) A brown bottle falls. + +![](images/baf79faae3094c985d469cccd5f8af1aef1f088b8c03e4b23c119ec86aebd807.jpg) +(e) A Coca-Cola can falls. + +![](images/8cb3aa9d253fc5e51c44a557e5629610fa5ad282a6ce25364e557518e7b8d6c7.jpg) +(f) A pink box falls. +Figure 17. Qualitative examples of Runway Gen3 (Runway, 2024). 
In (b) (e), objects have a tendency to fall. In (a) (e) (f), new objects are introduced. In (b) (d), the shape of the object does not match the first frame. In (c), the box is suspended in midair. + +![](images/a79a3d2c12bca59c6526e7216d60bb7580bc8c63576be8552b2f29dfd5c346d2.jpg) +(a) A black bottle falls. + +![](images/498fa6c0164db5fd88370c112e6dd2c5829dbe74624f6e825ac28219d5f5aa3d.jpg) +(b) A black helmet falls. + +![](images/8317550ad7aa20a11f3bf97aa716e1da5f6f471ce296a6b0a5da5eba39db5c52.jpg) +(c) A paper box falls. + +![](images/5259adbc56734c97b69c0f5ac5a0debfc574cf0ee9fab5cae058c157503ada7b.jpg) +(d) A white bottle falls. + +![](images/fb0eb57bfd7df70674700e7d467e475c936b995b0451b6a6746c321bb30fd4dc.jpg) +(e) A grey paper cup falls. + +![](images/dad22c2ccf02d28f88996ada3f6458c8f7ba92408c341e39e6da0ed00ee05fbb.jpg) +(f) A white box falls. +Figure 18. Qualitative examples of CogVideoX-5B-I2V (Yang et al., 2024c). In (a) - (f), objects have a tendency to fall. However, in all the videos, there are violations of physics. In (a) (b), the objects are divided into two parts. In (c) (d) (e), the shape of the object does not match the first frame. In (c), the trajectory is not a vertical fall. In (f), scene changes suddenly, which does not match the first frame. + +![](images/e39efd16222ce5ec5787e1ab30db7573f7cb53089f14019eb71cfee5130fa917.jpg) +(a) A black box falls. + +![](images/a17478f38168df434239dbbc27ca455093b6c1e40467cd5487d213bb5d83095d.jpg) +(b) A card holder falls. + +![](images/f5955939eff061d366f9461ca64daddccce438bc7cf26e0ca0177e4bea14e76c.jpg) +(c) A white bottle falls. + +![](images/3064ab2bd099f6c34a0b0143af9b37cafbc9b07d4aff556a41357fd47bdbe8d3.jpg) +(d) A white box falls. + +![](images/f9b6d84ca868eb7b320e5bf66a6830342a093523d4a8c01e2b0d2c6c9b32e833.jpg) +(e) An orange and white box falls. + +![](images/b68b172758804959f5687238c0b53d284f904543618dda06d81c04419d71a2ce.jpg) +(f) A shoe falls. +Figure 19. Qualitative examples of DynamiCrafter (?). 
In all the videos, objects do not have a tendency to fall, suspended in the midair. + +![](images/b39f635b4b2874364c63137dcdc9fc15f4eb68ba57cc4f852a463fac4a5c56f0.jpg) +(a) A black bottle falls. + +![](images/c3078aa13d861c89ebf70c3db430662cdcd8c83d8880aa9a934a258e5c3598e5.jpg) +(b) A green and white box falls. + +![](images/47bbef5b7ad9b2e303cbcdd98429261eea928bc17ba376000894eba09e78ab5b.jpg) +(c) A grey bottle falls. + +![](images/d20954189b98d66653a95abacce5f2c333474ea3a756f9e105766842d1c52aaf.jpg) +(d) An orange tube falls. + +![](images/52a4ded90059019d5599737a9ccf91373f0a89472a4470044d57fa2d382b574c.jpg) +(e) A white bottle falls. + +![](images/1bfe20845da001136df0ce41999f5d2267a843826b8f310807bc262f0f0570d5.jpg) +(f) A plastic box falls. +Figure 20. Qualitative examples of Pyramid-Flow (Jin et al., 2024). In (b) (d) (e), objects have a tendency to fall. In (a) (b) (e) (f), new objects are introduced. In (c), scene changes, which does not match the first frame.. In (d), the tube becomes blurry. + +![](images/161db114927f1a531685c756c1a59ca048d49f95035d117b63e02913d1aeda55.jpg) +(a) A bottle full of water falls. + +![](images/00dd072f248c8d96ba525c8f9026f6dbe513577c2833c0a39bcc58836058a3ae.jpg) +(b) A brown bottle falls. + +![](images/20691688d65a9c34668843431f6da68506d385837a8adf32d934a8338cfbb623.jpg) +(c) A grey paper cup falls. + +![](images/20dc724f7197dd626747f2db293d23a957a1d7672a436a2fd07469a0a937b08a.jpg) +(d) A paper box falls. + +![](images/5f2dbd497dd96b551059198818fd41a1cc13a1d01d40d7814fd8343102c994a3.jpg) +(e) A white bottle falls. + +![](images/5794ff6b8e6a1c23e1b350538a38f18063fb742919db3216ae76aa58e6f391f5.jpg) +(f) A white box falls. +Figure 21. Qualitative examples of Open-Sora (Zheng et al., 2024). In all the videos, objects do not have a tendency to fall, suspended in the midair. In (b) (d), scene changes suddenly, which does not match the first frame. In (e), new object is introduced. 
+ +![](images/0cc35a21a9c1a24bb99fd5ae428c1b943237dcc4bf44654420f09f88cbbe622f.jpg) +(a) A brown bottle falls. + +![](images/20453f58b53c0dbbefd7d26b5854ca263406b127771a6983ee453bcedd34a9b8.jpg) +(b) A grey eraser falls. + +![](images/d0de139686427ffdf841921b45e326470ebe6a127e77cc98801376148a86c3b4.jpg) +(c) A grey paper cup falls. + +![](images/2c9f209b0aeac72570d8c4fde360b1cf6b4dfcb2b6a0e8d4e853f7b3a78fa4d6.jpg) +(d) A transparent bottle falls. + +![](images/f52bf9a1a1b538e95d70fbf8e8072fe5c66e0fb960e1c0adc65706d6253840a0.jpg) +(e) A red wrapping paper falls. + +![](images/a00eb2f6afd73e7890435740d404820471ec3c29a424ac89fa8f08f0f78a9a90.jpg) +(f) A white bottle falls. +Figure 22. Qualitative examples of our method (Open-Sora + PSFT + ORO). In all the videos, objects have a tendency to fall. However, the consistency of objects is still insufficient. In some frames, objects become blurry. Objects sometimes disappear after collision. \ No newline at end of file diff --git a/data/2025/2503_09xxx/2503.09595/images/00bcbfd531744498cbd9c4457e5710d02b6c2db7bb28bac2414f98156285e998.jpg b/data/2025/2503_09xxx/2503.09595/images/00bcbfd531744498cbd9c4457e5710d02b6c2db7bb28bac2414f98156285e998.jpg new file mode 100644 index 0000000000000000000000000000000000000000..022051b11c6c3395281782844553c95d01994159 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/00bcbfd531744498cbd9c4457e5710d02b6c2db7bb28bac2414f98156285e998.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88b3a7558e3e535930a52b9e014bef0c7d63a90f3418f7662e04c72bf0a9705f +size 48314 diff --git a/data/2025/2503_09xxx/2503.09595/images/00dd072f248c8d96ba525c8f9026f6dbe513577c2833c0a39bcc58836058a3ae.jpg b/data/2025/2503_09xxx/2503.09595/images/00dd072f248c8d96ba525c8f9026f6dbe513577c2833c0a39bcc58836058a3ae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5baee29fa8e3fa042a70bd10eebe5bf417a52747 --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09595/images/00dd072f248c8d96ba525c8f9026f6dbe513577c2833c0a39bcc58836058a3ae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11d7bd8aa00034d5175cabac98b75016d41dacd3f07a186342394ee3fc315c2c +size 39908 diff --git a/data/2025/2503_09xxx/2503.09595/images/0492c7160061c2167d23b2371de275bf324606389678afb1b4fcc06512b614c9.jpg b/data/2025/2503_09xxx/2503.09595/images/0492c7160061c2167d23b2371de275bf324606389678afb1b4fcc06512b614c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..979cf87d01a9457cd0bdca463456748f077bcdc3 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/0492c7160061c2167d23b2371de275bf324606389678afb1b4fcc06512b614c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fab75cbd3953017db09e475cd0c190f4c0a96c87cdba58ea82c33259a8641011 +size 21446 diff --git a/data/2025/2503_09xxx/2503.09595/images/0cc35a21a9c1a24bb99fd5ae428c1b943237dcc4bf44654420f09f88cbbe622f.jpg b/data/2025/2503_09xxx/2503.09595/images/0cc35a21a9c1a24bb99fd5ae428c1b943237dcc4bf44654420f09f88cbbe622f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d38de23a27e716bc3d442f32cc3b8cda52b1cac --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/0cc35a21a9c1a24bb99fd5ae428c1b943237dcc4bf44654420f09f88cbbe622f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afec8b3072d32dd61140e7432bdf8c26064bddc0389b3c738d1736d291ac8257 +size 41746 diff --git a/data/2025/2503_09xxx/2503.09595/images/1211a8336c5089da8a8c3424e1cc2df81e4f22b093cc82bb6236a03de951a3b1.jpg b/data/2025/2503_09xxx/2503.09595/images/1211a8336c5089da8a8c3424e1cc2df81e4f22b093cc82bb6236a03de951a3b1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d76c037f0f4982d79e084c08706961d4d345d31 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/1211a8336c5089da8a8c3424e1cc2df81e4f22b093cc82bb6236a03de951a3b1.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:0ae65696e3002648443dab9d5009da0e1570fca1459cfe04217ce82fcb5f7931 +size 242819 diff --git a/data/2025/2503_09xxx/2503.09595/images/14aef21ca7858494c097f6f67d50528c4b5917b1b19db5da86d4a491cd5a520f.jpg b/data/2025/2503_09xxx/2503.09595/images/14aef21ca7858494c097f6f67d50528c4b5917b1b19db5da86d4a491cd5a520f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82dc6939cc84c2d0d8d2ddf3c22658aca3d94258 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/14aef21ca7858494c097f6f67d50528c4b5917b1b19db5da86d4a491cd5a520f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3dd8e1b647b7f332cdb97375e21a43e709f2598d16af8a48e13167a6447eea25 +size 92643 diff --git a/data/2025/2503_09xxx/2503.09595/images/161db114927f1a531685c756c1a59ca048d49f95035d117b63e02913d1aeda55.jpg b/data/2025/2503_09xxx/2503.09595/images/161db114927f1a531685c756c1a59ca048d49f95035d117b63e02913d1aeda55.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee1c44fff798cb908958ee4de8ed1615cd0417e9 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/161db114927f1a531685c756c1a59ca048d49f95035d117b63e02913d1aeda55.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6546ae6aa31b922f2250058d76e3b8b834a215a4be458d6a0474662e401c0ee +size 26903 diff --git a/data/2025/2503_09xxx/2503.09595/images/173d344ac811372831211d71e1f089dcd0ee90fa7170fbcfc6b6036661f678d4.jpg b/data/2025/2503_09xxx/2503.09595/images/173d344ac811372831211d71e1f089dcd0ee90fa7170fbcfc6b6036661f678d4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b133b3e37746a8d9343ed2b790d22cac483bf9e9 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/173d344ac811372831211d71e1f089dcd0ee90fa7170fbcfc6b6036661f678d4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3edff3b2e3bfc7d419fbeb099701a37094d8cf402973369f466d429c847739e +size 6585 diff --git 
a/data/2025/2503_09xxx/2503.09595/images/18f6ca9a4d35a17aace73c8956747dc9f04f552c0b87a2dd86040242c6bd19e8.jpg b/data/2025/2503_09xxx/2503.09595/images/18f6ca9a4d35a17aace73c8956747dc9f04f552c0b87a2dd86040242c6bd19e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6d419d9638917d9059159f001909b196f638d101 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/18f6ca9a4d35a17aace73c8956747dc9f04f552c0b87a2dd86040242c6bd19e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9db9c51ceb1174fa3558a9ecc0eb97cb21301b745dbe2a5b23ee28b8f72dc8e4 +size 8527 diff --git a/data/2025/2503_09xxx/2503.09595/images/19982fa17713845a5d804164e55f0ffa9571cbf7063e297859e10dcd17cb35ca.jpg b/data/2025/2503_09xxx/2503.09595/images/19982fa17713845a5d804164e55f0ffa9571cbf7063e297859e10dcd17cb35ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..264328d3da4978f29f5dff85d06695b1daa84e2e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/19982fa17713845a5d804164e55f0ffa9571cbf7063e297859e10dcd17cb35ca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7da7babcff7b6620edeead3f8ad46302d02e600c0c22ac07d0552b922723729b +size 146111 diff --git a/data/2025/2503_09xxx/2503.09595/images/19a56cf5e00b438278d859ed1f2a1e950888d74b178b001eab56b1dd9862790e.jpg b/data/2025/2503_09xxx/2503.09595/images/19a56cf5e00b438278d859ed1f2a1e950888d74b178b001eab56b1dd9862790e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9265483eb84b1297825a777b1e935d56bdda414d --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/19a56cf5e00b438278d859ed1f2a1e950888d74b178b001eab56b1dd9862790e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61f1a34ebcb9dc6275b7a53ad4c5defa5d36e0d2a9e9a50286a287db76ccc57d +size 70272 diff --git a/data/2025/2503_09xxx/2503.09595/images/19fe4f4ece7488446d3a1550236f6f8bf176f70d2615cfa7db7c28a0720cf550.jpg 
b/data/2025/2503_09xxx/2503.09595/images/19fe4f4ece7488446d3a1550236f6f8bf176f70d2615cfa7db7c28a0720cf550.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b8adeced332b774db493a87cff1e9239b488acb5 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/19fe4f4ece7488446d3a1550236f6f8bf176f70d2615cfa7db7c28a0720cf550.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05aa4f5b74b502b686912566d2401887ccc93523b83c7d438ea2a461e4508c15 +size 48718 diff --git a/data/2025/2503_09xxx/2503.09595/images/1ac3de96698eccbe7ffd3f2cdeee9e6722a7a59125ffbd3af395c8b98701c5c7.jpg b/data/2025/2503_09xxx/2503.09595/images/1ac3de96698eccbe7ffd3f2cdeee9e6722a7a59125ffbd3af395c8b98701c5c7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d9b098fd6cd732eff58bdaa3641f99b7b6ffe32 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/1ac3de96698eccbe7ffd3f2cdeee9e6722a7a59125ffbd3af395c8b98701c5c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dad33017292565bf054162875f8d016ccb09a8da6ac6ce8b56919d9980b09fbe +size 44401 diff --git a/data/2025/2503_09xxx/2503.09595/images/1bfe20845da001136df0ce41999f5d2267a843826b8f310807bc262f0f0570d5.jpg b/data/2025/2503_09xxx/2503.09595/images/1bfe20845da001136df0ce41999f5d2267a843826b8f310807bc262f0f0570d5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16265a88af593959e0fd35a5389a570d774003a7 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/1bfe20845da001136df0ce41999f5d2267a843826b8f310807bc262f0f0570d5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14c960111236c7d10f7080476833b0285db8d86f6e976febe466aed1851c8777 +size 44034 diff --git a/data/2025/2503_09xxx/2503.09595/images/1f5a8409a2aefddb489c74e7deb33bffefdf80fdb5c072687ddd0946e16f3e00.jpg b/data/2025/2503_09xxx/2503.09595/images/1f5a8409a2aefddb489c74e7deb33bffefdf80fdb5c072687ddd0946e16f3e00.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..9668efd96035792b3687a2b625b0a0f147529ea1 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/1f5a8409a2aefddb489c74e7deb33bffefdf80fdb5c072687ddd0946e16f3e00.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81e5306f32cae379661c791b4a66438a9ae4efed994c5594842f3b5014773315 +size 45249 diff --git a/data/2025/2503_09xxx/2503.09595/images/20194bd551fbcc50f5676b0a019e1d10041918fadd0264051f4f9a7540545bac.jpg b/data/2025/2503_09xxx/2503.09595/images/20194bd551fbcc50f5676b0a019e1d10041918fadd0264051f4f9a7540545bac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eee85f5c4d351dca2049d9c80fcf8f7a778b2cd9 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/20194bd551fbcc50f5676b0a019e1d10041918fadd0264051f4f9a7540545bac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a93f1a2a2898b6f25d62eef32a09bfd8cbafc0fda6fef4ffb5adde1e68f7a6e9 +size 52624 diff --git a/data/2025/2503_09xxx/2503.09595/images/20453f58b53c0dbbefd7d26b5854ca263406b127771a6983ee453bcedd34a9b8.jpg b/data/2025/2503_09xxx/2503.09595/images/20453f58b53c0dbbefd7d26b5854ca263406b127771a6983ee453bcedd34a9b8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb55bebdec3817aba0c74c50eb3a9650d23a0a94 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/20453f58b53c0dbbefd7d26b5854ca263406b127771a6983ee453bcedd34a9b8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fc833551aae41694564b5fbbb39c4a361e521f0d8274398d4d1e342e1baa725 +size 30978 diff --git a/data/2025/2503_09xxx/2503.09595/images/20691688d65a9c34668843431f6da68506d385837a8adf32d934a8338cfbb623.jpg b/data/2025/2503_09xxx/2503.09595/images/20691688d65a9c34668843431f6da68506d385837a8adf32d934a8338cfbb623.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d225dc15fb30213fbb34c76a8852bd07a57703e7 --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09595/images/20691688d65a9c34668843431f6da68506d385837a8adf32d934a8338cfbb623.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4467dc4113d9f8f778b59137b31433044e56262552947f77c7d897c6b2152b5e +size 42606 diff --git a/data/2025/2503_09xxx/2503.09595/images/20dc724f7197dd626747f2db293d23a957a1d7672a436a2fd07469a0a937b08a.jpg b/data/2025/2503_09xxx/2503.09595/images/20dc724f7197dd626747f2db293d23a957a1d7672a436a2fd07469a0a937b08a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a2141a0260eceb29894d14b2dd46a148718c4d72 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/20dc724f7197dd626747f2db293d23a957a1d7672a436a2fd07469a0a937b08a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af4b6b52b7e32b97aa7b34e5c41c960e6a324c1033aa3a7313d940750051806c +size 37359 diff --git a/data/2025/2503_09xxx/2503.09595/images/2a038e4df7899a3c6a7af3f168c48dd30504eb434167e140afe024da4ee58ef5.jpg b/data/2025/2503_09xxx/2503.09595/images/2a038e4df7899a3c6a7af3f168c48dd30504eb434167e140afe024da4ee58ef5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..64070b012d16e828f8debfa27c7b6d4bdd330516 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/2a038e4df7899a3c6a7af3f168c48dd30504eb434167e140afe024da4ee58ef5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9be88ffa2a5b0a627cb208a89d36450e9b1cd6b2302f506cf5b0b2ee5c6875fa +size 8037 diff --git a/data/2025/2503_09xxx/2503.09595/images/2af4f4a51a21c94069dd0ccc494f71591672a4c22044b52818a3462e358900fd.jpg b/data/2025/2503_09xxx/2503.09595/images/2af4f4a51a21c94069dd0ccc494f71591672a4c22044b52818a3462e358900fd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b4302d480f20fb8ec399d2b8cdc2860c853444a3 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/2af4f4a51a21c94069dd0ccc494f71591672a4c22044b52818a3462e358900fd.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:c4a89d53d9c7332989571b5c2b423833b23cedcbf4d0629dd1a5326995ec6a26 +size 4604 diff --git a/data/2025/2503_09xxx/2503.09595/images/2b0b1b1edbe72353a808756a9abec58b083266dae8f49a22cf820119f4e51b9d.jpg b/data/2025/2503_09xxx/2503.09595/images/2b0b1b1edbe72353a808756a9abec58b083266dae8f49a22cf820119f4e51b9d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..abb3c9103fa4e6f3030ba72febc14702b9941eed --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/2b0b1b1edbe72353a808756a9abec58b083266dae8f49a22cf820119f4e51b9d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02005f57cb86f65c2b0e433fb7aaea5dd169274d286a6bf9f73792c85c871a28 +size 16373 diff --git a/data/2025/2503_09xxx/2503.09595/images/2c3255e61507df64f2fe2b8fc33da6df4f8074be9bf5d02291a3cf3bb8a8a94a.jpg b/data/2025/2503_09xxx/2503.09595/images/2c3255e61507df64f2fe2b8fc33da6df4f8074be9bf5d02291a3cf3bb8a8a94a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d54c1eba0f1bfd7b680b25268e269b0ae88588c3 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/2c3255e61507df64f2fe2b8fc33da6df4f8074be9bf5d02291a3cf3bb8a8a94a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e87fa3fc7c8001a06653b99fd487c82df371738f43d62f6e593379ee9667d48 +size 75848 diff --git a/data/2025/2503_09xxx/2503.09595/images/2c9f209b0aeac72570d8c4fde360b1cf6b4dfcb2b6a0e8d4e853f7b3a78fa4d6.jpg b/data/2025/2503_09xxx/2503.09595/images/2c9f209b0aeac72570d8c4fde360b1cf6b4dfcb2b6a0e8d4e853f7b3a78fa4d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c5e3d0289c702242609e1d0621c99d0ea097a806 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/2c9f209b0aeac72570d8c4fde360b1cf6b4dfcb2b6a0e8d4e853f7b3a78fa4d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad3e83560f52511d4f45e6154464e1d78f545b84c589bd0eda53e030cbe6a618 +size 32805 diff --git 
a/data/2025/2503_09xxx/2503.09595/images/2d23c7b2c224aa44ab3a8eed9dfe17398040ed64c31737411568651ceb6dcf15.jpg b/data/2025/2503_09xxx/2503.09595/images/2d23c7b2c224aa44ab3a8eed9dfe17398040ed64c31737411568651ceb6dcf15.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2fdc7003ff036bbaf52670e6e1bff19b57281b63 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/2d23c7b2c224aa44ab3a8eed9dfe17398040ed64c31737411568651ceb6dcf15.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c16478778f240ff83b188c904556de03a95986bcda7d7112e26b9a2b203ba155 +size 47163 diff --git a/data/2025/2503_09xxx/2503.09595/images/305b266d41310514e36020946f50f0d4e84d78dde1d04ab214ef5e367fadf418.jpg b/data/2025/2503_09xxx/2503.09595/images/305b266d41310514e36020946f50f0d4e84d78dde1d04ab214ef5e367fadf418.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f2fa6f87c34575c563e92c4ebb60c632f7d96f7 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/305b266d41310514e36020946f50f0d4e84d78dde1d04ab214ef5e367fadf418.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2b16c06128429bda478b409f5769a6d89fa5175ab90c21a8cd3e73ae86697d7 +size 8139 diff --git a/data/2025/2503_09xxx/2503.09595/images/3064ab2bd099f6c34a0b0143af9b37cafbc9b07d4aff556a41357fd47bdbe8d3.jpg b/data/2025/2503_09xxx/2503.09595/images/3064ab2bd099f6c34a0b0143af9b37cafbc9b07d4aff556a41357fd47bdbe8d3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..da7d8dbb0fbdbd368e5266aab3e481ab34610a38 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/3064ab2bd099f6c34a0b0143af9b37cafbc9b07d4aff556a41357fd47bdbe8d3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e6bb50e0c493b4fe4f729dd04cbc3257cb8986a65fea06cc5099e59a1425ba2 +size 36138 diff --git a/data/2025/2503_09xxx/2503.09595/images/342a3b4296640d6e8ed1d72350bfb1237311d4fb53b1dc49759aa08ed93e6617.jpg 
b/data/2025/2503_09xxx/2503.09595/images/342a3b4296640d6e8ed1d72350bfb1237311d4fb53b1dc49759aa08ed93e6617.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ff881a55db203f1de5768312742b55676a21fef5 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/342a3b4296640d6e8ed1d72350bfb1237311d4fb53b1dc49759aa08ed93e6617.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a197d6d324c23c81cf3dc579bc5464d2a93dad5b82cb0cb177af799f8851337a +size 133207 diff --git a/data/2025/2503_09xxx/2503.09595/images/47bbef5b7ad9b2e303cbcdd98429261eea928bc17ba376000894eba09e78ab5b.jpg b/data/2025/2503_09xxx/2503.09595/images/47bbef5b7ad9b2e303cbcdd98429261eea928bc17ba376000894eba09e78ab5b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0146038420a09141f103ffd560e4d2dae9fea41 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/47bbef5b7ad9b2e303cbcdd98429261eea928bc17ba376000894eba09e78ab5b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ce8e4d5f175177fc11ba3140daf18b307153d45ff8507378f60ac3e9042070b +size 48216 diff --git a/data/2025/2503_09xxx/2503.09595/images/498fa6c0164db5fd88370c112e6dd2c5829dbe74624f6e825ac28219d5f5aa3d.jpg b/data/2025/2503_09xxx/2503.09595/images/498fa6c0164db5fd88370c112e6dd2c5829dbe74624f6e825ac28219d5f5aa3d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a121b47ffd68b38b5e72571139509ff8cb8f69ea --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/498fa6c0164db5fd88370c112e6dd2c5829dbe74624f6e825ac28219d5f5aa3d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc25f337a23f6454e5832cc09540f8454b7be8920289902087af29dc7bcbfbd4 +size 54992 diff --git a/data/2025/2503_09xxx/2503.09595/images/4d595693598e6f902081fbae2160c14f17629bf42e1282931e980a4273a07e54.jpg b/data/2025/2503_09xxx/2503.09595/images/4d595693598e6f902081fbae2160c14f17629bf42e1282931e980a4273a07e54.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..705264ccbd5c71ad227b89c4dc790ae7a7baa0d4 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/4d595693598e6f902081fbae2160c14f17629bf42e1282931e980a4273a07e54.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b69c91e8b4396f43deb525f0c20700f78aee6638351b5c6437962e83ee1cf670 +size 49445 diff --git a/data/2025/2503_09xxx/2503.09595/images/5259adbc56734c97b69c0f5ac5a0debfc574cf0ee9fab5cae058c157503ada7b.jpg b/data/2025/2503_09xxx/2503.09595/images/5259adbc56734c97b69c0f5ac5a0debfc574cf0ee9fab5cae058c157503ada7b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f2221795517ac4f4e6ddf85b07ee63fac18dc35f --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/5259adbc56734c97b69c0f5ac5a0debfc574cf0ee9fab5cae058c157503ada7b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bcb1bee95843419ea6f723f9b5833fe72484035c9e99dad77ca99f9496785a1 +size 37420 diff --git a/data/2025/2503_09xxx/2503.09595/images/52a4ded90059019d5599737a9ccf91373f0a89472a4470044d57fa2d382b574c.jpg b/data/2025/2503_09xxx/2503.09595/images/52a4ded90059019d5599737a9ccf91373f0a89472a4470044d57fa2d382b574c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9b557cc9a43e1a4af87feeae8843be46ebf748e7 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/52a4ded90059019d5599737a9ccf91373f0a89472a4470044d57fa2d382b574c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db9295410f1a8a8f3deeedb128068aba51be4a682901b37f1bcef0a28b6a7044 +size 47647 diff --git a/data/2025/2503_09xxx/2503.09595/images/5794ff6b8e6a1c23e1b350538a38f18063fb742919db3216ae76aa58e6f391f5.jpg b/data/2025/2503_09xxx/2503.09595/images/5794ff6b8e6a1c23e1b350538a38f18063fb742919db3216ae76aa58e6f391f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e9f58f217ae26327157a1672119b28d38766b1c --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09595/images/5794ff6b8e6a1c23e1b350538a38f18063fb742919db3216ae76aa58e6f391f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76c1e3cf4fe3fd747b903b6273f78f00a5c90ac5cbf95a8aba23132a2579575d +size 38622 diff --git a/data/2025/2503_09xxx/2503.09595/images/5f2dbd497dd96b551059198818fd41a1cc13a1d01d40d7814fd8343102c994a3.jpg b/data/2025/2503_09xxx/2503.09595/images/5f2dbd497dd96b551059198818fd41a1cc13a1d01d40d7814fd8343102c994a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..db70b5fd3b87e826c47b771fa8b0cf9c4f9f52a0 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/5f2dbd497dd96b551059198818fd41a1cc13a1d01d40d7814fd8343102c994a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:709ef947853875028841a5a8af0459389da4d8505aa93e8b34baba7e48ab11e9 +size 31371 diff --git a/data/2025/2503_09xxx/2503.09595/images/5f5749c22b33d6469cf285b89aba3d78b780ffbd4338568515ee93f4e1fa544d.jpg b/data/2025/2503_09xxx/2503.09595/images/5f5749c22b33d6469cf285b89aba3d78b780ffbd4338568515ee93f4e1fa544d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..edf81788a9399db965e4e9195888cfb1771b6769 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/5f5749c22b33d6469cf285b89aba3d78b780ffbd4338568515ee93f4e1fa544d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a97d56444e9a5221241a8d294fcdf19b35611063b3777d25a5e42054a1519271 +size 36047 diff --git a/data/2025/2503_09xxx/2503.09595/images/5ff8dcc1fc52105fef32fc975178bc49046b998c818452290cac625a89916132.jpg b/data/2025/2503_09xxx/2503.09595/images/5ff8dcc1fc52105fef32fc975178bc49046b998c818452290cac625a89916132.jpg new file mode 100644 index 0000000000000000000000000000000000000000..58e7fc28e7e3af9136d392d53647955decbd88d5 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/5ff8dcc1fc52105fef32fc975178bc49046b998c818452290cac625a89916132.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:ca07acb6e701f09e26c8ef12b8b098e92786323be36af56874302fdc933fdad1 +size 67056 diff --git a/data/2025/2503_09xxx/2503.09595/images/6adf02f646ee4da228177a9bb3d6b9810b033aeaac6c9f68056cd271eb9c47c2.jpg b/data/2025/2503_09xxx/2503.09595/images/6adf02f646ee4da228177a9bb3d6b9810b033aeaac6c9f68056cd271eb9c47c2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..17baaf986cc0cbe0ad19da47da99b90e151d04f3 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/6adf02f646ee4da228177a9bb3d6b9810b033aeaac6c9f68056cd271eb9c47c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23d3aab906a665321db5e5dc975ca06ae7a08cfe47b189e4c9cd960e53761134 +size 10719 diff --git a/data/2025/2503_09xxx/2503.09595/images/6e1f9e94954a2f07537b844ab77f7db174e9dbc07feff0f2ba04ac58b383fd0c.jpg b/data/2025/2503_09xxx/2503.09595/images/6e1f9e94954a2f07537b844ab77f7db174e9dbc07feff0f2ba04ac58b383fd0c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6227df2e81497df76b266bc91d2486161f28a055 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/6e1f9e94954a2f07537b844ab77f7db174e9dbc07feff0f2ba04ac58b383fd0c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b03abeb13348d04b330d814d601f7d1b1a3ca940b6e4c39b06ce5868be6cca9 +size 5702 diff --git a/data/2025/2503_09xxx/2503.09595/images/6e609a608080271c51e72d3bb004d998e6ef365863e6486c69de7a14c67bf666.jpg b/data/2025/2503_09xxx/2503.09595/images/6e609a608080271c51e72d3bb004d998e6ef365863e6486c69de7a14c67bf666.jpg new file mode 100644 index 0000000000000000000000000000000000000000..382f56db1e9b69c509bedd770c352a230e3cf4b3 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/6e609a608080271c51e72d3bb004d998e6ef365863e6486c69de7a14c67bf666.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17f999ae727bddf01dfee136fae834dc51d1811983ba161851944cda748aa945 +size 3704 diff --git 
a/data/2025/2503_09xxx/2503.09595/images/733aaf4a714c7ec870de8f5e762d4a545653a15ef5ea24677ad5c2400309aed0.jpg b/data/2025/2503_09xxx/2503.09595/images/733aaf4a714c7ec870de8f5e762d4a545653a15ef5ea24677ad5c2400309aed0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f3cccae286884b603f3497d352fea66d67c9d6cc --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/733aaf4a714c7ec870de8f5e762d4a545653a15ef5ea24677ad5c2400309aed0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94720a8fccdb1c488f6b3b874605da323ba8fd7ebb2962c4c5c2e4e56e2752fb +size 50221 diff --git a/data/2025/2503_09xxx/2503.09595/images/73a89a0c5b340af83339e7bdee6111a2850a250f52d7a51435656c3c43642fe2.jpg b/data/2025/2503_09xxx/2503.09595/images/73a89a0c5b340af83339e7bdee6111a2850a250f52d7a51435656c3c43642fe2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0dbb3bb0b103b0862d568fd87538469f84693be --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/73a89a0c5b340af83339e7bdee6111a2850a250f52d7a51435656c3c43642fe2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43241756bcc4f946c0834be2cea0da87ca24cb7ed76b11793ab6407862e74704 +size 21300 diff --git a/data/2025/2503_09xxx/2503.09595/images/74d2a95e99fc2fd73ad4d1df3a6192e0a7054af3d825efca106428f50d84a7b8.jpg b/data/2025/2503_09xxx/2503.09595/images/74d2a95e99fc2fd73ad4d1df3a6192e0a7054af3d825efca106428f50d84a7b8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3c2f8c43669a1632c422fa26a706dd0fbc94038e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/74d2a95e99fc2fd73ad4d1df3a6192e0a7054af3d825efca106428f50d84a7b8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6def912708765a02d09bcb65434de235179925901240faeca41e72f0338f6b7e +size 5585 diff --git a/data/2025/2503_09xxx/2503.09595/images/75ae96615ee2cc27de037c2571b7e943cd4557143b31224230f453e92e8ee6c4.jpg 
b/data/2025/2503_09xxx/2503.09595/images/75ae96615ee2cc27de037c2571b7e943cd4557143b31224230f453e92e8ee6c4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2fb7410b28db549aa1d35fc8d30ce7bbc6d37ea --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/75ae96615ee2cc27de037c2571b7e943cd4557143b31224230f453e92e8ee6c4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b90786c333a23229a1a8a98c18cee03e1dcf2f5054ad29cb551bdf21b2a274d +size 2755 diff --git a/data/2025/2503_09xxx/2503.09595/images/75bc9f270274a876e0f2d9bb2c4307c38aafd436f796800aeee64fbd5f2d9b29.jpg b/data/2025/2503_09xxx/2503.09595/images/75bc9f270274a876e0f2d9bb2c4307c38aafd436f796800aeee64fbd5f2d9b29.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5824f0396f41aabdc8418f5856a2cfa9331b32ec --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/75bc9f270274a876e0f2d9bb2c4307c38aafd436f796800aeee64fbd5f2d9b29.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de16cc137d2e388bb816f802403e0e9ab5eed7015107848baa34b69afc4e5b22 +size 11230 diff --git a/data/2025/2503_09xxx/2503.09595/images/76661e958af011fe7f1f7ab37c8d6b11f4f98a6f04af442101dbe8308c1127ee.jpg b/data/2025/2503_09xxx/2503.09595/images/76661e958af011fe7f1f7ab37c8d6b11f4f98a6f04af442101dbe8308c1127ee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..05a96e1be71d85478f40513e72f42c4fdd7768df --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/76661e958af011fe7f1f7ab37c8d6b11f4f98a6f04af442101dbe8308c1127ee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71c7ebf99d1549b6d56d9cac178e75d48acdc4dd23b039ecd1a95db743a474a9 +size 76434 diff --git a/data/2025/2503_09xxx/2503.09595/images/76e15cbc4824d4b47dfc7cac69adb8b782eaf0ac238e42bf738fa8fe88e04090.jpg b/data/2025/2503_09xxx/2503.09595/images/76e15cbc4824d4b47dfc7cac69adb8b782eaf0ac238e42bf738fa8fe88e04090.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..aa6f7acd804ccd7b3ad38056d071ad49d06c3c09 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/76e15cbc4824d4b47dfc7cac69adb8b782eaf0ac238e42bf738fa8fe88e04090.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80215c513ccfa690bbe08f68e4a5314ca209b082666c0276d2269298d82734ce +size 7734 diff --git a/data/2025/2503_09xxx/2503.09595/images/7d4ceaab34fce33596b3dd1e9d4ba7a5b2fc93095d7bd082617fa0ffe00a3c57.jpg b/data/2025/2503_09xxx/2503.09595/images/7d4ceaab34fce33596b3dd1e9d4ba7a5b2fc93095d7bd082617fa0ffe00a3c57.jpg new file mode 100644 index 0000000000000000000000000000000000000000..193e937a795274acabb26591d5f439f69708459e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/7d4ceaab34fce33596b3dd1e9d4ba7a5b2fc93095d7bd082617fa0ffe00a3c57.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3fb29677f11655e146350059a0834a31f6d22e26b23849d8a3f20b4585d6beb +size 42331 diff --git a/data/2025/2503_09xxx/2503.09595/images/7e1e125b014d5443ce72b21ee8652e2160e81fcf0acf07a108129a1ed596ddf4.jpg b/data/2025/2503_09xxx/2503.09595/images/7e1e125b014d5443ce72b21ee8652e2160e81fcf0acf07a108129a1ed596ddf4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..853095104f033a31ad2712b68d53ece37f255c83 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/7e1e125b014d5443ce72b21ee8652e2160e81fcf0acf07a108129a1ed596ddf4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0fb543b1ee1f0617cd5b24adedf3c97ddaf37f2b51965157eb854a48cbb7f83a +size 11938 diff --git a/data/2025/2503_09xxx/2503.09595/images/7ecc93d403dcb06758ac993647a892395af1264eeca956ba139d2929c18192ba.jpg b/data/2025/2503_09xxx/2503.09595/images/7ecc93d403dcb06758ac993647a892395af1264eeca956ba139d2929c18192ba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b4e5e7e5efef70f3c35744eae2f333770e70293 --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09595/images/7ecc93d403dcb06758ac993647a892395af1264eeca956ba139d2929c18192ba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31d519fb7b6480a19f59afa53e8f632141d34b2e67f05485ac6423676b2abd9c +size 51606 diff --git a/data/2025/2503_09xxx/2503.09595/images/8317550ad7aa20a11f3bf97aa716e1da5f6f471ce296a6b0a5da5eba39db5c52.jpg b/data/2025/2503_09xxx/2503.09595/images/8317550ad7aa20a11f3bf97aa716e1da5f6f471ce296a6b0a5da5eba39db5c52.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d4f53ef5de01c4fa64e3a732ca18659c1878c632 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/8317550ad7aa20a11f3bf97aa716e1da5f6f471ce296a6b0a5da5eba39db5c52.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12a2fff8c913798955712becfb0534c37f9bb8a8ef52e4056f341a213141b650 +size 29624 diff --git a/data/2025/2503_09xxx/2503.09595/images/8862d09f5523d53e73a41fca13423e9ac96d265eb77cc7788176d5f31c7abc8a.jpg b/data/2025/2503_09xxx/2503.09595/images/8862d09f5523d53e73a41fca13423e9ac96d265eb77cc7788176d5f31c7abc8a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c4a10d0e665a3f7719b2d021c52596019f36029c --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/8862d09f5523d53e73a41fca13423e9ac96d265eb77cc7788176d5f31c7abc8a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6245c67b0f45e38b0fd8496f86d8b9411f51de567705119d388055c22b3364a +size 27894 diff --git a/data/2025/2503_09xxx/2503.09595/images/88670abb85fb73081ca571f4fc556131bc94f22fa766b8d2ff19594c1ac79e8b.jpg b/data/2025/2503_09xxx/2503.09595/images/88670abb85fb73081ca571f4fc556131bc94f22fa766b8d2ff19594c1ac79e8b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3576fc4b640fe943761733e1076b08acf44b5149 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/88670abb85fb73081ca571f4fc556131bc94f22fa766b8d2ff19594c1ac79e8b.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:b25bed2cdd32d8440411713e8994e52ebc59c069ab80c77847a163eeb8522dc1 +size 17749 diff --git a/data/2025/2503_09xxx/2503.09595/images/8ae69436605c27369e44a6c3f836bbf91b30b4935fa8dcf15a6d4b282938dfee.jpg b/data/2025/2503_09xxx/2503.09595/images/8ae69436605c27369e44a6c3f836bbf91b30b4935fa8dcf15a6d4b282938dfee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aaa033a9a69263370c3653ca3832ee720e7c5018 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/8ae69436605c27369e44a6c3f836bbf91b30b4935fa8dcf15a6d4b282938dfee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:305d691d9733dfcadec9d4f85286f0782ba185a000971263740aa6fefac3933c +size 4268 diff --git a/data/2025/2503_09xxx/2503.09595/images/8cb3aa9d253fc5e51c44a557e5629610fa5ad282a6ce25364e557518e7b8d6c7.jpg b/data/2025/2503_09xxx/2503.09595/images/8cb3aa9d253fc5e51c44a557e5629610fa5ad282a6ce25364e557518e7b8d6c7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2dbdcf2f6003ac88f6851a0fbebe75eaf43a7b29 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/8cb3aa9d253fc5e51c44a557e5629610fa5ad282a6ce25364e557518e7b8d6c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:088b68924449b546b0800e9125c54c888c90c3f69f9317f37ef4346d8b73e740 +size 53546 diff --git a/data/2025/2503_09xxx/2503.09595/images/8f522784783a329f5017e6371582ae27e84586d17a8f7bcd9025f339933400e5.jpg b/data/2025/2503_09xxx/2503.09595/images/8f522784783a329f5017e6371582ae27e84586d17a8f7bcd9025f339933400e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f51659de2c138d09c6fb76141d555e8a3b6d78fd --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/8f522784783a329f5017e6371582ae27e84586d17a8f7bcd9025f339933400e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98e66aa2538bb6f9bce9eb3435705a4a38b54c7da552692773cac32d4ab0434e +size 60905 diff --git 
a/data/2025/2503_09xxx/2503.09595/images/91f342955c4abb85297c05b6fbf080e1ae798b04587e39e4104a2985d585e4d7.jpg b/data/2025/2503_09xxx/2503.09595/images/91f342955c4abb85297c05b6fbf080e1ae798b04587e39e4104a2985d585e4d7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2af36088aad6caf9b34eba0cf3bc2772ede635af --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/91f342955c4abb85297c05b6fbf080e1ae798b04587e39e4104a2985d585e4d7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e517e339eeb3aa474783938d27af5f18e488f90b153c71ddd2763c4bab943301 +size 11028 diff --git a/data/2025/2503_09xxx/2503.09595/images/936b8ee3ceeca5f82d0050ae068b79e918e73267eaa429a43c2317f1f2323c25.jpg b/data/2025/2503_09xxx/2503.09595/images/936b8ee3ceeca5f82d0050ae068b79e918e73267eaa429a43c2317f1f2323c25.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cc91c375cea00bb501e7b768598b571eee4ebfcb --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/936b8ee3ceeca5f82d0050ae068b79e918e73267eaa429a43c2317f1f2323c25.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a9b995ab963bba4c9c4ad4d4bc6b9e157e3e9d721b9b76c7bbfc968773bbed2 +size 48867 diff --git a/data/2025/2503_09xxx/2503.09595/images/945c3d2b2e7ba5dd24811fc027b02c812dd3fdb62673324359a47eefccc06ed5.jpg b/data/2025/2503_09xxx/2503.09595/images/945c3d2b2e7ba5dd24811fc027b02c812dd3fdb62673324359a47eefccc06ed5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eaff790929c70ec16c8df0318adbd9eb60d9227d --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/945c3d2b2e7ba5dd24811fc027b02c812dd3fdb62673324359a47eefccc06ed5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a91cba57519f8ae6d4bb940d3fbd43ebaed64fe1a62b7ed188a041bae3236667 +size 44767 diff --git a/data/2025/2503_09xxx/2503.09595/images/99e34ae6711da1dd7f954b4f89af1eb173f91e59afdf52dfd382ee502dbf0eb6.jpg 
b/data/2025/2503_09xxx/2503.09595/images/99e34ae6711da1dd7f954b4f89af1eb173f91e59afdf52dfd382ee502dbf0eb6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d67b15254fde68f749ed3ea12b0df9acfa4320ad --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/99e34ae6711da1dd7f954b4f89af1eb173f91e59afdf52dfd382ee502dbf0eb6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eac08e72e92242466d721f25b0e1321d29d0b9720d03a4e5313a9d4c4c5264a4 +size 5799 diff --git a/data/2025/2503_09xxx/2503.09595/images/9a02ea6f114914c7d76f049a88a46de99ef271b776da7080f7929a05b2d82157.jpg b/data/2025/2503_09xxx/2503.09595/images/9a02ea6f114914c7d76f049a88a46de99ef271b776da7080f7929a05b2d82157.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bfbf0fc7058d5d4905593f087a93c615829dc7bf --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/9a02ea6f114914c7d76f049a88a46de99ef271b776da7080f7929a05b2d82157.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1913c874050552cd8da13051af321798004aee4c80842efdfc2125d54dff498 +size 4097 diff --git a/data/2025/2503_09xxx/2503.09595/images/9cb6168eacd785e4f741b3dbc66836cc1b13221d84cacac490669a7f161086a6.jpg b/data/2025/2503_09xxx/2503.09595/images/9cb6168eacd785e4f741b3dbc66836cc1b13221d84cacac490669a7f161086a6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..de9b22767c94bd1f9b906ccd3aa0ce685a4dc96a --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/9cb6168eacd785e4f741b3dbc66836cc1b13221d84cacac490669a7f161086a6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8771034ecc2df1b5326dd002154ee7d375c1fd90feba681ccaa4cb2dc4ca719 +size 10010 diff --git a/data/2025/2503_09xxx/2503.09595/images/a00eb2f6afd73e7890435740d404820471ec3c29a424ac89fa8f08f0f78a9a90.jpg b/data/2025/2503_09xxx/2503.09595/images/a00eb2f6afd73e7890435740d404820471ec3c29a424ac89fa8f08f0f78a9a90.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..5a0acb0a216df7b1c55dc625c330f9e620f85f68 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/a00eb2f6afd73e7890435740d404820471ec3c29a424ac89fa8f08f0f78a9a90.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:433c09151c5923760189a88e18c0072717e82d732dbd13a2e425213714edc3c0 +size 28443 diff --git a/data/2025/2503_09xxx/2503.09595/images/a17478f38168df434239dbbc27ca455093b6c1e40467cd5487d213bb5d83095d.jpg b/data/2025/2503_09xxx/2503.09595/images/a17478f38168df434239dbbc27ca455093b6c1e40467cd5487d213bb5d83095d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f950c2e5f3c3261d695f51e8a72349a87985f7ac --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/a17478f38168df434239dbbc27ca455093b6c1e40467cd5487d213bb5d83095d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5962c29e4c6fecc5482f088ecc32affc8fb9d74207a5ce5c134c2d50d8c749a +size 40113 diff --git a/data/2025/2503_09xxx/2503.09595/images/a79a3d2c12bca59c6526e7216d60bb7580bc8c63576be8552b2f29dfd5c346d2.jpg b/data/2025/2503_09xxx/2503.09595/images/a79a3d2c12bca59c6526e7216d60bb7580bc8c63576be8552b2f29dfd5c346d2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..befec60a6cb0e2142ca23b38a86de8abaf3455f2 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/a79a3d2c12bca59c6526e7216d60bb7580bc8c63576be8552b2f29dfd5c346d2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f91c7cda956be3b9f97a32d2010774478e84ad81ac3c95ca7602b4831864ddba +size 44962 diff --git a/data/2025/2503_09xxx/2503.09595/images/a7f340f4c1bc1798d01d2bcfa3aac741d7c979c6b80afc4910bba5d647c78286.jpg b/data/2025/2503_09xxx/2503.09595/images/a7f340f4c1bc1798d01d2bcfa3aac741d7c979c6b80afc4910bba5d647c78286.jpg new file mode 100644 index 0000000000000000000000000000000000000000..057f902bce55bc60910564fc148eb76184e61cda --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09595/images/a7f340f4c1bc1798d01d2bcfa3aac741d7c979c6b80afc4910bba5d647c78286.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51e3b56a442d40741489b532f68735f8d86cdbf5e7de40f3e3d4afa472d25fa6 +size 11028 diff --git a/data/2025/2503_09xxx/2503.09595/images/afb3cd5801f3ce6384f1620771da148f5537afbdc9f1890db3bb151e6321637a.jpg b/data/2025/2503_09xxx/2503.09595/images/afb3cd5801f3ce6384f1620771da148f5537afbdc9f1890db3bb151e6321637a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..75eafa5d6338dfd2fc133e4cdf0bffced4536222 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/afb3cd5801f3ce6384f1620771da148f5537afbdc9f1890db3bb151e6321637a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dba95466ad7fc88103a69601a3d481403a7d12243fc314298be6ab0cf3b31703 +size 4270 diff --git a/data/2025/2503_09xxx/2503.09595/images/afd6a41eb9e3a6bfc87ee7d008d6fc6d00d3b71c75ccf91adf12e6a11f200f01.jpg b/data/2025/2503_09xxx/2503.09595/images/afd6a41eb9e3a6bfc87ee7d008d6fc6d00d3b71c75ccf91adf12e6a11f200f01.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f9ff6204bed4b58974f089bf59e95786278944b --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/afd6a41eb9e3a6bfc87ee7d008d6fc6d00d3b71c75ccf91adf12e6a11f200f01.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d4de4d10b185a3db10c996dd70d62f324b58a41d24d67d0785004c3523b9b3f +size 12972 diff --git a/data/2025/2503_09xxx/2503.09595/images/b39f635b4b2874364c63137dcdc9fc15f4eb68ba57cc4f852a463fac4a5c56f0.jpg b/data/2025/2503_09xxx/2503.09595/images/b39f635b4b2874364c63137dcdc9fc15f4eb68ba57cc4f852a463fac4a5c56f0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b4ee85d46d1bb006cbdeca9f956d4c08b93f1438 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/b39f635b4b2874364c63137dcdc9fc15f4eb68ba57cc4f852a463fac4a5c56f0.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:16b49613001b22b9b45c0aabc08115190c573a239b96a008f5353ebe67687885 +size 44136 diff --git a/data/2025/2503_09xxx/2503.09595/images/b68b172758804959f5687238c0b53d284f904543618dda06d81c04419d71a2ce.jpg b/data/2025/2503_09xxx/2503.09595/images/b68b172758804959f5687238c0b53d284f904543618dda06d81c04419d71a2ce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d90cd0cad2199379ce2b25c5ede18d651451894f --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/b68b172758804959f5687238c0b53d284f904543618dda06d81c04419d71a2ce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23277c223ef102c13ad8b4fbc06f03e9a6a939dc4b5f6e03f36edcc21a278996 +size 49537 diff --git a/data/2025/2503_09xxx/2503.09595/images/b87ce2810e76a242273a09ac0b48625aaea5cdd89ebf4f8ad8f5727f690a705a.jpg b/data/2025/2503_09xxx/2503.09595/images/b87ce2810e76a242273a09ac0b48625aaea5cdd89ebf4f8ad8f5727f690a705a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dfb56f49eaa729ad0b51a05ec98cfb8f8692e037 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/b87ce2810e76a242273a09ac0b48625aaea5cdd89ebf4f8ad8f5727f690a705a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:988839ccd2bd21feca411c6cd2fa0f2c68164b3fccac79cc71f105a8f7a055fa +size 5592 diff --git a/data/2025/2503_09xxx/2503.09595/images/baf79faae3094c985d469cccd5f8af1aef1f088b8c03e4b23c119ec86aebd807.jpg b/data/2025/2503_09xxx/2503.09595/images/baf79faae3094c985d469cccd5f8af1aef1f088b8c03e4b23c119ec86aebd807.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a4ffdff89605d96d67b7f01f3f025eb6eada058f --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/baf79faae3094c985d469cccd5f8af1aef1f088b8c03e4b23c119ec86aebd807.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc9cf6d799506b71900a212d189e96428fa65e60da18738dcc78fa4ed3bd7ab7 +size 43386 diff --git 
a/data/2025/2503_09xxx/2503.09595/images/c26bfecf28ebcde71af6d60da63253f75d02e0c5503f1425cbff24859ab09444.jpg b/data/2025/2503_09xxx/2503.09595/images/c26bfecf28ebcde71af6d60da63253f75d02e0c5503f1425cbff24859ab09444.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b47bcf205cca23995f38393c0439ed32070c88c7 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/c26bfecf28ebcde71af6d60da63253f75d02e0c5503f1425cbff24859ab09444.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d88219fe428bf84460292936bd64ded1f5bbe2a9f0e4a208e04a1f81c355ce0 +size 50166 diff --git a/data/2025/2503_09xxx/2503.09595/images/c3078aa13d861c89ebf70c3db430662cdcd8c83d8880aa9a934a258e5c3598e5.jpg b/data/2025/2503_09xxx/2503.09595/images/c3078aa13d861c89ebf70c3db430662cdcd8c83d8880aa9a934a258e5c3598e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d9248c3bc8d999384d46f3b1bbe10faacc9d3a9 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/c3078aa13d861c89ebf70c3db430662cdcd8c83d8880aa9a934a258e5c3598e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f5a105e8fc20b8c17f35608864e823d31408ca36bc32e132f6f8a832b047166 +size 37620 diff --git a/data/2025/2503_09xxx/2503.09595/images/c8f534dc359b0c6486ea7d10444e18fb31e91a7d173e50458a7fb898ac132de4.jpg b/data/2025/2503_09xxx/2503.09595/images/c8f534dc359b0c6486ea7d10444e18fb31e91a7d173e50458a7fb898ac132de4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d086d51a9f7c5b0317b7975ebb147cb3344697dd --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/c8f534dc359b0c6486ea7d10444e18fb31e91a7d173e50458a7fb898ac132de4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d888f345cf4c8b49c10fae3bfdce346b2dc52efa54302d5a7daca7b8e44c5737 +size 46301 diff --git a/data/2025/2503_09xxx/2503.09595/images/cd46345119bb9f763ede3215ff65c6a675244fe939c3245326654a7eb9c2c6b0.jpg 
b/data/2025/2503_09xxx/2503.09595/images/cd46345119bb9f763ede3215ff65c6a675244fe939c3245326654a7eb9c2c6b0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e1f02fa48904ca6bb3524589b8376985f6f3bab3 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/cd46345119bb9f763ede3215ff65c6a675244fe939c3245326654a7eb9c2c6b0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77d4660e38c1633929c81bd76be9b57232aeed742cc5a46bab57781aff730b27 +size 4836 diff --git a/data/2025/2503_09xxx/2503.09595/images/d0de139686427ffdf841921b45e326470ebe6a127e77cc98801376148a86c3b4.jpg b/data/2025/2503_09xxx/2503.09595/images/d0de139686427ffdf841921b45e326470ebe6a127e77cc98801376148a86c3b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0c7217b3f22ae13ee787d255cbf70ad60495b434 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/d0de139686427ffdf841921b45e326470ebe6a127e77cc98801376148a86c3b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6bc0836bdd75c5a3f203ef211a2750dc02689ad31b656c7087f7997ff85ff6e +size 26256 diff --git a/data/2025/2503_09xxx/2503.09595/images/d20954189b98d66653a95abacce5f2c333474ea3a756f9e105766842d1c52aaf.jpg b/data/2025/2503_09xxx/2503.09595/images/d20954189b98d66653a95abacce5f2c333474ea3a756f9e105766842d1c52aaf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..34df03431d213f5d019a779a9a1584ba010466ed --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/d20954189b98d66653a95abacce5f2c333474ea3a756f9e105766842d1c52aaf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07b1c70e1770d396c3b7b415020ae4ec7aefbbd1956ec94bcd9f6125ed38194a +size 57660 diff --git a/data/2025/2503_09xxx/2503.09595/images/d24e9d610f63859bf566728e5b51c496af6f6f54f828b2cc2aae2a5186055f4c.jpg b/data/2025/2503_09xxx/2503.09595/images/d24e9d610f63859bf566728e5b51c496af6f6f54f828b2cc2aae2a5186055f4c.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..206e7ee0f5202912a7db83ae45e33c65948fee8e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/d24e9d610f63859bf566728e5b51c496af6f6f54f828b2cc2aae2a5186055f4c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82b5fe82170849f3e6cf536f3c7765039a4f18b64e2f3d195d25aa4492430af4 +size 51253 diff --git a/data/2025/2503_09xxx/2503.09595/images/dad12d0b6072d76092e95ba4d3424ea053c4dfb1ed2ec31c20542fc94215138d.jpg b/data/2025/2503_09xxx/2503.09595/images/dad12d0b6072d76092e95ba4d3424ea053c4dfb1ed2ec31c20542fc94215138d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d58477b70b0ab8914727dccfa54b39779e0e2d9 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/dad12d0b6072d76092e95ba4d3424ea053c4dfb1ed2ec31c20542fc94215138d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf1aadc0cc79bd025824812ae0f1b6e8a4d0e435be98a6077aa6dcf548b409c4 +size 6906 diff --git a/data/2025/2503_09xxx/2503.09595/images/dad22c2ccf02d28f88996ada3f6458c8f7ba92408c341e39e6da0ed00ee05fbb.jpg b/data/2025/2503_09xxx/2503.09595/images/dad22c2ccf02d28f88996ada3f6458c8f7ba92408c341e39e6da0ed00ee05fbb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ed16a998452224d394af6d1065c68318f76bf6e0 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/dad22c2ccf02d28f88996ada3f6458c8f7ba92408c341e39e6da0ed00ee05fbb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6660aeffbd15ce96f0f7dfd02020a71f8dbc87716904d62c837e8f004e189fc +size 28956 diff --git a/data/2025/2503_09xxx/2503.09595/images/dbfaf034811ce5459e629284804f36c7a1d8b92410b0042ff3289fca3e7293e2.jpg b/data/2025/2503_09xxx/2503.09595/images/dbfaf034811ce5459e629284804f36c7a1d8b92410b0042ff3289fca3e7293e2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bfecdf13e100410b4b0a570d0fc71eb2f232db95 --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09595/images/dbfaf034811ce5459e629284804f36c7a1d8b92410b0042ff3289fca3e7293e2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3da56f180d96bb335c0f94baa2030b41319af366fcaad8a76b5c1ad9c30d033e +size 7854 diff --git a/data/2025/2503_09xxx/2503.09595/images/dcf5758aab2710279523e38fe46165467a3e180eeef0ed0b3050233202412b9e.jpg b/data/2025/2503_09xxx/2503.09595/images/dcf5758aab2710279523e38fe46165467a3e180eeef0ed0b3050233202412b9e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..35958fc71c540a4163803956694500c023d8514d --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/dcf5758aab2710279523e38fe46165467a3e180eeef0ed0b3050233202412b9e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:639ed0692e82f2ce8ecea7eff3e6928750f1ee651b1a25925f563cf85d49ab83 +size 11347 diff --git a/data/2025/2503_09xxx/2503.09595/images/e39efd16222ce5ec5787e1ab30db7573f7cb53089f14019eb71cfee5130fa917.jpg b/data/2025/2503_09xxx/2503.09595/images/e39efd16222ce5ec5787e1ab30db7573f7cb53089f14019eb71cfee5130fa917.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ae8b9ba529585efe1ee1814b208afc43ad0ee331 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/e39efd16222ce5ec5787e1ab30db7573f7cb53089f14019eb71cfee5130fa917.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75a6de8b3153222eb319517577660cf14d5d425c7b1de5aefc04efbceedbafe3 +size 40542 diff --git a/data/2025/2503_09xxx/2503.09595/images/ebac98ec4b7c4b91f83432d9328234ee0cb2346246756bf3c6ef6bf977273ad0.jpg b/data/2025/2503_09xxx/2503.09595/images/ebac98ec4b7c4b91f83432d9328234ee0cb2346246756bf3c6ef6bf977273ad0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0866da8534250444200ecae50e5067e46b85ac2c --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/ebac98ec4b7c4b91f83432d9328234ee0cb2346246756bf3c6ef6bf977273ad0.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:c95f876c0c6fe36b719800b1bfa4a2fbc8e557ca59352ff6d1a9670a277f5e37 +size 4305 diff --git a/data/2025/2503_09xxx/2503.09595/images/ec04a35a42939e0e06b8bc1b07070b0c0c90ff460116ede97d74c46464f397b5.jpg b/data/2025/2503_09xxx/2503.09595/images/ec04a35a42939e0e06b8bc1b07070b0c0c90ff460116ede97d74c46464f397b5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d2cb11d16d4cd3c53fd2e0b8264224f2e521771 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/ec04a35a42939e0e06b8bc1b07070b0c0c90ff460116ede97d74c46464f397b5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa1dc7d0cc65a835e55f95e94cd74070438bd7a436ee46a3455f3a7266365fee +size 7692 diff --git a/data/2025/2503_09xxx/2503.09595/images/eceaeaf5277e4ba1350d29f94dfc4847af28e5a6642caf910ffe300f4e298d7c.jpg b/data/2025/2503_09xxx/2503.09595/images/eceaeaf5277e4ba1350d29f94dfc4847af28e5a6642caf910ffe300f4e298d7c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0d223b1555e4542ba5a4da44b271f25ce67a8f00 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/eceaeaf5277e4ba1350d29f94dfc4847af28e5a6642caf910ffe300f4e298d7c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b711170d41a4037b15fe5a0554b30590cbafcc9bccaa93e4ce639b949cd2cc6 +size 5416 diff --git a/data/2025/2503_09xxx/2503.09595/images/f52bf9a1a1b538e95d70fbf8e8072fe5c66e0fb960e1c0adc65706d6253840a0.jpg b/data/2025/2503_09xxx/2503.09595/images/f52bf9a1a1b538e95d70fbf8e8072fe5c66e0fb960e1c0adc65706d6253840a0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d965d7796308a29ae50a73a58e236978e3c1c7dc --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/f52bf9a1a1b538e95d70fbf8e8072fe5c66e0fb960e1c0adc65706d6253840a0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70e24181cc74c32db482f0bf2ec7dbc59e8f062baae91ff648575489ee1f3185 +size 41138 diff --git 
a/data/2025/2503_09xxx/2503.09595/images/f5955939eff061d366f9461ca64daddccce438bc7cf26e0ca0177e4bea14e76c.jpg b/data/2025/2503_09xxx/2503.09595/images/f5955939eff061d366f9461ca64daddccce438bc7cf26e0ca0177e4bea14e76c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c34510ae0606b344e34823e540283044ee550542 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/f5955939eff061d366f9461ca64daddccce438bc7cf26e0ca0177e4bea14e76c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0e2f2175c9057685e0f50ed41026258e6f0b90048fe3de39c92c487a5c9f04d +size 46747 diff --git a/data/2025/2503_09xxx/2503.09595/images/f96ae24b9af0730f8094994e180d7d5a6e32804e7b580a5dadd002836de5fced.jpg b/data/2025/2503_09xxx/2503.09595/images/f96ae24b9af0730f8094994e180d7d5a6e32804e7b580a5dadd002836de5fced.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f36544e5d3269871bc5133bb46a90026e08ffbd --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/f96ae24b9af0730f8094994e180d7d5a6e32804e7b580a5dadd002836de5fced.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ff2a5d4c04d8d68064ee209d5ad055bb05f2b14315e65f3a7de9f8bf50d910a +size 70419 diff --git a/data/2025/2503_09xxx/2503.09595/images/f9b6d84ca868eb7b320e5bf66a6830342a093523d4a8c01e2b0d2c6c9b32e833.jpg b/data/2025/2503_09xxx/2503.09595/images/f9b6d84ca868eb7b320e5bf66a6830342a093523d4a8c01e2b0d2c6c9b32e833.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f9960989af55ad423d09dd4d41fd83e85b9af8bf --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/f9b6d84ca868eb7b320e5bf66a6830342a093523d4a8c01e2b0d2c6c9b32e833.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44c31b8d243046c715b3871d811119cdcea15db9982969af06f782c59336a792 +size 35149 diff --git a/data/2025/2503_09xxx/2503.09595/images/fa39dd5c194a70cc51e8058e5369a1cc529a139d485f223e0720f97bfeb118d8.jpg 
b/data/2025/2503_09xxx/2503.09595/images/fa39dd5c194a70cc51e8058e5369a1cc529a139d485f223e0720f97bfeb118d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4cba29aad6c7cef707adbc2f695856a87c1ff080 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/fa39dd5c194a70cc51e8058e5369a1cc529a139d485f223e0720f97bfeb118d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b2e8ec9e6c04c29a6493e581325a92e1207423e8615bd7ed1e9da57dd29a98c +size 4360 diff --git a/data/2025/2503_09xxx/2503.09595/images/fb0eb57bfd7df70674700e7d467e475c936b995b0451b6a6746c321bb30fd4dc.jpg b/data/2025/2503_09xxx/2503.09595/images/fb0eb57bfd7df70674700e7d467e475c936b995b0451b6a6746c321bb30fd4dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..35490c3def86760b2929c40b8c945a1f8d4624c6 --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/fb0eb57bfd7df70674700e7d467e475c936b995b0451b6a6746c321bb30fd4dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ba90b48132d09aa41e26de333b85adcf8b03304bcf45fb3647f1d0e18db25c6 +size 40194 diff --git a/data/2025/2503_09xxx/2503.09595/images/fdf0c1240713a0f765f0563462d434ced74c2fc826cdce4f827d1a1a50939cc7.jpg b/data/2025/2503_09xxx/2503.09595/images/fdf0c1240713a0f765f0563462d434ced74c2fc826cdce4f827d1a1a50939cc7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e708aa1dd7f89acebefe74bcf2647d47d424d38e --- /dev/null +++ b/data/2025/2503_09xxx/2503.09595/images/fdf0c1240713a0f765f0563462d434ced74c2fc826cdce4f827d1a1a50939cc7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd8e31419512b282482f6a7daabf57cc98c0ad4e9f5d4ed35dbecbdebfecfca3 +size 39275 diff --git a/data/2025/2503_09xxx/2503.09595/layout.json b/data/2025/2503_09xxx/2503.09595/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..7a700d154f5764a146e1ebf3aac6a5dc3cc65f84 --- /dev/null +++ 
b/data/2025/2503_09xxx/2503.09595/layout.json @@ -0,0 +1,17304 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 135, + 87, + 460, + 124 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 87, + 460, + 124 + ], + "spans": [ + { + "bbox": [ + 135, + 87, + 460, + 124 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 157, + 488, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 157, + 488, + 171 + ], + "spans": [ + { + "bbox": [ + 105, + 157, + 488, + 171 + ], + "type": "text", + "content": "Chenyu Li " + }, + { + "bbox": [ + 105, + 157, + 488, + 171 + ], + "type": "inline_equation", + "content": "^{*1}" + }, + { + "bbox": [ + 105, + 157, + 488, + 171 + ], + "type": "text", + "content": " Oscar Michel " + }, + { + "bbox": [ + 105, + 157, + 488, + 171 + ], + "type": "inline_equation", + "content": "^{*1}" + }, + { + "bbox": [ + 105, + 157, + 488, + 171 + ], + "type": "text", + "content": " Xichen Pan " + }, + { + "bbox": [ + 105, + 157, + 488, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 105, + 157, + 488, + 171 + ], + "type": "text", + "content": " Sainan Liu " + }, + { + "bbox": [ + 105, + 157, + 488, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 105, + 157, + 488, + 171 + ], + "type": "text", + "content": " Mike Roberts " + }, + { + "bbox": [ + 105, + 157, + 488, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 105, + 157, + 488, + 171 + ], + "type": "text", + "content": " Saining Xie" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 148, + 192, + 196, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 192, + 196, + 205 + ], + "spans": [ + { + "bbox": [ + 148, + 192, + 196, + 205 + ], + "type": "text", + 
"content": "Abstract" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 72, + 214, + 272, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 214, + 272, + 489 + ], + "spans": [ + { + "bbox": [ + 72, + 214, + 272, + 489 + ], + "type": "text", + "content": "Large-scale pre-trained video generation models excel in content creation but are not reliable as physically accurate world simulators out of the box. This work studies the process of posttraining these models for accurate world modeling through the lens of the simple, yet fundamental, physics task of modeling object freefall. We show state-of-the-art video generation models struggle with this basic task, despite their visually impressive outputs. To remedy this problem, we find that fine-tuning on a relatively small amount of simulated videos is effective in inducing the dropping behavior in the model, and we can further improve results through a novel reward modeling procedure we introduce. Our study also reveals key limitations of post-training in generalization and distribution modeling. Additionally, we release a benchmark for this task that may serve as a useful diagnostic tool for tracking physical accuracy in large-scale video generative model development. Code is available at this repository: https://github.com/vision-x-nyu/pisa-experiments." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 517, + 133, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 517, + 133, + 529 + ], + "spans": [ + { + "bbox": [ + 53, + 517, + 133, + 529 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 536, + 291, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 536, + 291, + 669 + ], + "spans": [ + { + "bbox": [ + 52, + 536, + 291, + 669 + ], + "type": "text", + "content": "Over the past year, video generation models have advanced significantly, inspiring visions of a future where these models could serve as realistic world models (Craik, 1967; LeCun, 2022; Hafner et al., 2019; 2023; Ha & Schmidhuber, 2018). State-of-the-art video generation models models exhibit impressive results in content creation (OpenAI, 2024; Kuaishou, 2024; Luma, 2024; Runway, 2024) and are already being used in advertising and filmmaking (Runway, 2025; NBC, 2025). These advancements have sparked a line of research that seeks to evolve these models from content creators to world simulators for embodied agents (Yang" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 675, + 290, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 675, + 290, + 696 + ], + "spans": [ + { + "bbox": [ + 52, + 675, + 290, + 696 + ], + "type": "text", + "content": "*Equal contribution, alphabetical order. 1New York University 2Intel Labs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 193, + 543, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 193, + 543, + 266 + ], + "spans": [ + { + "bbox": [ + 304, + 193, + 543, + 266 + ], + "type": "text", + "content": "et al., 2023; 2024b; Agarwal et al., 2025). However, accurate world modeling is considerably more challenging than creative content creation because looking \"good enough\" is not sufficient: generated pixels must faithfully represent a world state evolving in accordance with the laws of physics and visual perspective." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 270, + 544, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 270, + 544, + 428 + ], + "spans": [ + { + "bbox": [ + 303, + 270, + 544, + 428 + ], + "type": "text", + "content": "We find that although the generations of state-of-the-art models are impressive visually, these models still struggle to generate results that are accurate physically, even though these models are pretrained on internet-scale video data demonstrating a wide variety of complex physical interactions. The failure to ground and align visual generations to the laws of physics suggests that pretraining is not enough and a post-training stage is needed. Much like how pretrained Large Language Models (LLMs) need to be adapted through post-training before they can be useful conversational assistants, pretrained video generative models ought to be adapted through post-training before they can be deployed as physically accurate world simulators." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 433, + 544, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 433, + 544, + 672 + ], + "spans": [ + { + "bbox": [ + 303, + 433, + 544, + 672 + ], + "type": "text", + "content": "In this work, we rigorously examine the post-training process of video generation models by focusing on the simple yet fundamental physics task of modeling object freefall, which we find is highly challenging for state-of-the-art models. Specifically, we study an image-to-video" + }, + { + "bbox": [ + 303, + 433, + 544, + 672 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 303, + 433, + 544, + 672 + ], + "type": "text", + "content": " (I2V) scenario where the goal is to generate a video of an object falling and potentially colliding with other objects on the ground, starting from an initial image of the object suspended in midair. 
We chose to study this single task, rather than general physics ability as a whole, because its simplicity allows us to conduct controlled experiments that yield insights into the strengths and limitations of the post-training process, which we believe will become an increasingly important component of research in generative world modeling. Additionally, the simplicity of the dropping task allows it to be implemented in simulation which is desirable because it allows us to easily test the properties of dataset scaling, gives us access to ground truth annotations for evaluation, and gives us the ability to precisely manipulate the simulation environment for controlled experimentation." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 304, + 679, + 543, + 700 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 679, + 543, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 679, + 543, + 700 + ], + "type": "text", + "content": "1We discuss our decision to formulate this task in the image-to-video setting instead of the video-to-video setting in Appendix A." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 14, + 205, + 35, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 205, + 35, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 205, + 35, + 559 + ], + "type": "text", + "content": "arXiv:2503.09595v1 [cs.CV] 12 Mar 2025" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 64, + 293, + 245 + ], + "blocks": [ + { + "bbox": [ + 53, + 64, + 293, + 245 + ], + "lines": [ + { + "bbox": [ + 53, + 64, + 293, + 245 + ], + "spans": [ + { + "bbox": [ + 53, + 64, + 293, + 245 + ], + "type": "image", + "image_path": "19a56cf5e00b438278d859ed1f2a1e950888d74b178b001eab56b1dd9862790e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 256, + 543, + 301 + ], + "lines": [ + { + "bbox": [ + 51, + 256, + 543, + 301 + ], + "spans": [ + { + "bbox": [ + 51, + 256, + 543, + 301 + ], + "type": "text", + "content": "Figure 1. Our PISA (Physics-Informed Simulation and Alignment) evaluation framework includes a new video dataset, where objects are dropped in a variety of real-world (left) and synthetic (right) scenes. For visualization purposes, we depict object motion by overlaying multiple video frames in each image shown above. Our real-world videos enable us to evaluate the physical accuracy of generated video output, and our synthetic videos enable us to improve accuracy through the use of post-training alignment methods." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 302, + 64, + 542, + 245 + ], + "blocks": [ + { + "bbox": [ + 302, + 64, + 542, + 245 + ], + "lines": [ + { + "bbox": [ + 302, + 64, + 542, + 245 + ], + "spans": [ + { + "bbox": [ + 302, + 64, + 542, + 245 + ], + "type": "image", + "image_path": "00bcbfd531744498cbd9c4457e5710d02b6c2db7bb28bac2414f98156285e998.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 320, + 291, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 320, + 291, + 452 + ], + "spans": [ + { + "bbox": [ + 51, + 320, + 291, + 452 + ], + "type": "text", + "content": "Named after Galileo's famous dropping experiment, we introduce the PISA (Physics-Informed Simulation and Alignment) framework for studying physics post-training in the context of the dropping task. PISA includes new real and simulated video datasets, as shown in Figure 1, containing a diverse set of dropping scenarios. PISA also includes a set of task-specific metrics that focus on measuring physical accuracy. Our real-world videos and metrics enable us to evaluate the physical accuracy of generated video output, and our synthetic videos enable us to improve accuracy through a post-training process we introduce." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 458, + 292, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 458, + 292, + 662 + ], + "spans": [ + { + "bbox": [ + 51, + 458, + 292, + 662 + ], + "type": "text", + "content": "Our study reveals that current state-of-the-art video generative models struggle significantly with the task of physically accurate object dropping. Generated objects frequently exhibit impossible behaviors, such as floating midair, defying gravity, or failing to preserve realistic trajectories during freefall. 
However, we find that simple fine-tuning can be remarkably effective: fine-tuning an open-source model on a small dataset of just a few thousand samples enables it to vastly outperform state-of-the-art models in physical accuracy. We further observe that pretrained models are critical for success; models initialized randomly, without leveraging pretraining on large-scale video datasets, fail to achieve comparable results. We also introduce a novel framework for reward modeling that yields further improvement. We demonstrate that our reward learning system is highly flexible in that different reward functions can be chosen to target different axes of physical improvement." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 667, + 293, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 667, + 293, + 717 + ], + "spans": [ + { + "bbox": [ + 51, + 667, + 293, + 717 + ], + "type": "text", + "content": "Our analysis also reveals key limitations. First, we see that model performance degrades when tasked with scenarios outside the training distribution, such as objects dropping from unseen depths or heights. Additionally, while our post" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 320, + 542, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 320, + 542, + 357 + ], + "spans": [ + { + "bbox": [ + 302, + 320, + 542, + 357 + ], + "type": "text", + "content": "trained model generates object motion that is 3D-consistent and physically accurate, we observe misalignment between the generated and ground truth dropping time distribution." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 361, + 544, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 361, + 544, + 483 + ], + "spans": [ + { + "bbox": [ + 302, + 361, + 544, + 483 + ], + "type": "text", + "content": "These findings indicate that post-training is likely to be an essential component of future world modeling systems. The challenges we identify in this relatively simple task are likely to persist when modeling more sophisticated physical phenomena. By introducing the PISA framework and benchmark, we provide a useful diagnostic tool for researchers to test whether models are on the path to acquiring general physical abilities, as well as identify key limitations that researchers should be aware of when integrating new capabilities into their models through post-training." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 498, + 391, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 498, + 391, + 510 + ], + "spans": [ + { + "bbox": [ + 304, + 498, + 391, + 510 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 517, + 544, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 517, + 544, + 710 + ], + "spans": [ + { + "bbox": [ + 302, + 517, + 544, + 710 + ], + "type": "text", + "content": "Modeling Intuitive Physics. Intuitive physics refers to the innate or learned human capacity to make quick and accurate judgments about the physical properties and behaviors of objects in the world, such as their motion, stability, or interactions. This ability, present even in infancy (Spelke et al., 1992; Baillargeon, 2004; Battaglia et al., 2013), is crucial for navigating and understanding everyday life. Replicating intuitive physics is a foundational step toward creating systems that can interact effectively and safely in dynamic, real-world environments (Lake et al., 2017). 
Gravity, as a core component of intuitive physics, plays a pivotal role in both domains. It is one of the most universal and observable physical forces, shaping our expectations about object motion, stability, and interaction (Hamrick et al., 2016; Ullman et al., 2017). Many studies in cognitive science (Battaglia et al., 2013) and AI (Wu et al., 2015; Bear et al., 2021) have" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 45, + 499, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 45, + 499, + 56 + ], + "spans": [ + { + "bbox": [ + 96, + 45, + 499, + 56 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 68, + 291, + 104 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 68, + 291, + 104 + ], + "spans": [ + { + "bbox": [ + 52, + 68, + 291, + 104 + ], + "type": "text", + "content": "relied on physics engines to evaluate and model intuitive physics. Our work uses the Kubric engine (Greff et al., 2022) to generate training videos." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 109, + 292, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 109, + 292, + 337 + ], + "spans": [ + { + "bbox": [ + 53, + 109, + 292, + 337 + ], + "type": "text", + "content": "Video Generation Models as World Simulators. 
Video generation has long been an intriguing topic in computer vision, particularly in the context of predicting future frames (Srivastava et al., 2015; Xue et al., 2016). More recently, as large-scale generative models have become prominent, Yang et al. explored how a wide range of real-world dynamics and decision-making processes can be expressed in terms of video modeling (Yang et al., 2024b; 2023). The introduction of the Sora model (OpenAI, 2024) marked a leap in the quality of generated videos and ignited interest in leveraging such models as \"world simulators.\" Over the past year, numerous video generation models have emerged, some open-source (Zheng et al., 2024; Yang et al., 2024c; Jin et al., 2024; Agarwal et al., 2025) and others commercially available (Kuaishou, 2024; Luma, 2024; Runway, 2024; OpenAI, 2024). Related to our work, Kang et al. (Kang et al., 2024) study the extent to which video generation models learn generalizable laws of physics when trained on 2D data from a synthetic environment." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 342, + 292, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 342, + 292, + 582 + ], + "spans": [ + { + "bbox": [ + 53, + 342, + 292, + 582 + ], + "type": "text", + "content": "Evaluating Video Generation Models. Traditional image-based metrics for generative modeling, such Fréchet inception distance (FID) (Heusel et al., 2017) or inception score (IS) (Salimans et al., 2016), can be incorporated into the video domain, either by applying them on a frame-by-frame basis or by developing video-specific versions, such as Fréchet video distance (FVD) (Unterthiner et al., 2018). Going beyond distribution matching measures, several benchmarks have developed suites of metrics that aim to better evaluate the semantic or visual quality of generated videos. 
For example, V-Bench (Huang et al., 2024) offers a more granular evaluation by measuring video quality across multiple dimensions, such as with respect to subject consistency or spatial relationships. In physics, some works, such as VideoPhy (Bansal et al., 2024) and PhyGenBench (Meng et al., 2024), evaluate in the T2V setting by utilizing multimodal large language models (MLLM) to generate a VQA-based score. More recently, Cosmos (Agarwal et al., 2025) and Physics-IQ (Motamed et al., 2025), evaluate physics in the image-to-video and video-to-video settings." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 597, + 121, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 597, + 121, + 609 + ], + "spans": [ + { + "bbox": [ + 52, + 597, + 121, + 609 + ], + "type": "text", + "content": "3. PisaBench" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 617, + 290, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 617, + 290, + 654 + ], + "spans": [ + { + "bbox": [ + 52, + 617, + 290, + 654 + ], + "type": "text", + "content": "Our benchmark, PisaBench, examines the ability of video generative models to produce accurate physical phenomena by focusing on a straightforward dropping task." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 666, + 206, + 679 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 666, + 206, + 679 + ], + "spans": [ + { + "bbox": [ + 52, + 666, + 206, + 679 + ], + "type": "text", + "content": "3.1. 
Task Definition & Assumptions" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 685, + 291, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 685, + 291, + 710 + ], + "spans": [ + { + "bbox": [ + 52, + 685, + 291, + 710 + ], + "type": "text", + "content": "Our task can be summarized as follows: given an image of an object suspended in midair, generate a video of the object" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 318, + 67, + 526, + 266 + ], + "blocks": [ + { + "bbox": [ + 318, + 67, + 526, + 266 + ], + "lines": [ + { + "bbox": [ + 318, + 67, + 526, + 266 + ], + "spans": [ + { + "bbox": [ + 318, + 67, + 526, + 266 + ], + "type": "image", + "image_path": "1ac3de96698eccbe7ffd3f2cdeee9e6722a7a59125ffbd3af395c8b98701c5c7.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 327, + 272, + 518, + 283 + ], + "lines": [ + { + "bbox": [ + 327, + 272, + 518, + 283 + ], + "spans": [ + { + "bbox": [ + 327, + 272, + 518, + 283 + ], + "type": "text", + "content": "Figure 2. The setup for collecting real-world videos." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 303, + 542, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 303, + 542, + 388 + ], + "spans": [ + { + "bbox": [ + 304, + 303, + 542, + 388 + ], + "type": "text", + "content": "falling and colliding with the ground and potentially other objects. Since a video is an incomplete partial observation of the 4D world, we make a number of assumptions to constrain the task space. These assumptions are crucial for ensuring that our metrics are reliable signals for physical accuracy, since they are only approximations of task success computed from a single ground truth and generated video." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 393, + 544, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 393, + 544, + 513 + ], + "spans": [ + { + "bbox": [ + 304, + 393, + 544, + 513 + ], + "type": "text", + "content": "Specifically, we assume that the falling object is completely still in the initial frame, that only the force of gravity is acting on the object while it falls, and that the camera does not move. The first two assumptions are necessary for the image-to-video setting. Since we do not provide multiple frames as input, it is otherwise impossible to establish the initial velocity or acceleration of the falling object without these assumptions. The last assumption is necessary as our metrics derive from the motion of segmentation masks, which would be affected in the presence of camera motion." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 526, + 398, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 526, + 398, + 537 + ], + "spans": [ + { + "bbox": [ + 304, + 526, + 398, + 537 + ], + "type": "text", + "content": "3.2. Real World Data" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 310, + 551, + 426, + 647 + ], + "blocks": [ + { + "bbox": [ + 310, + 551, + 426, + 647 + ], + "lines": [ + { + "bbox": [ + 310, + 551, + 426, + 647 + ], + "spans": [ + { + "bbox": [ + 310, + 551, + 426, + 647 + ], + "type": "image", + "image_path": "75bc9f270274a876e0f2d9bb2c4307c38aafd436f796800aeee64fbd5f2d9b29.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 655, + 542, + 677 + ], + "lines": [ + { + "bbox": [ + 304, + 655, + 542, + 677 + ], + "spans": [ + { + "bbox": [ + 304, + 655, + 542, + 677 + ], + "type": "text", + "content": "Figure 3. Statistics of the real-world data: (a) number of objects in each video, (b) the proportions of different scenes in the videos." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 439, + 551, + 539, + 647 + ], + "blocks": [ + { + "bbox": [ + 439, + 551, + 539, + 647 + ], + "lines": [ + { + "bbox": [ + 439, + 551, + 539, + 647 + ], + "spans": [ + { + "bbox": [ + 439, + 551, + 539, + 647 + ], + "type": "image", + "image_path": "2a038e4df7899a3c6a7af3f168c48dd30504eb434167e140afe024da4ee58ef5.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 681, + 542, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 681, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 304, + 681, + 542, + 717 + ], + "type": "text", + "content": "Real World Videos. We collect a set of 361 real-world videos demonstrating the dropping task for evaluation. As is shown in Figure 4, the dataset includes a diverse set" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "spans": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 68, + 289, + 231 + ], + "blocks": [ + { + "bbox": [ + 56, + 68, + 289, + 231 + ], + "lines": [ + { + "bbox": [ + 56, + 68, + 289, + 231 + ], + "spans": [ + { + "bbox": [ + 56, + 68, + 289, + 231 + ], + 
"type": "image", + "image_path": "7d4ceaab34fce33596b3dd1e9d4ba7a5b2fc93095d7bd082617fa0ffe00a3c57.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 245, + 291, + 289 + ], + "lines": [ + { + "bbox": [ + 52, + 245, + 291, + 289 + ], + "spans": [ + { + "bbox": [ + 52, + 245, + 291, + 289 + ], + "type": "text", + "content": "Figure 4. Examples of various objects included in our dataset. For simulation, we utilize the GSO dataset (Downs et al., 2022), while for the real-world dataset, we curate our own set of common household objects." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 315, + 291, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 315, + 291, + 471 + ], + "spans": [ + { + "bbox": [ + 52, + 315, + 291, + 471 + ], + "type": "text", + "content": "of objects with different shapes and sizes, captured across various settings such as offices, kitchens, parks, and more (see Figure 3). Each video begins with an object suspended by an invisible wire in the first frame, which is necessary to enforce the assumption that objects are stationary at the start of the video. This assumption is required in our imaged-to-video setting; otherwise, the initial velocity of an object is ambiguous. We cut the video clips to begin as soon as the wire is released. We record the videos in slow-motion at 120 frames per second (fps) with cellphone cameras mounted on tripods to eliminate camera motion. An example of our video collection setup is shown in Figure 2. Additional details on our collection system are provided in Appendix H." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 476, + 291, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 476, + 291, + 559 + ], + "spans": [ + { + "bbox": [ + 52, + 476, + 291, + 559 + ], + "type": "text", + "content": "Simulated Test Videos. 
Since our post-training process uses a dataset of simulated videos, we also create a simulation test-set of 60 videos for understanding sim2real transfer. We create two splits of 30 videos each: one featuring objects and backgrounds seen during training, and the other featuring unseen objects and backgrounds. See Section 4.1 for details on how our simulated data is created." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 566, + 290, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 566, + 290, + 639 + ], + "spans": [ + { + "bbox": [ + 52, + 566, + 290, + 639 + ], + "type": "text", + "content": "Annotations. As is shown in Figure 5, we annotate each video with a caption and segmentation masks estimated from the SAM 2 (Ravi et al., 2024) video segmentation model. We create a descriptive caption for each object in the format of “{object description} falls.” This caption is used to provide context to the task when text input is supported." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 651, + 106, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 651, + 106, + 662 + ], + "spans": [ + { + "bbox": [ + 52, + 651, + 106, + 662 + ], + "type": "text", + "content": "3.3. Metrics" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 670, + 291, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 670, + 291, + 718 + ], + "spans": [ + { + "bbox": [ + 52, + 670, + 291, + 718 + ], + "type": "text", + "content": "We propose three metrics to assess the accuracy of trajectories, shape fidelity, and object permanence. Each of our metrics compare frames from the ground-truth video with the generated video. 
Further details about the metrics, including" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 305, + 64, + 543, + 248 + ], + "blocks": [ + { + "bbox": [ + 305, + 64, + 543, + 248 + ], + "lines": [ + { + "bbox": [ + 305, + 64, + 543, + 248 + ], + "spans": [ + { + "bbox": [ + 305, + 64, + 543, + 248 + ], + "type": "image", + "image_path": "fdf0c1240713a0f765f0563462d434ced74c2fc826cdce4f827d1a1a50939cc7.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 259, + 543, + 304 + ], + "lines": [ + { + "bbox": [ + 304, + 259, + 543, + 304 + ], + "spans": [ + { + "bbox": [ + 304, + 259, + 543, + 304 + ], + "type": "text", + "content": "Figure 5. Example of annotations in real-world data. For segmentation masks, we manually annotate first frame and utilize SAM 2 to produce segmentation masks across frames. For captions, we annotate “{object description} falls.” for all video segments." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 327, + 542, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 327, + 542, + 351 + ], + "spans": [ + { + "bbox": [ + 304, + 327, + 542, + 351 + ], + "type": "text", + "content": "their formulas and our resampling procedure for accounting for differences in fps, is described in Appendix B." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 357, + 543, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 357, + 543, + 405 + ], + "spans": [ + { + "bbox": [ + 304, + 357, + 543, + 405 + ], + "type": "text", + "content": "Trajectory L2. For each frame in both the generated video and ground truth, we calculate the centroid of the masked region. 
After doing this, we compute the average " + }, + { + "bbox": [ + 304, + 357, + 543, + 405 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 304, + 357, + 543, + 405 + ], + "type": "text", + "content": " distance between the centroids of corresponding frames." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 411, + 543, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 411, + 543, + 447 + ], + "spans": [ + { + "bbox": [ + 304, + 411, + 543, + 447 + ], + "type": "text", + "content": "Chamfer Distance (CD). To assess the shape fidelity of objects, we calculate the Chamfer Distance (CD) between the mask regions of the generated video and ground truth." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 453, + 543, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 453, + 543, + 501 + ], + "spans": [ + { + "bbox": [ + 304, + 453, + 543, + 501 + ], + "type": "text", + "content": "Intersection over Union (IoU). We use the Intersection over Union (IoU) metric to evaluate object permanence. The IoU measures objects' degree of overlap between the generated video and ground truth." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 514, + 405, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 514, + 405, + 525 + ], + "spans": [ + { + "bbox": [ + 304, + 514, + 405, + 525 + ], + "type": "text", + "content": "3.4. 
Evaluation Results" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 303, + 532, + 543, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 532, + 543, + 640 + ], + "spans": [ + { + "bbox": [ + 303, + 532, + 543, + 640 + ], + "type": "text", + "content": "We evaluate 4 open models including CogVideoX-5B-I2V(Yang et al., 2024c), DynamiCrafter(Xing et al., 2023), Pyramid-Flow(Jin et al., 2024), and Open-Sora-V1.2(Zheng et al., 2024), as well as 4 proprietary models including Sora (OpenAI, 2024), Kling-V1(Kuaishou, 2024), Kling-V1.5(Kuaishou, 2024), and Runway Gen3 (Runway, 2024). We also evaluate OpenSora post-trained through the processes of Supervised Fine-Tuning (PSFT) and Object Reward Optimization (ORO); see Section 4 for details." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 303, + 645, + 544, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 645, + 544, + 718 + ], + "spans": [ + { + "bbox": [ + 303, + 645, + 544, + 718 + ], + "type": "text", + "content": "The results of running the baseline models on the benchmark indicate a consistent failure to generate physically accurate dropping behavior, despite the visual realism of their generated frames. 
Qualitatively, we see common failure cases in Figure 6, such as implausible object deformations, floating, hallucination of new objects, and unrealistic special" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "spans": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 68, + 542, + 241 + ], + "blocks": [ + { + "bbox": [ + 53, + 68, + 542, + 241 + ], + "lines": [ + { + "bbox": [ + 53, + 68, + 542, + 241 + ], + "spans": [ + { + "bbox": [ + 53, + 68, + 542, + 241 + ], + "type": "image", + "image_path": "342a3b4296640d6e8ed1d72350bfb1237311d4fb53b1dc49759aa08ed93e6617.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 244, + 544, + 289 + ], + "lines": [ + { + "bbox": [ + 51, + 244, + 544, + 289 + ], + "spans": [ + { + "bbox": [ + 51, + 244, + 544, + 289 + ], + "type": "text", + "content": "Figure 6. Qualitative comparison of results on real test set (row 1-2), simulated seen test set (row 3-4) and simulated unseen test set (row 5-6). We present the results of popular open-source and commercially available models alongside those of models fine-tuned through our method. 
Existing models often struggle to generate videos depicting objects falling, whereas our PSFT method effectively introduces knowledge of free-fall into the model. ORO enables the model to more accurately learn object motion and shape." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 299, + 290, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 299, + 290, + 358 + ], + "spans": [ + { + "bbox": [ + 52, + 299, + 290, + 358 + ], + "type": "text", + "content": "effects. We further visualize a random subset of generated trajectories on the left of Figure 8. In many cases, the object remains completely static, and sometimes the object even moves upward. When downward motion is present, it is often slow or contains unrealistic horizontal movement." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 373, + 179, + 388 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 373, + 179, + 388 + ], + "spans": [ + { + "bbox": [ + 52, + 373, + 179, + 388 + ], + "type": "text", + "content": "4. Physics Post-Training" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 395, + 291, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 395, + 291, + 515 + ], + "spans": [ + { + "bbox": [ + 52, + 395, + 291, + 515 + ], + "type": "text", + "content": "We present a post-training process to address the limitations of current models described in Section 3.4. We utilize simulated videos that demonstrate realistic dropping behavior. Our approach for post-training is inspired by the two-stage pipeline consisting of supervised fine-tuning followed by reward modeling commonly used in LLMs. We find that our pipeline improves performance on both real and simulated evaluations, with greater gains observed in simulation. 
This is due to the sim-to-real gap, though our approach still shows substantial gains in transferring to real-world data." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 527, + 190, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 527, + 190, + 540 + ], + "spans": [ + { + "bbox": [ + 52, + 527, + 190, + 540 + ], + "type": "text", + "content": "4.1. Simulated Adaptation Data" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 546, + 291, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 546, + 291, + 713 + ], + "spans": [ + { + "bbox": [ + 52, + 546, + 291, + 713 + ], + "type": "text", + "content": "The first stage of our approach involves supervised fine-tuning. We use Kubric (Greff et al., 2022), a simulation and rendering engine designed for scalable video generation, to create simulated videos of objects dropping and colliding with other objects on the ground. Each video consists of 1-6 dropping objects onto a (possibly empty) pile of up to 4 objects underneath them. The videos are 2 seconds long, consisting of 32 frames at 16 fps. The objects are sourced from the Google Scanned Objects (GSO) dataset (Downs et al., 2022), which provides true-to-scale 3D models created from real-world scans across diverse categories (examples shown in Figure 4). The camera remains stationary in each video and is oriented parallel to the ground plane. To introduce variability, we randomly sample the camera height" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 299, + 542, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 299, + 542, + 347 + ], + "spans": [ + { + "bbox": [ + 304, + 299, + 542, + 347 + ], + "type": "text", + "content": "between 0.4 and 0.6 meters and position objects between 1 and 3 meters away from the camera, which corresponds to the distributions observed in the real-world dataset. 
More information about the dataset can be found in Appendix K." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 360, + 497, + 372 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 360, + 497, + 372 + ], + "spans": [ + { + "bbox": [ + 304, + 360, + 497, + 372 + ], + "type": "text", + "content": "4.2. Physics Supervised Fine-Tuning (PSFT)." + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 306, + 384, + 419, + 465 + ], + "blocks": [ + { + "bbox": [ + 306, + 384, + 419, + 465 + ], + "lines": [ + { + "bbox": [ + 306, + 384, + 419, + 465 + ], + "spans": [ + { + "bbox": [ + 306, + 384, + 419, + 465 + ], + "type": "image", + "image_path": "9cb6168eacd785e4f741b3dbc66836cc1b13221d84cacac490669a7f161086a6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 429, + 384, + 541, + 464 + ], + "blocks": [ + { + "bbox": [ + 429, + 384, + 541, + 464 + ], + "lines": [ + { + "bbox": [ + 429, + 384, + 541, + 464 + ], + "spans": [ + { + "bbox": [ + 429, + 384, + 541, + 464 + ], + "type": "image", + "image_path": "6adf02f646ee4da228177a9bb3d6b9810b033aeaac6c9f68056cd271eb9c47c2.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 306, + 466, + 418, + 545 + ], + "blocks": [ + { + "bbox": [ + 306, + 466, + 418, + 545 + ], + "lines": [ + { + "bbox": [ + 306, + 466, + 418, + 545 + ], + "spans": [ + { + "bbox": [ + 306, + 466, + 418, + 545 + ], + "type": "image", + "image_path": "91f342955c4abb85297c05b6fbf080e1ae798b04587e39e4104a2985d585e4d7.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 303, + 554, + 542, + 620 + ], + "lines": [ + { + "bbox": [ + 303, + 554, + 542, + 620 + ], + "spans": [ + { + "bbox": [ + 303, + 554, + 542, + 620 + ], + "type": "text", + "content": "Figure 7. 
Plots (a), (b), and (c) demonstrate that our metrics tend to improve with further training and that leveraging a pre-trained video diffusion model enhances performance compared to random initialization. In plot (d), the size of the training dataset varies in each training run (each consisting of 5k steps). With only 5k samples, we can achieve optimal results." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 429, + 465, + 541, + 545 + ], + "blocks": [ + { + "bbox": [ + 429, + 465, + 541, + 545 + ], + "lines": [ + { + "bbox": [ + 429, + 465, + 541, + 545 + ], + "spans": [ + { + "bbox": [ + 429, + 465, + 541, + 545 + ], + "type": "image", + "image_path": "a7f340f4c1bc1798d01d2bcfa3aac741d7c979c6b80afc4910bba5d647c78286.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 303, + 633, + 542, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 633, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 303, + 633, + 542, + 717 + ], + "type": "text", + "content": "We use the pretrained Open-Sora v1.2 (Zheng et al., 2024) model as our base model and fine-tune it on our simulated video dataset. We employ Open-Sora v1.2's rectified flow training objective without modification (Liu et al., 2022). Each fine-tuning experiment is conducted with a batch size of 128 and a learning rate of " + }, + { + "bbox": [ + 303, + 633, + 542, + 717 + ], + "type": "inline_equation", + "content": "1\\mathrm{e} - 4" + }, + { + "bbox": [ + 303, + 633, + 542, + 717 + ], + "type": "text", + "content": " on two 80GB NVIDIA A100 GPUs. 
As shown in Figure 6, fine-tuning with this" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "spans": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 57, + 542, + 275 + ], + "blocks": [ + { + "bbox": [ + 53, + 57, + 542, + 275 + ], + "lines": [ + { + "bbox": [ + 53, + 57, + 542, + 275 + ], + "spans": [ + { + "bbox": [ + 53, + 57, + 542, + 275 + ], + "type": "table", + "html": "
MethodRealSim (Seen)Sim (Unseen)
L2 (↓)CD (↓)IoU (↑)L2 (↓)CD (↓)IoU (↑)L2 (↓)CD (↓)IoU (↑)
ProprietarySora (OpenAI, 2024)0.1740.4880.0650.1490.4460.0400.1400.4190.031
Kling-V1 (Kuaishou, 2024)0.1570.4250.0560.1420.4150.0320.1450.4370.028
Kling-V1.5 (Kuaishou, 2024)0.1550.4240.0580.1370.3960.0330.1320.4050.029
Runway Gen3 (Runway, 2024)0.1870.5260.0420.1700.5090.0400.1490.4600.038
OpenCogVideoX-5B-I2V (Yang et al., 2024c)0.1380.3660.0800.1120.3150.0200.1010.2900.020
DynamiCrafter (Xing et al., 2023)0.1870.5040.0210.1570.4850.0390.1360.4300.033
Pyramid-Flow (Jin et al., 2024)0.1750.4850.0620.1260.3520.0590.1300.3810.048
Open-Sora (Zheng et al., 2024)0.1750.5020.0690.1390.4090.0360.1300.3680.034
OursOpen-Sora + PSFT (base)0.0760.1880.1390.0360.0880.1650.0280.0580.129
base + ORO (Seg)0.0750.1830.1420.0330.0760.1700.0320.0630.145
base + ORO (Flow)0.0670.1640.1360.0260.0620.1220.0220.0450.071
base + ORO (Depth)0.0670.1590.1290.0310.0720.1240.0220.0460.096
", + "image_path": "19982fa17713845a5d804164e55f0ffa9571cbf7063e297859e10dcd17cb35ca.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 282, + 544, + 339 + ], + "lines": [ + { + "bbox": [ + 51, + 282, + 544, + 339 + ], + "spans": [ + { + "bbox": [ + 51, + 282, + 544, + 339 + ], + "type": "text", + "content": "Table 1. PisaBench Evaluation Results. This table compares the performance of four proprietary models, four open models, and the models fine-tuned with PSFT and " + }, + { + "bbox": [ + 51, + 282, + 544, + 339 + ], + "type": "inline_equation", + "content": "\\mathrm{PSFT + ORO}" + }, + { + "bbox": [ + 51, + 282, + 544, + 339 + ], + "type": "text", + "content": " on our real-world and simulated test set which is decomposed into seen and unseen object splits. Across all metrics, our PSFT models outperform all other baselines, including proprietary models like Sora. Reward modeling further enhances results, with segmentation rewards improving the shape-based IoU metric and optical rewards and depth rewards enhancing the motion-based L2 and CD metrics. This suggests that rewards can be flexibly adjusted to target specific aspects of performance." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 51, + 357, + 291, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 357, + 291, + 561 + ], + "spans": [ + { + "bbox": [ + 51, + 357, + 291, + 561 + ], + "type": "text", + "content": "data alone is sufficient to induce realistic dropping behavior in the model. Quantitatively, our PSFT model substantially improves on both our simulated and real-world benchmark, as shown in Table 1. Dataset size. We conduct an ablation study on the number of training samples to understand the amount of data required for optimal performance on our benchmark. 
We create random subsets from 500 to 20,000 samples and train our model for 5,000 gradient steps on each subset. Notably, as shown in Figure 7, only 5,000 samples are needed to achieve optimal results. Effect of pretraining. Additionally, we investigate the impact of Open-Sora's pre-training on adaptation. We randomly initialize the Open-Sora's denoising network while keeping the pre-trained initialization of the compressor network and train the model on a dataset of 5k training samples. As shown in Figure 8, the learned knowledge from Open-Sora's pretraining plays a critical role in our task." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 567, + 290, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 567, + 290, + 627 + ], + "spans": [ + { + "bbox": [ + 51, + 567, + 290, + 627 + ], + "type": "text", + "content": "Overall, using PSFT on only 5k samples is sufficient to push Open-Sora's performance past all other evaluated models, including state-of-the-art commercial video generators, by a wide margin. This is made possible by leveraging the knowledge from the sufficiently pre-trained base model." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 639, + 230, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 639, + 230, + 651 + ], + "spans": [ + { + "bbox": [ + 52, + 639, + 230, + 651 + ], + "type": "text", + "content": "4.3. Object Reward Optimization (ORO)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 658, + 291, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 658, + 291, + 706 + ], + "spans": [ + { + "bbox": [ + 52, + 658, + 291, + 706 + ], + "type": "text", + "content": "In the second stage, we propose Object Reward Optimization (ORO) to use reward gradients to guide the video generation model toward generating videos where the object's motion and shape more closely align with the ground truth." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 310, + 358, + 541, + 461 + ], + "blocks": [ + { + "bbox": [ + 310, + 358, + 541, + 461 + ], + "lines": [ + { + "bbox": [ + 310, + 358, + 541, + 461 + ], + "spans": [ + { + "bbox": [ + 310, + 358, + 541, + 461 + ], + "type": "image", + "image_path": "2b0b1b1edbe72353a808756a9abec58b083266dae8f49a22cf820119f4e51b9d.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 303, + 473, + 542, + 540 + ], + "lines": [ + { + "bbox": [ + 303, + 473, + 542, + 540 + ], + "spans": [ + { + "bbox": [ + 303, + 473, + 542, + 540 + ], + "type": "text", + "content": "Figure 8. On the left, we plot random trajectories from the baseline models in Table 1. On the right, we show random trajectories from our fine-tuned model. The baseline trajectories exhibit unrealistic behavior, and most of them stay completely static. On the right, we see the trajectories consistently falling downward with collision and rolling behavior being modeled after the point of contact." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 548, + 544, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 548, + 544, + 633 + ], + "spans": [ + { + "bbox": [ + 303, + 548, + 544, + 633 + ], + "type": "text", + "content": "We follow the VADER framework from (Prabhudesai et al., 2024) and introduce three reward models. The differences between our approach and VADER include: (1) our reward model utilizes both generated videos and ground truth instead of generated videos and conditioning. (2) gradients propagate through all denoising time steps in fine-tuning. 
Consequently, the VADER objective is modified as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 339, + 641, + 542, + 657 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 641, + 542, + 657 + ], + "spans": [ + { + "bbox": [ + 339, + 641, + 542, + 657 + ], + "type": "interline_equation", + "content": "J (\\theta) = \\mathbb {E} _ {\\left(x _ {0}, c\\right) \\sim \\mathcal {D}, x _ {0} ^ {\\prime} \\sim p _ {\\theta} \\left(x _ {0} ^ {\\prime} \\mid c\\right)} \\left[ R \\left(x _ {0} ^ {\\prime}, x _ {0}\\right) \\right] \\tag {1}", + "image_path": "b87ce2810e76a242273a09ac0b48625aaea5cdd89ebf4f8ad8f5727f690a705a.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 663, + 542, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 663, + 542, + 700 + ], + "spans": [ + { + "bbox": [ + 303, + 663, + 542, + 700 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 303, + 663, + 542, + 700 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 303, + 663, + 542, + 700 + ], + "type": "text", + "content": " is the ground truth dataset, " + }, + { + "bbox": [ + 303, + 663, + 542, + 700 + ], + "type": "inline_equation", + "content": "p_{\\theta}(.)" + }, + { + "bbox": [ + 303, + 663, + 542, + 700 + ], + "type": "text", + "content": " is a given video diffusion model, " + }, + { + "bbox": [ + 303, + 663, + 542, + 700 + ], + "type": "inline_equation", + "content": "x_0^{\\prime}, x_0 \\in \\mathbb{R}^{H \\times W \\times 3}" + }, + { + "bbox": [ + 303, + 663, + 542, + 700 + ], + "type": "text", + "content": " are generated video and ground truth, and " + }, + { + "bbox": [ + 303, + 663, + 542, + 700 + ], + "type": "inline_equation", + "content": "c \\in \\mathbb{R}^{H \\times W \\times 3}" + }, + { + "bbox": [ + 303, + 663, + 542, + 700 + ], + "type": "text", + "content": " is the initial image." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 705, + 542, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 705, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 304, + 705, + 542, + 717 + ], + "type": "text", + "content": "Segmentation Reward. We utilize SAM 2 (Ravi et al.," + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 45, + 499, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 45, + 499, + 56 + ], + "spans": [ + { + "bbox": [ + 96, + 45, + 499, + 56 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 67, + 290, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 67, + 290, + 117 + ], + "spans": [ + { + "bbox": [ + 52, + 67, + 290, + 117 + ], + "type": "text", + "content": "2024) to generate segmentation masks across frames for generated videos. We define segmentation reward as the IoU between the dropping object's mask in generated video and the mask from the ground truth simulated segmentation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 121, + 291, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 121, + 291, + 170 + ], + "spans": [ + { + "bbox": [ + 52, + 121, + 291, + 170 + ], + "type": "text", + "content": "Optical Flow Reward. 
We utilize RAFT (Teed & Deng, 2020) to generate generated video's optical flow " + }, + { + "bbox": [ + 52, + 121, + 291, + 170 + ], + "type": "inline_equation", + "content": "V^{\\mathrm{gen}}" + }, + { + "bbox": [ + 52, + 121, + 291, + 170 + ], + "type": "text", + "content": " and ground truth's optical flow " + }, + { + "bbox": [ + 52, + 121, + 291, + 170 + ], + "type": "inline_equation", + "content": "V^{\\mathrm{gt}}" + }, + { + "bbox": [ + 52, + 121, + 291, + 170 + ], + "type": "text", + "content": ". We define the optical flow reward as " + }, + { + "bbox": [ + 52, + 121, + 291, + 170 + ], + "type": "inline_equation", + "content": "R(x_0', x_0) = -|V^{\\mathrm{gen}} - V^{\\mathrm{gt}}|" + }, + { + "bbox": [ + 52, + 121, + 291, + 170 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 175, + 291, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 175, + 291, + 223 + ], + "spans": [ + { + "bbox": [ + 52, + 175, + 291, + 223 + ], + "type": "text", + "content": "Depth Reward. We utilize Depth-Anything-V2 (Yang et al., 2024a) to generate generated video's depth map " + }, + { + "bbox": [ + 52, + 175, + 291, + 223 + ], + "type": "inline_equation", + "content": "D^{\\mathrm{gen}}" + }, + { + "bbox": [ + 52, + 175, + 291, + 223 + ], + "type": "text", + "content": " and ground truth's depth map " + }, + { + "bbox": [ + 52, + 175, + 291, + 223 + ], + "type": "inline_equation", + "content": "D^{\\mathrm{gt}}" + }, + { + "bbox": [ + 52, + 175, + 291, + 223 + ], + "type": "text", + "content": ". We define the optical flow reward as " + }, + { + "bbox": [ + 52, + 175, + 291, + 223 + ], + "type": "inline_equation", + "content": "R(x_0', x_0) = -|D^{\\mathrm{gen}} - D^{\\mathrm{gt}}|" + }, + { + "bbox": [ + 52, + 175, + 291, + 223 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 228, + 279, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 228, + 279, + 241 + ], + "spans": [ + { + "bbox": [ + 52, + 228, + 279, + 241 + ], + "type": "text", + "content": "Details on implementation can be found in Appendix C." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 247, + 291, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 247, + 291, + 331 + ], + "spans": [ + { + "bbox": [ + 52, + 247, + 291, + 331 + ], + "type": "text", + "content": "We begin from the checkpoint of the first stage, which is trained on 5,000 samples trained over 5,000 gradient steps. We then fine-tune the model with ORO on the simulated dataset, using a batch size of 1 and two 80GB NVIDIA A100 GPUs for each fine-tuning experiment. We set a learning rate of " + }, + { + "bbox": [ + 52, + 247, + 291, + 331 + ], + "type": "inline_equation", + "content": "1\\mathrm{e} - 6" + }, + { + "bbox": [ + 52, + 247, + 291, + 331 + ], + "type": "text", + "content": " for segmentation reward and depth reward and " + }, + { + "bbox": [ + 52, + 247, + 291, + 331 + ], + "type": "inline_equation", + "content": "1\\mathrm{e} - 5" + }, + { + "bbox": [ + 52, + 247, + 291, + 331 + ], + "type": "text", + "content": " for optical flow." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 336, + 291, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 336, + 291, + 420 + ], + "spans": [ + { + "bbox": [ + 51, + 336, + 291, + 420 + ], + "type": "text", + "content": "As shown in Table 1, incorporating ORO in reward modeling further improves performance. Additionally, each reward function enhances the aspect of physicality that aligns with its intended purpose—segmentation rewards improve shape accuracy, while flow rewards and depth rewards improve motion accuracy. This demonstrates the process is both modular and interpretable." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 435, + 257, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 435, + 257, + 449 + ], + "spans": [ + { + "bbox": [ + 52, + 435, + 257, + 449 + ], + "type": "text", + "content": "5. Assessing Learned Physical Behavior" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 456, + 291, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 456, + 291, + 552 + ], + "spans": [ + { + "bbox": [ + 52, + 456, + 291, + 552 + ], + "type": "text", + "content": "Having introduced our post-training approaches in Section 4, we probe into the model's understanding of the interaction between gravity and perspective—the two laws that determine the dynamics of our videos. We first test if the learned physical behavior of our model can generalize to dropping heights and depths beyond its training distribution. Then, we study the ability of the model to learn the probability distribution induced by the uncertainty of perspective." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 565, + 267, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 565, + 267, + 578 + ], + "spans": [ + { + "bbox": [ + 52, + 565, + 267, + 578 + ], + "type": "text", + "content": "5.1. Generalization to Unseen Depths and Heights" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 583, + 291, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 583, + 291, + 656 + ], + "spans": [ + { + "bbox": [ + 52, + 583, + 291, + 656 + ], + "type": "text", + "content": "Depth and height are the main factors that affect the dynamics of a falling object in our videos. 
We can see this by combining the laws of gravity with perspective under our camera assumptions to model the object's image " + }, + { + "bbox": [ + 52, + 583, + 291, + 656 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 52, + 583, + 291, + 656 + ], + "type": "text", + "content": " coordinate as a function of time (further details on our coordinate system are described in Appendix G):" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 117, + 664, + 291, + 691 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 664, + 291, + 691 + ], + "spans": [ + { + "bbox": [ + 117, + 664, + 291, + 691 + ], + "type": "interline_equation", + "content": "y (t) = \\frac {f}{Z} \\left(Y _ {0} - \\frac {1}{2} g t ^ {2}\\right). \\tag {2}", + "image_path": "2af4f4a51a21c94069dd0ccc494f71591672a4c22044b52818a3462e358900fd.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 705, + 291, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 705, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 52, + 705, + 291, + 717 + ], + "type": "text", + "content": "From Equation (2), we see that the random variables that af" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 67, + 541, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 67, + 541, + 103 + ], + "spans": [ + { + "bbox": [ + 304, + 67, + 541, + 103 + ], + "type": "text", + "content": "fect object motion are " + }, + { + "bbox": [ + 304, + 67, + 541, + 103 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 304, + 67, + 541, + 103 + ], + "type": "text", + "content": " (depth) and " + }, + { + "bbox": [ + 304, + 67, + 541, + 103 + ], + "type": "inline_equation", + "content": "Y" + }, + { + "bbox": [ + 304, + 67, + 541, + 103 + ], + "type": "text", + "content": " (height) (the camera focal length " + }, + { + "bbox": [ + 304, + 67, + 541, + 103 + ], + "type": "inline_equation", 
+ "content": "f" + }, + { + "bbox": [ + 304, + 67, + 541, + 103 + ], + "type": "text", + "content": " is fixed). Thus, we are interested in testing generalization on unseen values of " + }, + { + "bbox": [ + 304, + 67, + 541, + 103 + ], + "type": "inline_equation", + "content": "Y" + }, + { + "bbox": [ + 304, + 67, + 541, + 103 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 67, + 541, + 103 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 304, + 67, + 541, + 103 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 303, + 109, + 542, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 109, + 542, + 217 + ], + "spans": [ + { + "bbox": [ + 303, + 109, + 542, + 217 + ], + "type": "text", + "content": "We create a simulated test set in which a single object is dropped from varying depths and heights, using objects and backgrounds unseen during training. We uniformly sample depth and height values (in meters) from the Cartesian product of the ranges [1, 5] and [0.5, 2.5], respectively. The camera height is fixed at " + }, + { + "bbox": [ + 303, + 109, + 542, + 217 + ], + "type": "inline_equation", + "content": "0.5m" + }, + { + "bbox": [ + 303, + 109, + 542, + 217 + ], + "type": "text", + "content": ", and depth-height pairs outside the camera viewing frustum are discarded. A sample is in-distribution (ID) if its dropping depth and height both fall in the range [1, 3] and [0.5, 1.5]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 303, + 223, + 542, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 223, + 542, + 271 + ], + "spans": [ + { + "bbox": [ + 303, + 223, + 542, + 271 + ], + "type": "text", + "content": "Since we have access to the ground truth dropping time in simulation, we also employ a dropping time error, a metric we describe in Appendix B. 
Our analysis in Table 2 shows that performance degrades for out-of-distribution scenarios." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 303, + 277, + 542, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 277, + 542, + 325 + ], + "spans": [ + { + "bbox": [ + 303, + 277, + 542, + 325 + ], + "type": "text", + "content": "Since depth and height are the main physical quantities that affect falling dynamics, this finding indicates that our model may struggle to learn a fully generalizable law that accounts for the interaction of perspective and gravity." + } + ] + } + ], + "index": 16 + }, + { + "type": "table", + "bbox": [ + 305, + 335, + 542, + 381 + ], + "blocks": [ + { + "bbox": [ + 305, + 335, + 542, + 381 + ], + "lines": [ + { + "bbox": [ + 305, + 335, + 542, + 381 + ], + "spans": [ + { + "bbox": [ + 305, + 335, + 542, + 381 + ], + "type": "table", + "html": "
SettingL2 (↓)Chamfer (↓)IOU (↑)Time Error (↓)
ID0.0360.0880.1550.091
OOD0.0440.1430.0490.187
", + "image_path": "88670abb85fb73081ca571f4fc556131bc94f22fa766b8d2ff19594c1ac79e8b.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 388, + 542, + 434 + ], + "lines": [ + { + "bbox": [ + 304, + 388, + 542, + 434 + ], + "spans": [ + { + "bbox": [ + 304, + 388, + 542, + 434 + ], + "type": "text", + "content": "Table 2. Results of our metrics on in-distribution (ID) and out-of-distribution (OOD) depth-height combinations. The values used for depth range from " + }, + { + "bbox": [ + 304, + 388, + 542, + 434 + ], + "type": "inline_equation", + "content": "1 - 5\\mathrm{m}" + }, + { + "bbox": [ + 304, + 388, + 542, + 434 + ], + "type": "text", + "content": " (ID range [1,3]) and height values range from 0.5-2.5 (ID range [0.5, 1.5])." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 453, + 424, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 453, + 424, + 464 + ], + "spans": [ + { + "bbox": [ + 304, + 453, + 424, + 464 + ], + "type": "text", + "content": "5.2. Distributional Analysis" + } + ] + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 305, + 478, + 399, + 565 + ], + "blocks": [ + { + "bbox": [ + 305, + 478, + 399, + 565 + ], + "lines": [ + { + "bbox": [ + 305, + 478, + 399, + 565 + ], + "spans": [ + { + "bbox": [ + 305, + 478, + 399, + 565 + ], + "type": "image", + "image_path": "173d344ac811372831211d71e1f089dcd0ee90fa7170fbcfc6b6036661f678d4.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 303, + 577, + 542, + 622 + ], + "lines": [ + { + "bbox": [ + 303, + 577, + 542, + 622 + ], + "spans": [ + { + "bbox": [ + 303, + 577, + 542, + 622 + ], + "type": "text", + "content": "Figure 9. Demonstration of ambiguity in 2D perspective projections. Each of the three clouds appears the exact same in the camera's image. 
The right side shows how we perform a scale and translation augmentation to generate deliberately ambiguous data." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 403, + 477, + 541, + 565 + ], + "blocks": [ + { + "bbox": [ + 403, + 477, + 541, + 565 + ], + "lines": [ + { + "bbox": [ + 403, + 477, + 541, + 565 + ], + "spans": [ + { + "bbox": [ + 403, + 477, + 541, + 565 + ], + "type": "image", + "image_path": "dcf5758aab2710279523e38fe46165467a3e180eeef0ed0b3050233202412b9e.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 303, + 633, + 542, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 633, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 303, + 633, + 542, + 717 + ], + "type": "text", + "content": "The evolution of a physical system is not uniquely determined by a single initial image, since the lossy uncertainty of perspective induces a distribution of possible outcomes as shown in Figure 9. 
An ideal video world model should (1) output videos that are faithful to the evolution of some plausible world state and (2) provide accurate coverage across the entire distribution of the world that is possible from" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "spans": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 68, + 291, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 68, + 291, + 139 + ], + "spans": [ + { + "bbox": [ + 52, + 68, + 291, + 139 + ], + "type": "text", + "content": "its conditioning signal. In this section, we examine these two facets by studying " + }, + { + "bbox": [ + 52, + 68, + 291, + 139 + ], + "type": "inline_equation", + "content": "p(t|y)" + }, + { + "bbox": [ + 52, + 68, + 291, + 139 + ], + "type": "text", + "content": ": the distribution of dropping times possible from an object at coordinate " + }, + { + "bbox": [ + 52, + 68, + 291, + 139 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 52, + 68, + 291, + 139 + ], + "type": "text", + "content": " in the image plane. 
To do this, we create a simulated dataset that has a much wider distribution " + }, + { + "bbox": [ + 52, + 68, + 291, + 139 + ], + "type": "inline_equation", + "content": "p(t|y)" + }, + { + "bbox": [ + 52, + 68, + 291, + 139 + ], + "type": "text", + "content": " than our PSFT dataset. See Appendix F for more details on its construction." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 53, + 148, + 290, + 228 + ], + "blocks": [ + { + "bbox": [ + 53, + 148, + 290, + 228 + ], + "lines": [ + { + "bbox": [ + 53, + 148, + 290, + 228 + ], + "spans": [ + { + "bbox": [ + 53, + 148, + 290, + 228 + ], + "type": "image", + "image_path": "73a89a0c5b340af83339e7bdee6111a2850a250f52d7a51435656c3c43642fe2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 239, + 291, + 295 + ], + "lines": [ + { + "bbox": [ + 52, + 239, + 291, + 295 + ], + "spans": [ + { + "bbox": [ + 52, + 239, + 291, + 295 + ], + "type": "text", + "content": "Figure 10. Examples of model trajectories lifted to 3D. The blue line represents the height of the camera ray passing through the bottom of the dropping object as a function of depth. The set of possible dropping trajectories at a given depth are depicted in gray. The lifted trajectory of the model is depicted in green." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 53, + 308, + 289, + 420 + ], + "blocks": [ + { + "bbox": [ + 53, + 308, + 289, + 420 + ], + "lines": [ + { + "bbox": [ + 53, + 308, + 289, + 420 + ], + "spans": [ + { + "bbox": [ + 53, + 308, + 289, + 420 + ], + "type": "image", + "image_path": "8862d09f5523d53e73a41fca13423e9ac96d265eb77cc7788176d5f31c7abc8a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 434, + 291, + 479 + ], + "lines": [ + { + "bbox": [ + 52, + 434, + 291, + 479 + ], + "spans": [ + { + "bbox": [ + 52, + 434, + 291, + 479 + ], + "type": "text", + "content": "Figure 11. Visualizing " + }, + { + "bbox": [ + 52, + 434, + 291, + 479 + ], + "type": "inline_equation", + "content": "p(t|y)" + }, + { + "bbox": [ + 52, + 434, + 291, + 479 + ], + "type": "text", + "content": " misalignment for different images. Green shows the ground-truth CDF, orange is the 32-frame quantized version, and blue is the empirical CDF of 128 different samples of dropping times from the model." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 484, + 235, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 484, + 235, + 496 + ], + "spans": [ + { + "bbox": [ + 52, + 484, + 235, + 496 + ], + "type": "text", + "content": "Testing (1): 3D faithfulness of trajectories." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 502, + 291, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 502, + 291, + 670 + ], + "spans": [ + { + "bbox": [ + 52, + 502, + 291, + 670 + ], + "type": "text", + "content": "After training our model on this new dataset, we test whether its trajectories are consistent with a valid 3D world state. 
We first obtain an estimated dropping time from generated videos using the procedure described in Section 5.1. Using knowledge of the camera position, focal length, sensor width, and " + }, + { + "bbox": [ + 52, + 502, + 291, + 670 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 52, + 502, + 291, + 670 + ], + "type": "text", + "content": ", we can obtain an implied depth and height of the trajectory. We can then back-project the video trajectory to 3D and analyze whether they constitute physically accurate trajectories. We give further details about this process in Appendix G. As show in Figure 10, we find that our model's lifted trajectories consistently align with the 3D trajectory at the height and depth implied by its dropping time, giving evidence that the model's visual outputs are faithful to some plausible real-world state." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 675, + 212, + 688 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 675, + 212, + 688 + ], + "spans": [ + { + "bbox": [ + 52, + 675, + 212, + 688 + ], + "type": "text", + "content": "Testing (2): distributional alignment." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 693, + 290, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 693, + 290, + 718 + ], + "spans": [ + { + "bbox": [ + 52, + 693, + 290, + 718 + ], + "type": "text", + "content": "Going beyond the level of individual trajectories, we study the model's learned conditional distribution " + }, + { + "bbox": [ + 52, + 693, + 290, + 718 + ], + "type": "inline_equation", + "content": "p(t|y)" + }, + { + "bbox": [ + 52, + 693, + 290, + 718 + ], + "type": "text", + "content": ". 
We" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 67, + 542, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 67, + 542, + 139 + ], + "spans": [ + { + "bbox": [ + 304, + 67, + 542, + 139 + ], + "type": "text", + "content": "create 50 different initial images with differing values of " + }, + { + "bbox": [ + 304, + 67, + 542, + 139 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 304, + 67, + 542, + 139 + ], + "type": "text", + "content": ", generate 128 different videos from each, and estimate the dropping time in each video. Using the laws of gravity, the laws of perspective, and the assumption of uniform depth sampling in our dataset, we can analytically derive the probability " + }, + { + "bbox": [ + 304, + 67, + 542, + 139 + ], + "type": "inline_equation", + "content": "p(t|y)" + }, + { + "bbox": [ + 304, + 67, + 542, + 139 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 330, + 156, + 542, + 188 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 156, + 542, + 188 + ], + "spans": [ + { + "bbox": [ + 330, + 156, + 542, + 188 + ], + "type": "interline_equation", + "content": "p (t | y) = \\left\\{ \\begin{array}{l l} \\frac {g t}{\\left(Z _ {\\max } - Z _ {\\min }\\right) \\beta}, & t _ {\\min } \\leq t \\leq t _ {\\max } \\\\ 0, & \\text {o t h e r w i s e} \\end{array} \\right. 
\\tag {3}", + "image_path": "ec04a35a42939e0e06b8bc1b07070b0c0c90ff460116ede97d74c46464f397b5.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 203, + 542, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 203, + 542, + 228 + ], + "spans": [ + { + "bbox": [ + 304, + 203, + 542, + 228 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 203, + 542, + 228 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 304, + 203, + 542, + 228 + ], + "type": "text", + "content": " is a constant that depends on " + }, + { + "bbox": [ + 304, + 203, + 542, + 228 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 304, + 203, + 542, + 228 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 203, + 542, + 228 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 304, + 203, + 542, + 228 + ], + "type": "text", + "content": " and the camera height. The derivation is given in Appendix E." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 233, + 542, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 233, + 542, + 424 + ], + "spans": [ + { + "bbox": [ + 304, + 233, + 542, + 424 + ], + "type": "text", + "content": "We then measure goodness-of-fit for each of the 50 experiments using the Kolmogorov-Smirnov (KS) test (Massey Jr, 1951). The null hypothesis of the KS test is that the two distributions being compared are equal, and we consider p-values less than 0.05 as evidence of misalignment. Since our measured times have limited precision and can only take 32 distinct values—due to estimating the contact frame—we approximate the ground truth " + }, + { + "bbox": [ + 304, + 233, + 542, + 424 + ], + "type": "inline_equation", + "content": "p(t|y)" + }, + { + "bbox": [ + 304, + 233, + 542, + 424 + ], + "type": "text", + "content": " using a Monte Carlo method. 
We sample 1000 values from the ground truth distribution and then quantized them into 32 bins corresponding to their frame, which we use as ground truth observations in the KS test. We find that in all 50/50 cases, the p-value from the test is less than 0.05, which provides evidence that the model does not learn the correct distribution of dropping times. We visualize the misalignment between the empirical CDF of the model's in Figure 11." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 430, + 542, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 430, + 542, + 478 + ], + "spans": [ + { + "bbox": [ + 304, + 430, + 542, + 478 + ], + "type": "text", + "content": "In summary, while our model's trajectories show promising tendencies to ground themselves to plausible 3D world states, the range of possible outputs from the model does not align with the ground truth distribution." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 494, + 376, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 494, + 376, + 506 + ], + "spans": [ + { + "bbox": [ + 304, + 494, + 376, + 506 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 303, + 514, + 542, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 514, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 303, + 514, + 542, + 717 + ], + "type": "text", + "content": "This work studies post-training as an avenue for adapting adapting pre-trained video generator into world models. We introduce a post-training strategy that is highly effective in aligning our model. Our work raises interesting insights into the learned distributions of generative models. Qualitatively, large scale image or video generative models appear to excel at generating likely samples from the data distribution, but this alone does not imply that they match the data distribution well in its entirety. 
As long as a model is able to generate likely samples, global distributional misalignment is not necessarily a problem for content creation. However, this problem becomes critical for world models, where alignment across the entire distribution is necessary for faithful world simulation. The insights revealed by our study, made possible by our constrained and tractable setting, indicate that although post-training improves per-sample accuracy, general distributional alignment remains unsolved." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "spans": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 66, + 145, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 66, + 145, + 79 + ], + "spans": [ + { + "bbox": [ + 53, + 66, + 145, + 79 + ], + "type": "text", + "content": "Acknowledgment" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 87, + 291, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 87, + 291, + 194 + ], + "spans": [ + { + "bbox": [ + 52, + 87, + 291, + 194 + ], + "type": "text", + "content": "We thank Boyang Zheng, Srivats Poddar, Ellis Brown, Shengbang Tong, Shusheng Yang, Jihan Yang, Daohan Lu, Anjali Gupta and Ziteng Wang for their help with data collection. 
We thank Jiraphon Yenphraphai for valuable assistance in setting up our simulation code. We thank Runway and Kling AI for providing API credit. SX also acknowledges support from Intel AI SRS, Korean AI Research Hub, Open Path AI Foundation, Amazon Research Award, Google TRC program, and NSF Award IIS-2443404." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 209, + 111, + 222 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 209, + 111, + 222 + ], + "spans": [ + { + "bbox": [ + 53, + 209, + 111, + 222 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 228, + 291, + 717 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 228, + 291, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 228, + 291, + 277 + ], + "spans": [ + { + "bbox": [ + 53, + 228, + 291, + 277 + ], + "type": "text", + "content": "Agarwal, N., Ali, A., Bala, M., Balaji, Y., Barker, E., Cai, T., Chattopadhyay, P., Chen, Y., Cui, Y., Ding, Y., et al. Cosmos world foundation model platform for physical AI. arXiv preprint arXiv:2501.03575, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 282, + 290, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 282, + 290, + 307 + ], + "spans": [ + { + "bbox": [ + 53, + 282, + 290, + 307 + ], + "type": "text", + "content": "Baillargeon, R. Infants' physical world. Current directions in psychological science, 13(3):89-94, 2004." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 54, + 313, + 291, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 313, + 291, + 361 + ], + "spans": [ + { + "bbox": [ + 54, + 313, + 291, + 361 + ], + "type": "text", + "content": "Bansal, H., Lin, Z., Xie, T., Zong, Z., Yarom, M., Bitton, Y., Jiang, C., Sun, Y., Chang, K.-W., and Grover, A. 
Videophy: Evaluating physical commonsense for video generation. arXiv preprint arXiv:2406.03520, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 54, + 368, + 291, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 368, + 291, + 415 + ], + "spans": [ + { + "bbox": [ + 54, + 368, + 291, + 415 + ], + "type": "text", + "content": "Battaglia, P. W., Hamrick, J. B., and Tenenbaum, J. B. Simulation as an engine of physical scene understanding. Proceedings of the National Academy of Sciences, 110 (45):18327-18332, 2013." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 54, + 422, + 291, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 422, + 291, + 482 + ], + "spans": [ + { + "bbox": [ + 54, + 422, + 291, + 482 + ], + "type": "text", + "content": "Bear, D. M., Wang, E., Mrowca, D., Binder, F. J., Tung, H.-Y. F., Pramod, R., Holdaway, C., Tao, S., Smith, K., Sun, F.-Y., et al. Physion: Evaluating physical prediction from vision in humans and machines. arXiv preprint arXiv:2106.08261, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 488, + 289, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 488, + 289, + 512 + ], + "spans": [ + { + "bbox": [ + 53, + 488, + 289, + 512 + ], + "type": "text", + "content": "Community, B. O. Blender - a 3d modelling and rendering package, 2018. URL http://www.blender.org." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 518, + 289, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 518, + 289, + 543 + ], + "spans": [ + { + "bbox": [ + 53, + 518, + 289, + 543 + ], + "type": "text", + "content": "Coumans, E. et al. Bullet physics engine. Open Source Software: http://bulletphysics.org, 1(3):84, 2010." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 548, + 289, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 548, + 289, + 572 + ], + "spans": [ + { + "bbox": [ + 53, + 548, + 289, + 572 + ], + "type": "text", + "content": "Craik, K. J. W. The nature of explanation, volume 445. CUP Archive, 1967." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 579, + 291, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 579, + 291, + 650 + ], + "spans": [ + { + "bbox": [ + 53, + 579, + 291, + 650 + ], + "type": "text", + "content": "Downs, L., Francis, A., Koenig, N., Kinman, B., Hickman, R., Reymann, K., McHugh, T. B., and Vanhoucke, V. Google scanned objects: A high-quality dataset of 3d scanned household items. In 2022 International Conference on Robotics and Automation (ICRA), pp. 2553-2560. IEEE, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 657, + 291, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 657, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 53, + 657, + 291, + 717 + ], + "type": "text", + "content": "Greff, K., Belletti, F., Beyer, L., Doersch, C., Du, Y., Duckworth, D., Fleet, D. J., Gnanapragasam, D., Golemo, F., Herrmann, C., et al. Kubric: A scalable dataset generator. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 3749-3761, 2022." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 306, + 67, + 543, + 717 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 306, + 67, + 543, + 103 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 67, + 543, + 103 + ], + "spans": [ + { + "bbox": [ + 306, + 67, + 543, + 103 + ], + "type": "text", + "content": "Ha, D. and Schmidhuber, J. Recurrent world models facilitate policy evolution. 
Advances in neural information processing systems, 31, 2018." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 112, + 542, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 112, + 542, + 148 + ], + "spans": [ + { + "bbox": [ + 306, + 112, + 542, + 148 + ], + "type": "text", + "content": "Hafner, D., Lillicrap, T., Ba, J., and Norouzi, M. Dream to control: Learning behaviors by latent imagination. arXiv preprint arXiv:1912.01603, 2019." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 156, + 542, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 156, + 542, + 191 + ], + "spans": [ + { + "bbox": [ + 306, + 156, + 542, + 191 + ], + "type": "text", + "content": "Hafner, D., Pasukonis, J., Ba, J., and Lillicrap, T. Mastering diverse domains through world models. arXiv preprint arXiv:2301.04104, 2023." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 201, + 542, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 201, + 542, + 236 + ], + "spans": [ + { + "bbox": [ + 306, + 201, + 542, + 236 + ], + "type": "text", + "content": "Hamrick, J. B., Battaglia, P. W., Griffiths, T. L., and Tenenbaum, J. B. Inferring mass in complex scenes by mental simulation. Cognition, 157:61-76, 2016." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 245, + 542, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 245, + 542, + 293 + ], + "spans": [ + { + "bbox": [ + 306, + 245, + 542, + 293 + ], + "type": "text", + "content": "Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., and Hochreiter, S. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 301, + 542, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 301, + 542, + 361 + ], + "spans": [ + { + "bbox": [ + 306, + 301, + 542, + 361 + ], + "type": "text", + "content": "Huang, Z., He, Y., Yu, J., Zhang, F., Si, C., Jiang, Y., Zhang, Y., Wu, T., Jin, Q., Chanpaisit, N., et al. Vbench: Comprehensive benchmark suite for video generative models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 21807-21818, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 370, + 542, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 370, + 542, + 418 + ], + "spans": [ + { + "bbox": [ + 306, + 370, + 542, + 418 + ], + "type": "text", + "content": "Jin, Y., Sun, Z., Li, N., Xu, K., Jiang, H., Zhuang, N., Huang, Q., Song, Y., Mu, Y., and Lin, Z. Pyramidal flow matching for efficient video generative modeling. arXiv preprint arXiv:2410.05954, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 426, + 542, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 426, + 542, + 474 + ], + "spans": [ + { + "bbox": [ + 306, + 426, + 542, + 474 + ], + "type": "text", + "content": "Kang, B., Yue, Y., Lu, R., Lin, Z., Zhao, Y., Wang, K., Huang, G., and Feng, J. How far is video generation from world model: A physical law perspective. arXiv preprint arXiv:2411.02385, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 306, + 483, + 543, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 483, + 543, + 506 + ], + "spans": [ + { + "bbox": [ + 306, + 483, + 543, + 506 + ], + "type": "text", + "content": "Kuaishou. Kling, 2024. URL https://kling.kuaishou.com. Accessed: 2024." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 306, + 515, + 542, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 515, + 542, + 552 + ], + "spans": [ + { + "bbox": [ + 306, + 515, + 542, + 552 + ], + "type": "text", + "content": "Lake, B. M., Ullman, T. D., Tenenbaum, J. B., and Gershman, S. J. Building machines that learn and think like people. Behavioral and brain sciences, 40:e253, 2017." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 306, + 559, + 542, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 559, + 542, + 594 + ], + "spans": [ + { + "bbox": [ + 306, + 559, + 542, + 594 + ], + "type": "text", + "content": "LeCun, Y. A path towards autonomous machine intelligence version 0.9.2, 2022-06-27. Open Review, 62(1):1-62, 2022." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 306, + 604, + 542, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 604, + 542, + 640 + ], + "spans": [ + { + "bbox": [ + 306, + 604, + 542, + 640 + ], + "type": "text", + "content": "Liu, X., Gong, C., and Liu, Q. Flow straight and fast: Learning to generate and transfer data with rectified flow. arXiv preprint arXiv:2209.03003, 2022." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 306, + 649, + 542, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 649, + 542, + 673 + ], + "spans": [ + { + "bbox": [ + 306, + 649, + 542, + 673 + ], + "type": "text", + "content": "Luma. Dream machine, 2024. URL https://lumalabs.ai/dream-machine. Accessed: 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 306, + 681, + 542, + 717 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 681, + 542, + 717 + ], + "spans": [ + { + "bbox": [ + 306, + 681, + 542, + 717 + ], + "type": "text", + "content": "Massey Jr, F. J. The kolmogorov-smirnov test for goodness of fit. 
Journal of the American statistical Association, 46 (253):68-78, 1951." + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "spans": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "spans": [ + { + "bbox": [ + 294, + 731, + 301, + 740 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 67, + 293, + 718 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 67, + 293, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 67, + 293, + 128 + ], + "spans": [ + { + "bbox": [ + 53, + 67, + 293, + 128 + ], + "type": "text", + "content": "Meng, F., Liao, J., Tan, X., Shao, W., Lu, Q., Zhang, K., Cheng, Y., Li, D., Qiao, Y., and Luo, P. Towards world simulator: Crafting physical commonsense-based benchmark for video generation. arXiv preprint arXiv:2410.05363, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 135, + 293, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 135, + 293, + 183 + ], + "spans": [ + { + "bbox": [ + 53, + 135, + 293, + 183 + ], + "type": "text", + "content": "Motamed, S., Culp, L., Swersky, K., Jaini, P., and Geirhos, R. Do generative video models learn physical principles from watching videos? arXiv preprint arXiv:2501.09038, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 191, + 292, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 191, + 292, + 216 + ], + "spans": [ + { + "bbox": [ + 53, + 191, + 292, + 216 + ], + "type": "text", + "content": "NBC. Coca-Cola causes controversy with ai-made ad, 2025. Accessed: 2025-01-17." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 224, + 292, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 224, + 292, + 247 + ], + "spans": [ + { + "bbox": [ + 53, + 224, + 292, + 247 + ], + "type": "text", + "content": "OpenAI. Sora, 2024. URL https://sora.com. Accessed: 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 255, + 291, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 255, + 291, + 293 + ], + "spans": [ + { + "bbox": [ + 53, + 255, + 291, + 293 + ], + "type": "text", + "content": "Prabhudesai, M., Mendonca, R., Qin, Z., Fragkiadaki, K., and Pathak, D. Video diffusion alignment via reward gradients. arXiv preprint arXiv:2407.08737, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 300, + 292, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 300, + 292, + 384 + ], + "spans": [ + { + "bbox": [ + 53, + 300, + 292, + 384 + ], + "type": "text", + "content": "Ravi, N., Gabeur, V., Hu, Y.-T., Hu, R., Ryali, C., Ma, T., Khedr, H., Rädle, R., Rolland, C., Gustafson, L., Mintun, E., Pan, J., Alwala, K. V., Carion, N., Wu, C.-Y., Girshick, R., Dollár, P., and Feichtenhofer, C. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024. URL https://arxiv.org/abs/2408.00714." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 392, + 292, + 427 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 392, + 292, + 427 + ], + "spans": [ + { + "bbox": [ + 53, + 392, + 292, + 427 + ], + "type": "text", + "content": "Runway. 
Gen-3 alpha, 2024. URL https://runwayml.com/research/introducing-gen-3alpha. Accessed: 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 436, + 291, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 436, + 291, + 460 + ], + "spans": [ + { + "bbox": [ + 53, + 436, + 291, + 460 + ], + "type": "text", + "content": "Runway. AIFF 2025: AI Film Festival, 2025. URL https://aiff.runwayml.com/. Accessed: 2025-01-17." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 468, + 291, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 468, + 291, + 517 + ], + "spans": [ + { + "bbox": [ + 53, + 468, + 291, + 517 + ], + "type": "text", + "content": "Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., and Chen, X. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 525, + 291, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 525, + 291, + 560 + ], + "spans": [ + { + "bbox": [ + 53, + 525, + 291, + 560 + ], + "type": "text", + "content": "Spelke, E. S., Breinlinger, K., Macomber, J., and Jacobson, K. Origins of knowledge. Psychological review, 99(4): 605, 1992." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 568, + 292, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 568, + 292, + 616 + ], + "spans": [ + { + "bbox": [ + 53, + 568, + 292, + 616 + ], + "type": "text", + "content": "Srivastava, N., Mansimov, E., and Salakhudinov, R. Unsupervised learning of video representations using lstms. In International conference on machine learning, pp. 843-852. PMLR, 2015." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 625, + 292, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 625, + 292, + 685 + ], + "spans": [ + { + "bbox": [ + 53, + 625, + 292, + 685 + ], + "type": "text", + "content": "Teed, Z. and Deng, J. Raft: Recurrent all-pairs field transforms for optical flow. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part II 16, pp. 402-419. Springer, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 693, + 292, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 693, + 292, + 718 + ], + "spans": [ + { + "bbox": [ + 53, + 693, + 292, + 718 + ], + "type": "text", + "content": "Ullman, T. D., Spelke, E., Battaglia, P., and Tenenbaum, J. B. Mind games: Game engines as an architecture for" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 306, + 67, + 542, + 582 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 315, + 67, + 542, + 91 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 67, + 542, + 91 + ], + "spans": [ + { + "bbox": [ + 315, + 67, + 542, + 91 + ], + "type": "text", + "content": "intuitive physics. Trends in cognitive sciences, 21(9): 649-665, 2017." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 99, + 542, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 99, + 542, + 147 + ], + "spans": [ + { + "bbox": [ + 306, + 99, + 542, + 147 + ], + "type": "text", + "content": "Unterthiner, T., Van Steenkiste, S., Kurach, K., Marinier, R., Michalski, M., and Gelly, S. Towards accurate generative models of video: A new metric & challenges. arXiv preprint arXiv:1812.01717, 2018." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 155, + 542, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 155, + 542, + 203 + ], + "spans": [ + { + "bbox": [ + 306, + 155, + 542, + 203 + ], + "type": "text", + "content": "Wu, J., Yildirim, I., Lim, J. J., Freeman, B., and Tenenbaum, J. Galileo: Perceiving physical object properties by integrating a physics engine with deep learning. Advances in neural information processing systems, 28, 2015." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 210, + 542, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 210, + 542, + 258 + ], + "spans": [ + { + "bbox": [ + 306, + 210, + 542, + 258 + ], + "type": "text", + "content": "Xing, J., Xia, M., Zhang, Y., Chen, H., Yu, W., Liu, H., Wang, X., Wong, T.-T., and Shan, Y. Dynamiccafter: Animating open-domain images with video diffusion priors. arXiv preprint arXiv:2310.12190, 2023." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 266, + 542, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 266, + 542, + 315 + ], + "spans": [ + { + "bbox": [ + 306, + 266, + 542, + 315 + ], + "type": "text", + "content": "Xue, T., Wu, J., Bouman, K., and Freeman, B. Visual dynamics: Probabilistic future frame synthesis via cross convolutional networks. Advances in neural information processing systems, 29, 2016." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 322, + 542, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 322, + 542, + 357 + ], + "spans": [ + { + "bbox": [ + 306, + 322, + 542, + 357 + ], + "type": "text", + "content": "Yang, L., Kang, B., Huang, Z., Zhao, Z., Xu, X., Feng, J., and Zhao, H. Depth anything v2. arXiv:2406.09414, 2024a." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 365, + 542, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 365, + 542, + 403 + ], + "spans": [ + { + "bbox": [ + 306, + 365, + 542, + 403 + ], + "type": "text", + "content": "Yang, M., Du, Y., Ghasemipour, K., Tompson, J., Schuurmans, D., and Abbeel, P. Learning interactive real-world simulators. arXiv preprint arXiv:2310.06114, 2023." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 410, + 542, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 410, + 542, + 458 + ], + "spans": [ + { + "bbox": [ + 306, + 410, + 542, + 458 + ], + "type": "text", + "content": "Yang, S., Walker, J., Parker-Holder, J., Du, Y., Bruce, J., Barreto, A., Abbeel, P., and Schuurmans, D. Video as the new language for real-world decision making. arXiv preprint arXiv:2402.17139, 2024b." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 306, + 466, + 542, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 466, + 542, + 525 + ], + "spans": [ + { + "bbox": [ + 306, + 466, + 542, + 525 + ], + "type": "text", + "content": "Yang, Z., Teng, J., Zheng, W., Ding, M., Huang, S., Xu, J., Yang, Y., Hong, W., Zhang, X., Feng, G., et al. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072, 2024c." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 306, + 533, + 542, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 533, + 542, + 582 + ], + "spans": [ + { + "bbox": [ + 306, + 533, + 542, + 582 + ], + "type": "text", + "content": "Zheng, Z., Peng, X., Yang, T., Shen, C., Li, S., Liu, H., Zhou, Y., Li, T., and You, Y. Open-sora: Democratizing efficient video production for all, March 2024. URL https://github.com/hpcaitech/Open-Sora." 
+ } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "spans": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 66, + 258, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 66, + 258, + 79 + ], + "spans": [ + { + "bbox": [ + 52, + 66, + 258, + 79 + ], + "type": "text", + "content": "A. Discussion of Image-to-Video setting." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 87, + 541, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 87, + 541, + 159 + ], + "spans": [ + { + "bbox": [ + 52, + 87, + 541, + 159 + ], + "type": "text", + "content": "We note that our choice of single-image input, as opposed to multi-frame input, comes with some trade-offs. We choose the image-to-video setting because it is widely supported among many different models, allowing us to make effective comparisons across the current state-of-the-art. However, only conditioning on a single frame introduces significant ambiguity. Due to the loss of information caused by projecting the 3D world through perspective, it may not be possible to directly infer the size of the object or its height. 
In practice, we find our metrics are still reliable signals of task success, but we still study the problem of ambiguity more extensively in Section 5.2." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 174, + 143, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 174, + 143, + 186 + ], + "spans": [ + { + "bbox": [ + 52, + 174, + 143, + 186 + ], + "type": "text", + "content": "B. Metric details." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 194, + 541, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 194, + 541, + 263 + ], + "spans": [ + { + "bbox": [ + 52, + 194, + 541, + 263 + ], + "type": "text", + "content": "We propose three metrics to assess the accuracy of trajectories, shape fidelity, and object permanence. Each of our metrics compare frames from the ground-truth video with the generated video. Because different models can operate at different fps, we perform fps alignment as part of our evaluation process. To perform fps alignment, we map each frame index of the generated videos to the ground truth using " + }, + { + "bbox": [ + 52, + 194, + 541, + 263 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{gen}}" + }, + { + "bbox": [ + 52, + 194, + 541, + 263 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 52, + 194, + 541, + 263 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{gt}}" + }, + { + "bbox": [ + 52, + 194, + 541, + 263 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 52, + 194, + 541, + 263 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{gen}}" + }, + { + "bbox": [ + 52, + 194, + 541, + 263 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 52, + 194, + 541, + 263 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{gt}}" + }, + { + "bbox": [ + 52, + 194, + 541, + 263 + ], + "type": "text", + "content": " are the fps of generated video and ground truth respectively. 
For " + }, + { + "bbox": [ + 52, + 194, + 541, + 263 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 52, + 194, + 541, + 263 + ], + "type": "text", + "content": "-th frame in the generated video, we find the corresponding aligned frame index " + }, + { + "bbox": [ + 52, + 194, + 541, + 263 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 52, + 194, + 541, + 263 + ], + "type": "text", + "content": " in the ground truth video:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 255, + 263, + 542, + 289 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 263, + 542, + 289 + ], + "spans": [ + { + "bbox": [ + 255, + 263, + 542, + 289 + ], + "type": "interline_equation", + "content": "j = \\operatorname {r o u n d} \\left(i \\cdot \\frac {f _ {\\text {g e n}}}{f _ {\\mathrm {g t}}}\\right) \\tag {4}", + "image_path": "ebac98ec4b7c4b91f83432d9328234ee0cb2346246756bf3c6ef6bf977273ad0.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 293, + 542, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 293, + 542, + 327 + ], + "spans": [ + { + "bbox": [ + 52, + 293, + 542, + 327 + ], + "type": "text", + "content": "Through fps alignment, we downsample the ground truth video to match the frame number of the generated video. 
We denote the downsampled ground truth as " + }, + { + "bbox": [ + 52, + 293, + 542, + 327 + ], + "type": "inline_equation", + "content": "\\{I_i^{\\mathrm{gt}}\\}_{i = 1}^N" + }, + { + "bbox": [ + 52, + 293, + 542, + 327 + ], + "type": "text", + "content": " and the generated video as " + }, + { + "bbox": [ + 52, + 293, + 542, + 327 + ], + "type": "inline_equation", + "content": "\\{I_i^{\\mathrm{gen}}\\}_{i = 1}^N" + }, + { + "bbox": [ + 52, + 293, + 542, + 327 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 52, + 293, + 542, + 327 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 52, + 293, + 542, + 327 + ], + "type": "text", + "content": " is the number of frames in the generated video." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 328, + 542, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 328, + 542, + 353 + ], + "spans": [ + { + "bbox": [ + 52, + 328, + 542, + 353 + ], + "type": "text", + "content": "Trajectory L2. For each frame in both the generated video and ground truth, we calculate the centroid of the masked region. 
We then compute " + }, + { + "bbox": [ + 52, + 328, + 542, + 353 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 52, + 328, + 542, + 353 + ], + "type": "text", + "content": " distance between the centroids of corresponding frames:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 238, + 360, + 542, + 393 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 360, + 542, + 393 + ], + "spans": [ + { + "bbox": [ + 238, + 360, + 542, + 393 + ], + "type": "interline_equation", + "content": "L _ {2} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\| C _ {i} ^ {\\text {g e n}} - C _ {i} ^ {\\mathrm {g t}} \\| _ {2} \\tag {5}", + "image_path": "74d2a95e99fc2fd73ad4d1df3a6192e0a7054af3d825efca106428f50d84a7b8.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 402, + 541, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 402, + 541, + 426 + ], + "spans": [ + { + "bbox": [ + 52, + 402, + 541, + 426 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 52, + 402, + 541, + 426 + ], + "type": "inline_equation", + "content": "C_i^{\\mathrm{gen}}, C_i^{\\mathrm{gt}} \\in \\mathbb{R}^2" + }, + { + "bbox": [ + 52, + 402, + 541, + 426 + ], + "type": "text", + "content": " are the centroids of the dropping object in the " + }, + { + "bbox": [ + 52, + 402, + 541, + 426 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 52, + 402, + 541, + 426 + ], + "type": "text", + "content": "-th frame of generated video and the ground truth respectively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 426, + 541, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 426, + 541, + 451 + ], + "spans": [ + { + "bbox": [ + 52, + 426, + 541, + 451 + ], + "type": "text", + "content": "Chamfer Distance (CD). 
To assess the shape fidelity of objects, we calculate the Chamfer Distance (CD) between the mask regions of the generated video and ground truth:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 156, + 459, + 436, + 493 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 459, + 436, + 493 + ], + "spans": [ + { + "bbox": [ + 156, + 459, + 436, + 493 + ], + "type": "interline_equation", + "content": "\\mathrm {C D} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left(\\frac {1}{| P _ {i} |} \\sum_ {p \\in P _ {i}} \\min _ {q \\in Q _ {i}} \\| p - q \\| _ {2} + \\frac {1}{| Q _ {i} |} \\sum_ {q \\in Q _ {i}} \\min _ {p \\in P _ {i}} \\| q - p \\| _ {2}\\right)", + "image_path": "7e1e125b014d5443ce72b21ee8652e2160e81fcf0acf07a108129a1ed596ddf4.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 502, + 541, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 502, + 541, + 528 + ], + "spans": [ + { + "bbox": [ + 52, + 502, + 541, + 528 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 52, + 502, + 541, + 528 + ], + "type": "inline_equation", + "content": "P_{i} = \\{p_{j}\\}_{j = 1}^{|P_{i}|}" + }, + { + "bbox": [ + 52, + 502, + 541, + 528 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 52, + 502, + 541, + 528 + ], + "type": "inline_equation", + "content": "Q_{i} = \\{q_{j}\\}_{j = 1}^{|Q_{i}|}" + }, + { + "bbox": [ + 52, + 502, + 541, + 528 + ], + "type": "text", + "content": " are the sets of mask points in the " + }, + { + "bbox": [ + 52, + 502, + 541, + 528 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 52, + 502, + 541, + 528 + ], + "type": "text", + "content": "-th frame of the generated video and ground truth respectively." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 528, + 541, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 528, + 541, + 552 + ], + "spans": [ + { + "bbox": [ + 52, + 528, + 541, + 552 + ], + "type": "text", + "content": "Intersection over Union (IoU). We use the Intersection over Union (IoU) metric to evaluate object permanence. IoU measures objects' degree of overlap between the generated video and ground truth. This is formulated as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 233, + 560, + 542, + 594 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 560, + 542, + 594 + ], + "spans": [ + { + "bbox": [ + 233, + 560, + 542, + 594 + ], + "type": "interline_equation", + "content": "\\mathrm {I o U} = \\frac {1}{| N |} \\sum_ {i = 1} ^ {N} \\frac {\\left| M _ {i} ^ {\\text {g e n}} \\cap M _ {i} ^ {\\mathrm {g t}} \\right|}{\\left| M _ {i} ^ {\\text {g e n}} \\cup M _ {i} ^ {\\mathrm {g t}} \\right|} \\tag {6}", + "image_path": "76e15cbc4824d4b47dfc7cac69adb8b782eaf0ac238e42bf738fa8fe88e04090.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 52, + 601, + 541, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 601, + 541, + 628 + ], + "spans": [ + { + "bbox": [ + 52, + 601, + 541, + 628 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 52, + 601, + 541, + 628 + ], + "type": "inline_equation", + "content": "M_{i}^{\\mathrm{gen}}" + }, + { + "bbox": [ + 52, + 601, + 541, + 628 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 52, + 601, + 541, + 628 + ], + "type": "inline_equation", + "content": "M_{i}^{\\mathrm{gt}} \\in \\{0,1\\}^{H\\times W}" + }, + { + "bbox": [ + 52, + 601, + 541, + 628 + ], + "type": "text", + "content": " are binary segmentation masks of the falling object in the " + }, + { + "bbox": [ + 52, + 601, + 541, + 628 + ], + "type": "inline_equation", + "content": "i" + }, + { + 
"bbox": [ + 52, + 601, + 541, + 628 + ], + "type": "text", + "content": "-th frame of the generated and ground truth videos respectively." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "spans": [ + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "text", + "content": "Time error. When testing on videos generated in simulation, we can provide a timing error. From the dropping height " + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "inline_equation", + "content": "Y_{0}" + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "text", + "content": " of the ground truth video, which we have access to from the simulator, we can derive " + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "inline_equation", + "content": "t_{\\mathrm{drop}} = \\sqrt{Y_0\\frac{2}{g}}" + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "text", + "content": ". We then obtain a dropping time from the model's output by estimating the frame of impact as the first frame " + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "text", + "content": " whose centroid velocity in the " + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "text", + "content": " direction is negative. 
If " + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "inline_equation", + "content": "t_{\\mathrm{drop}}" + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "text", + "content": " occurs in between " + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "inline_equation", + "content": "F - 1" + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "text", + "content": ", then we define the time error " + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "inline_equation", + "content": "E_{\\mathrm{time}}" + }, + { + "bbox": [ + 52, + 632, + 541, + 694 + ], + "type": "text", + "content": " as zero. Otherwise, we define the time error as" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 200, + 694, + 542, + 721 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 694, + 542, + 721 + ], + "spans": [ + { + "bbox": [ + 200, + 694, + 542, + 721 + ], + "type": "interline_equation", + "content": "E _ {\\text {t i m e}} = \\min \\left(\\left| \\frac {F - 1}{\\mathrm {f p s}} - t _ {\\text {d r o p}} \\right|, \\left| \\frac {F}{\\mathrm {f p s}} - t _ {\\text {d r o p}} \\right|\\right). 
\\tag {7}", + "image_path": "dbfaf034811ce5459e629284804f36c7a1d8b92410b0042ff3289fca3e7293e2.jpg" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "spans": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 66, + 220, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 66, + 220, + 79 + ], + "spans": [ + { + "bbox": [ + 52, + 66, + 220, + 79 + ], + "type": "text", + "content": "C. ORO implementation details." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 87, + 542, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 87, + 542, + 111 + ], + "spans": [ + { + "bbox": [ + 52, + 87, + 542, + 111 + ], + "type": "text", + "content": "In our setting, we do not cut the gradient after step " + }, + { + "bbox": [ + 52, + 87, + 542, + 111 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 52, + 87, + 542, + 111 + ], + "type": "text", + "content": " like VADER. 
The gradient " + }, + { + "bbox": [ + 52, + 87, + 542, + 111 + ], + "type": "inline_equation", + "content": "\\nabla_{\\theta}R(x_0',x_0)" + }, + { + "bbox": [ + 52, + 87, + 542, + 111 + ], + "type": "text", + "content": " backpropagates through all diffusion timesteps and update the model weights " + }, + { + "bbox": [ + 52, + 87, + 542, + 111 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 52, + 87, + 542, + 111 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 213, + 118, + 542, + 152 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 213, + 118, + 542, + 152 + ], + "spans": [ + { + "bbox": [ + 213, + 118, + 542, + 152 + ], + "type": "interline_equation", + "content": "\\nabla_ {\\theta} \\left(R \\left(x _ {0} ^ {\\prime}, x _ {0}\\right)\\right) = \\sum_ {t = 0} ^ {T} \\frac {\\partial R \\left(x _ {0} ^ {\\prime} , x _ {0}\\right)}{\\partial x _ {t}} \\cdot \\frac {\\partial x _ {t}}{\\partial \\theta} \\tag {8}", + "image_path": "18f6ca9a4d35a17aace73c8956747dc9f04f552c0b87a2dd86040242c6bd19e8.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 159, + 212, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 159, + 212, + 171 + ], + "spans": [ + { + "bbox": [ + 52, + 159, + 212, + 171 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 52, + 159, + 212, + 171 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 52, + 159, + 212, + 171 + ], + "type": "text", + "content": " is the total diffusion timesteps." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 176, + 542, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 176, + 542, + 199 + ], + "spans": [ + { + "bbox": [ + 52, + 176, + 542, + 199 + ], + "type": "text", + "content": "Segmentation Reward. 
We utilize SAM 2 (Ravi et al., 2024) to generate segmentation masks across frames for generated video:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 254, + 210, + 542, + 224 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 254, + 210, + 542, + 224 + ], + "spans": [ + { + "bbox": [ + 254, + 210, + 542, + 224 + ], + "type": "interline_equation", + "content": "M ^ {\\text {g e n}} = \\operatorname {S A M} - 2 \\left(x _ {0}\\right) \\tag {9}", + "image_path": "6e609a608080271c51e72d3bb004d998e6ef365863e6486c69de7a14c67bf666.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 233, + 542, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 233, + 542, + 269 + ], + "spans": [ + { + "bbox": [ + 52, + 233, + 542, + 269 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 52, + 233, + 542, + 269 + ], + "type": "inline_equation", + "content": "M^{\\mathrm{gen}}" + }, + { + "bbox": [ + 52, + 233, + 542, + 269 + ], + "type": "text", + "content": " denotes the masks of the falling object in the generated video. We obtain ground truth masks " + }, + { + "bbox": [ + 52, + 233, + 542, + 269 + ], + "type": "inline_equation", + "content": "M^{\\mathrm{gt}}" + }, + { + "bbox": [ + 52, + 233, + 542, + 269 + ], + "type": "text", + "content": " using Kubric (Greff et al., 2022). To avoid non-differentiable reward, we use Sigmoid to normalize mask logits of generated video instead of converting them to binary masks. 
We use IoU between " + }, + { + "bbox": [ + 52, + 233, + 542, + 269 + ], + "type": "inline_equation", + "content": "M^{\\mathrm{gen}}" + }, + { + "bbox": [ + 52, + 233, + 542, + 269 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 52, + 233, + 542, + 269 + ], + "type": "inline_equation", + "content": "M^{\\mathrm{gt}}" + }, + { + "bbox": [ + 52, + 233, + 542, + 269 + ], + "type": "text", + "content": " as reward function:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 235, + 277, + 542, + 291 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 277, + 542, + 291 + ], + "spans": [ + { + "bbox": [ + 235, + 277, + 542, + 291 + ], + "type": "interline_equation", + "content": "R \\left(x _ {0} ^ {\\prime}, x _ {0}\\right) = \\operatorname {I o U} \\left(M ^ {\\text {g e n}}, M ^ {\\text {g t}}\\right) \\tag {10}", + "image_path": "cd46345119bb9f763ede3215ff65c6a675244fe939c3245326654a7eb9c2c6b0.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 300, + 362, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 300, + 362, + 312 + ], + "spans": [ + { + "bbox": [ + 52, + 300, + 362, + 312 + ], + "type": "text", + "content": "Maximizing objective 1 is equivalent to minimizing the following objective:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 192, + 319, + 542, + 335 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 319, + 542, + 335 + ], + "spans": [ + { + "bbox": [ + 192, + 319, + 542, + 335 + ], + "type": "interline_equation", + "content": "J (\\theta) = \\mathbb {E} _ {\\left(x _ {0}, c\\right) \\sim \\mathcal {D}, x _ {0} ^ {\\prime} \\sim p _ {\\theta} \\left(x _ {0} ^ {\\prime} \\mid c\\right)} \\left[ 1 - \\operatorname {I o U} \\left(M ^ {\\text {g e n}}, M ^ {\\text {g t}}\\right) \\right] \\tag {11}", + "image_path": "dad12d0b6072d76092e95ba4d3424ea053c4dfb1ed2ec31c20542fc94215138d.jpg" + } + ] + } + ], + "index": 
10 + }, + { + "bbox": [ + 52, + 342, + 542, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 342, + 542, + 378 + ], + "spans": [ + { + "bbox": [ + 52, + 342, + 542, + 378 + ], + "type": "text", + "content": "This objective constrains the position and shape of the generated object in the video, encouraging a greater intersection with the object region in the ground truth video. The model learns to generate more accurate object positions and shapes through training with this objective." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 384, + 542, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 384, + 542, + 407 + ], + "spans": [ + { + "bbox": [ + 52, + 384, + 542, + 407 + ], + "type": "text", + "content": "Optical Flow Reward. We utilize RAFT (Teed & Deng, 2020) to generate optical flow for both generated videos and ground truth:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 257, + 407, + 542, + 425 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 407, + 542, + 425 + ], + "spans": [ + { + "bbox": [ + 257, + 407, + 542, + 425 + ], + "type": "interline_equation", + "content": "V ^ {\\text {g e n}} = \\operatorname {R A F T} \\left(x _ {0} ^ {\\prime}\\right) \\tag {12}", + "image_path": "afb3cd5801f3ce6384f1620771da148f5537afbdc9f1890db3bb151e6321637a.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 262, + 422, + 337, + 434 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 262, + 422, + 337, + 434 + ], + "spans": [ + { + "bbox": [ + 262, + 422, + 337, + 434 + ], + "type": "interline_equation", + "content": "V ^ {\\mathrm {g t}} = \\operatorname {R A F T} (x _ {0})", + "image_path": "75ae96615ee2cc27de037c2571b7e943cd4557143b31224230f453e92e8ee6c4.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 52, + 439, + 501, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 439, + 
501, + 452 + ], + "spans": [ + { + "bbox": [ + 52, + 439, + 501, + 452 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 52, + 439, + 501, + 452 + ], + "type": "inline_equation", + "content": "V^{\\mathrm{gen}}" + }, + { + "bbox": [ + 52, + 439, + 501, + 452 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 52, + 439, + 501, + 452 + ], + "type": "inline_equation", + "content": "V^{\\mathrm{gt}}" + }, + { + "bbox": [ + 52, + 439, + 501, + 452 + ], + "type": "text", + "content": " denote the optical flows of generated videos and ground truth. We define the reward as follows:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 239, + 460, + 542, + 474 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 460, + 542, + 474 + ], + "spans": [ + { + "bbox": [ + 239, + 460, + 542, + 474 + ], + "type": "interline_equation", + "content": "R \\left(x _ {0} ^ {\\prime}, x _ {0}\\right) = - \\left| V ^ {\\text {g e n}} - V ^ {\\text {g t}} \\right| \\tag {13}", + "image_path": "9a02ea6f114914c7d76f049a88a46de99ef271b776da7080f7929a05b2d82157.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 52, + 481, + 362, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 481, + 362, + 494 + ], + "spans": [ + { + "bbox": [ + 52, + 481, + 362, + 494 + ], + "type": "text", + "content": "Maximizing objective 1 is equivalent to minimizing the following objective:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 208, + 502, + 542, + 517 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 502, + 542, + 517 + ], + "spans": [ + { + "bbox": [ + 208, + 502, + 542, + 517 + ], + "type": "interline_equation", + "content": "J (\\theta) = \\mathbb {E} _ {\\left(x _ {0}, c\\right) \\sim \\mathcal {D}, x _ {0} ^ {\\prime} \\sim p _ {\\theta} \\left(x _ {0} ^ {\\prime} \\mid c\\right)} \\left[ \\left| V ^ {\\text {g e n}} - V ^ {\\text {g t}} \\right| \\right] 
\\tag {14}", + "image_path": "6e1f9e94954a2f07537b844ab77f7db174e9dbc07feff0f2ba04ac58b383fd0c.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 52, + 524, + 542, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 524, + 542, + 548 + ], + "spans": [ + { + "bbox": [ + 52, + 524, + 542, + 548 + ], + "type": "text", + "content": "This objective constrains the motion of the generated object in the video. The model learns to generate more accurate physical motion through training with this objective." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 52, + 553, + 542, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 553, + 542, + 577 + ], + "spans": [ + { + "bbox": [ + 52, + 553, + 542, + 577 + ], + "type": "text", + "content": "Depth Reward. We utilize Depth-Anything-V2 (Yang et al., 2024a) to generate optical depth maps for both generated videos and ground truth:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 227, + 577, + 542, + 594 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 577, + 542, + 594 + ], + "spans": [ + { + "bbox": [ + 227, + 577, + 542, + 594 + ], + "type": "interline_equation", + "content": "D ^ {\\text {g e n}} = \\text {D e p t h - A n y t h i n g - V 2} \\left(x _ {0} ^ {\\prime}\\right) \\tag {15}", + "image_path": "eceaeaf5277e4ba1350d29f94dfc4847af28e5a6642caf910ffe300f4e298d7c.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 233, + 592, + 366, + 604 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 592, + 366, + 604 + ], + "spans": [ + { + "bbox": [ + 233, + 592, + 366, + 604 + ], + "type": "interline_equation", + "content": "D ^ {\\mathrm {g t}} = \\text {D e p t h - A n y t h i n g - V 2} (x _ {0})", + "image_path": "fa39dd5c194a70cc51e8058e5369a1cc529a139d485f223e0720f97bfeb118d8.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 52, + 609, + 497, + 622 + ], + 
"type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 609, + 497, + 622 + ], + "spans": [ + { + "bbox": [ + 52, + 609, + 497, + 622 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 52, + 609, + 497, + 622 + ], + "type": "inline_equation", + "content": "D^{\\mathrm{gen}}" + }, + { + "bbox": [ + 52, + 609, + 497, + 622 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 52, + 609, + 497, + 622 + ], + "type": "inline_equation", + "content": "D^{\\mathrm{gt}}" + }, + { + "bbox": [ + 52, + 609, + 497, + 622 + ], + "type": "text", + "content": " denote the depth maps of generated videos and ground truth. We define the reward as follows:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 239, + 630, + 542, + 643 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 630, + 542, + 643 + ], + "spans": [ + { + "bbox": [ + 239, + 630, + 542, + 643 + ], + "type": "interline_equation", + "content": "R \\left(x _ {0} ^ {\\prime}, x _ {0}\\right) = - \\left| D ^ {\\text {g e n}} - D ^ {\\mathrm {g t}} \\right| \\tag {16}", + "image_path": "8ae69436605c27369e44a6c3f836bbf91b30b4935fa8dcf15a6d4b282938dfee.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 52, + 651, + 362, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 651, + 362, + 663 + ], + "spans": [ + { + "bbox": [ + 52, + 651, + 362, + 663 + ], + "type": "text", + "content": "Maximizing objective 1 is equivalent to minimizing the following objective:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 208, + 672, + 542, + 687 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 672, + 542, + 687 + ], + "spans": [ + { + "bbox": [ + 208, + 672, + 542, + 687 + ], + "type": "interline_equation", + "content": "J (\\theta) = \\mathbb {E} _ {\\left(x _ {0}, c\\right) \\sim \\mathcal {D}, x _ {0} ^ {\\prime} \\sim p _ {\\theta} \\left(x _ {0} ^ {\\prime} \\mid c\\right)} 
\\left[ \\left| D ^ {\\mathrm {g e n}} - D ^ {\\mathrm {g t}} \\right| \\right] \\tag {17}", + "image_path": "99e34ae6711da1dd7f954b4f89af1eb173f91e59afdf52dfd382ee502dbf0eb6.jpg" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 52, + 693, + 542, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 693, + 542, + 718 + ], + "spans": [ + { + "bbox": [ + 52, + 693, + 542, + 718 + ], + "type": "text", + "content": "This objective constrains the 3d motion of the generated object in the video. The model learns to generate more accurate 3d physical motion through training with this objective." + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "spans": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 66, + 167, + 80 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 66, + 167, + 80 + ], + "spans": [ + { + "bbox": [ + 52, + 66, + 167, + 80 + ], + "type": "text", + "content": "D. 
Coordinate system" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 87, + 543, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 87, + 543, + 148 + ], + "spans": [ + { + "bbox": [ + 51, + 87, + 543, + 148 + ], + "type": "text", + "content": "We give a visualization of the coordinate system used in this paper in Figure 12. To compute " + }, + { + "bbox": [ + 51, + 87, + 543, + 148 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 51, + 87, + 543, + 148 + ], + "type": "text", + "content": ", we first leverage a segmentation map and find pixel row index that is just below the object. Once this row index is found, " + }, + { + "bbox": [ + 51, + 87, + 543, + 148 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 51, + 87, + 543, + 148 + ], + "type": "text", + "content": " can easily be computed from the camera position, camera sensor size, and image resolution. We note that because our camera is assumed to be in perspective with the " + }, + { + "bbox": [ + 51, + 87, + 543, + 148 + ], + "type": "inline_equation", + "content": "XY" + }, + { + "bbox": [ + 51, + 87, + 543, + 148 + ], + "type": "text", + "content": " plane, we can ignore " + }, + { + "bbox": [ + 51, + 87, + 543, + 148 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 51, + 87, + 543, + 148 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 51, + 87, + 543, + 148 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 51, + 87, + 543, + 148 + ], + "type": "text", + "content": " (not shown in figure) in our analyses in Section 5.1 and Section 5.2." 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 53, + 158, + 203, + 323 + ], + "blocks": [ + { + "bbox": [ + 53, + 158, + 203, + 323 + ], + "lines": [ + { + "bbox": [ + 53, + 158, + 203, + 323 + ], + "spans": [ + { + "bbox": [ + 53, + 158, + 203, + 323 + ], + "type": "image", + "image_path": "afd6a41eb9e3a6bfc87ee7d008d6fc6d00d3b71c75ccf91adf12e6a11f200f01.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 340, + 544, + 365 + ], + "lines": [ + { + "bbox": [ + 51, + 340, + 544, + 365 + ], + "spans": [ + { + "bbox": [ + 51, + 340, + 544, + 365 + ], + "type": "text", + "content": "Figure 12. A visualization of the coordinate system used in this paper (not to scale). The image plane height of the object is denoted as " + }, + { + "bbox": [ + 51, + 340, + 544, + 365 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 51, + 340, + 544, + 365 + ], + "type": "text", + "content": ", its actual height in 3D as " + }, + { + "bbox": [ + 51, + 340, + 544, + 365 + ], + "type": "inline_equation", + "content": "Y" + }, + { + "bbox": [ + 51, + 340, + 544, + 365 + ], + "type": "text", + "content": ", and its depth as " + }, + { + "bbox": [ + 51, + 340, + 544, + 365 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 51, + 340, + 544, + 365 + ], + "type": "text", + "content": ". The camera focal length is denoted as " + }, + { + "bbox": [ + 51, + 340, + 544, + 365 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 51, + 340, + 544, + 365 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 230, + 157, + 538, + 326 + ], + "blocks": [ + { + "bbox": [ + 230, + 157, + 538, + 326 + ], + "lines": [ + { + "bbox": [ + 230, + 157, + 538, + 326 + ], + "spans": [ + { + "bbox": [ + 230, + 157, + 538, + 326 + ], + "type": "image", + "image_path": "0492c7160061c2167d23b2371de275bf324606389678afb1b4fcc06512b614c9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 390, + 168, + 404 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 390, + 168, + 404 + ], + "spans": [ + { + "bbox": [ + 52, + 390, + 168, + 404 + ], + "type": "text", + "content": "E. Derivation of " + }, + { + "bbox": [ + 52, + 390, + 168, + 404 + ], + "type": "inline_equation", + "content": "p(t|y)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "spans": [ + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "text", + "content": "In our dataset construction, we assume a uniform distribution for " + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "inline_equation", + "content": "Z \\sim \\mathcal{U}(Z_{\\min}, Z_{\\max})" + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "inline_equation", + "content": "Z_{\\min} = 2" + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "inline_equation", + "content": "Z_{\\max} = 18" + }, + { + "bbox": [ + 
52, + 411, + 543, + 467 + ], + "type": "text", + "content": ". As shown in Figure 12, the dropping height " + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "inline_equation", + "content": "Y" + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "text", + "content": " is a linear function of " + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "text", + "content": ", i.e. " + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "inline_equation", + "content": "Y = y + \\beta Z" + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "text", + "content": " for the slope " + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "text", + "content": " that can be computed from " + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "inline_equation", + "content": "y, f" + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "text", + "content": ", the sensor size, and the camera height. This means we can solve for dropping time as " + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "inline_equation", + "content": "t = \\sqrt{\\frac{2}{g}Y} = \\sqrt{\\frac{2}{g}(y + \\beta Z)}" + }, + { + "bbox": [ + 52, + 411, + 543, + 467 + ], + "type": "text", + "content": ". 
Applying the transformation rule for probability density yields" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 203, + 471, + 542, + 502 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 471, + 542, + 502 + ], + "spans": [ + { + "bbox": [ + 203, + 471, + 542, + 502 + ], + "type": "interline_equation", + "content": "p (t | y) = \\left\\{ \\begin{array}{l l} \\frac {g t}{\\left(Z _ {\\max } - Z _ {\\min }\\right) \\beta}, & t _ {\\min } \\leq t \\leq t _ {\\max } \\\\ 0, & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {18}", + "image_path": "305b266d41310514e36020946f50f0d4e84d78dde1d04ab214ef5e367fadf418.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 508, + 541, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 508, + 541, + 529 + ], + "spans": [ + { + "bbox": [ + 51, + 508, + 541, + 529 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 51, + 508, + 541, + 529 + ], + "type": "inline_equation", + "content": "t_{\\mathrm{min}} = \\sqrt{\\frac{2}{g} (y + \\beta Z_{\\mathrm{min}})}" + }, + { + "bbox": [ + 51, + 508, + 541, + 529 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 51, + 508, + 541, + 529 + ], + "type": "inline_equation", + "content": "t_{\\mathrm{max}} = \\sqrt{\\frac{2}{g} (y + \\beta Z_{\\mathrm{max}})}" + }, + { + "bbox": [ + 51, + 508, + 541, + 529 + ], + "type": "text", + "content": ". Plugging in " + }, + { + "bbox": [ + 51, + 508, + 541, + 529 + ], + "type": "inline_equation", + "content": "Z_{\\mathrm{min}} = 2" + }, + { + "bbox": [ + 51, + 508, + 541, + 529 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 51, + 508, + 541, + 529 + ], + "type": "inline_equation", + "content": "Z_{\\mathrm{max}} = 18" + }, + { + "bbox": [ + 51, + 508, + 541, + 529 + ], + "type": "text", + "content": " yields Equation (3)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 541, + 167, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 541, + 167, + 555 + ], + "spans": [ + { + "bbox": [ + 52, + 541, + 167, + 555 + ], + "type": "text", + "content": "F. Ambiguous dataset" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 561, + 543, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 561, + 543, + 658 + ], + "spans": [ + { + "bbox": [ + 51, + 561, + 543, + 658 + ], + "type": "text", + "content": "We introduce a new dataset for distributional analysis that broadens " + }, + { + "bbox": [ + 51, + 561, + 543, + 658 + ], + "type": "inline_equation", + "content": "p(t|y)" + }, + { + "bbox": [ + 51, + 561, + 543, + 658 + ], + "type": "text", + "content": ", in contrast to the PSFT dataset, which prioritizes realism and has a narrower distribution due to limited object depth variability. To create a dataset with " + }, + { + "bbox": [ + 51, + 561, + 543, + 658 + ], + "type": "inline_equation", + "content": "p(t|y)" + }, + { + "bbox": [ + 51, + 561, + 543, + 658 + ], + "type": "text", + "content": " that is sufficiently diverse for meaningful analysis, we first set up the initial scenes as before, but then apply an augmentation where a new depth values is sampled uniformly from [2, 18] and the object is scaled and translated such that it appears the same in the original image, as shown in Figure 9. For simplicity, we limit our scenes to a single dropping object with no other objects on the ground. We also disable shadows, preventing the model from using them as cues to infer depth and height. Our dataset contains 5k samples consisting of 1k unique initial scenes each containing 5 different trajectories produced by the augmentation." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 673, + 200, + 687 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 673, + 200, + 687 + ], + "spans": [ + { + "bbox": [ + 52, + 673, + 200, + 687 + ], + "type": "text", + "content": "G. Lifting trajectories to 3D" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 693, + 543, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 693, + 543, + 718 + ], + "spans": [ + { + "bbox": [ + 51, + 693, + 543, + 718 + ], + "type": "text", + "content": "To lift trajectories to 3D, we first estimate " + }, + { + "bbox": [ + 51, + 693, + 543, + 718 + ], + "type": "inline_equation", + "content": "t_{\\mathrm{drop}}" + }, + { + "bbox": [ + 51, + 693, + 543, + 718 + ], + "type": "text", + "content": " as described in Section 5.1. Using SAM2 to estimate object masks in the generated video, we can obtain a trajectory of the bottom of the object which we denote as " + }, + { + "bbox": [ + 51, + 693, + 543, + 718 + ], + "type": "inline_equation", + "content": "y_0, y_1, \\ldots, y_N" + }, + { + "bbox": [ + 51, + 693, + 543, + 718 + ], + "type": "text", + "content": " where" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "spans": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 
52, + 67, + 543, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 67, + 543, + 95 + ], + "spans": [ + { + "bbox": [ + 52, + 67, + 543, + 95 + ], + "type": "inline_equation", + "content": "N = t_{\\mathrm{drop}} \\times \\mathrm{fps}" + }, + { + "bbox": [ + 52, + 67, + 543, + 95 + ], + "type": "text", + "content": ". From " + }, + { + "bbox": [ + 52, + 67, + 543, + 95 + ], + "type": "inline_equation", + "content": "t_{\\mathrm{drop}}" + }, + { + "bbox": [ + 52, + 67, + 543, + 95 + ], + "type": "text", + "content": ", we can solve for an implied depth " + }, + { + "bbox": [ + 52, + 67, + 543, + 95 + ], + "type": "inline_equation", + "content": "Z = \\frac{\\frac{1}{2}gt^2 - y}{\\beta}" + }, + { + "bbox": [ + 52, + 67, + 543, + 95 + ], + "type": "text", + "content": ". We then compute the lifted 3D trajectory as " + }, + { + "bbox": [ + 52, + 67, + 543, + 95 + ], + "type": "inline_equation", + "content": "y_i \\mapsto y_i + \\beta Z" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 108, + 162, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 108, + 162, + 121 + ], + "spans": [ + { + "bbox": [ + 53, + 108, + 162, + 121 + ], + "type": "text", + "content": "H. PisaBench Details" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 129, + 543, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 129, + 543, + 153 + ], + "spans": [ + { + "bbox": [ + 52, + 129, + 543, + 153 + ], + "type": "text", + "content": "In this section, we discuss the details of our data collection pipeline and annotations. We present more examples of real-world videos and corresponding annotations in Figure 13." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 166, + 180, + 178 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 166, + 180, + 178 + ], + "spans": [ + { + "bbox": [ + 52, + 166, + 180, + 178 + ], + "type": "text", + "content": "H.1. 
Data Collection Pipeline" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 184, + 543, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 184, + 543, + 233 + ], + "spans": [ + { + "bbox": [ + 52, + 184, + 543, + 233 + ], + "type": "text", + "content": "Collecting Real World Videos. We enlist approximately 15 volunteers to participate in the data collection process. We hand out a tripod, tape, and invisible wire for each volunteer. To ensure the quality, diversity, and minimize the ambiguity introduced by the environments, volunteers are provided with detailed guidelines. The key points of the data collection guidelines are shown in Table 3." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 238, + 543, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 238, + 543, + 299 + ], + "spans": [ + { + "bbox": [ + 52, + 238, + 543, + 299 + ], + "type": "text", + "content": "Raw videos processing. For the collected raw videos, we cut each video into multiple clips and crop their sizes. For each video clip, we annotate its starting position in the original long video and ensure that the duration of each segment does not exceed 12 seconds. Regarding the sizes of the videos, we manually crop each video to an aspect ratio of " + }, + { + "bbox": [ + 52, + 238, + 543, + 299 + ], + "type": "inline_equation", + "content": "1:1" + }, + { + "bbox": [ + 52, + 238, + 543, + 299 + ], + "type": "text", + "content": ", ensuring that the falling objects remain fully visible within the frame during the cropping process. The processing interface is shown in Figure 14." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 312, + 157, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 312, + 157, + 323 + ], + "spans": [ + { + "bbox": [ + 53, + 312, + 157, + 323 + ], + "type": "text", + "content": "H.2. 
Annotation Details" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 330, + 543, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 330, + 543, + 378 + ], + "spans": [ + { + "bbox": [ + 52, + 330, + 543, + 378 + ], + "type": "text", + "content": "We present our annotation details in Figure 15. For video captions, we present the word cloud figure in (a). For segmentation masks, we annotate all objects in the first frame using positive and negative points, which are then propagated across frames using the SAM 2 (Ravi et al., 2024) model to produce segmentation masks for all objects throughout the video. The annotation interface is shown in (b)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 384, + 543, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 384, + 543, + 456 + ], + "spans": [ + { + "bbox": [ + 52, + 384, + 543, + 456 + ], + "type": "text", + "content": "In addition to providing the annotated caption \" {object description} falls,\" we also add information to inform off-the-shelf models of the task's context as much as possible. To further enhance task comprehension, we append an additional description \"A video that conforms to the laws of physics.\" We also employ negative prompts \"no camera motion\" and \"no slow-motion\" to ensure environmental stability and impose constraints on the generated videos. These prompts explicitly instruct the models to avoid including camera motion or any non-real-time object motion, thereby maintaining consistency with real-world physics." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 471, + 154, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 471, + 154, + 483 + ], + "spans": [ + { + "bbox": [ + 53, + 471, + 154, + 483 + ], + "type": "text", + "content": "I. 
Inference Details" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 491, + 543, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 491, + 543, + 529 + ], + "spans": [ + { + "bbox": [ + 52, + 491, + 543, + 529 + ], + "type": "text", + "content": "We present the inference configurations of each closed or open model we evaluate in Table 4. For models that do not support generating videos with 1:1 aspect ratio, we pad initial frames with black borders to the resolution supported by these models, and finally remove the black borders from the generated videos." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 543, + 208, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 543, + 208, + 556 + ], + "spans": [ + { + "bbox": [ + 53, + 543, + 208, + 556 + ], + "type": "text", + "content": "J. More Qualitative Examples" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 563, + 543, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 563, + 543, + 600 + ], + "spans": [ + { + "bbox": [ + 52, + 563, + 543, + 600 + ], + "type": "text", + "content": "We present more qualitative examples in Figure 16 - Figure 22. Although in some showcases, models can roughly predict the downward trend, models still struggle to predict plausible shape and motion. 
The defects in the models can be mainly attributed to the following aspects:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 63, + 613, + 541, + 715 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 63, + 613, + 541, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 613, + 541, + 659 + ], + "spans": [ + { + "bbox": [ + 63, + 613, + 541, + 659 + ], + "type": "text", + "content": "- Trajectory correctness: in most videos, models fail to predict even the basic falling trajectory of objects, as shown in Figure 19 (a), despite this being highly intuitive for humans. Even in cases where the falling trajectory is roughly correctly predicted, the models still struggle to accurately predict subsequent events, such as collisions, as illustrated in Figure 16 (f)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 63, + 670, + 541, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 670, + 541, + 715 + ], + "spans": [ + { + "bbox": [ + 63, + 670, + 541, + 715 + ], + "type": "text", + "content": "- Object consistency: in many generated videos, object consistency is poor. Models struggle to infer the appearance of objects from multiple viewpoints in a physically plausible manner, resulting in unnatural appearances, as shown in Figure 16 (a). Additionally, models perform poorly in maintaining object permanence, causing objects to appear blurry, as illustrated in Figure 20 (f). 
Furthermore, models sometimes introduce new objects into the video, as depicted in" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "spans": [ + { + "bbox": [ + 95, + 45, + 499, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 74, + 68, + 129, + 79 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 68, + 129, + 79 + ], + "spans": [ + { + "bbox": [ + 74, + 68, + 129, + 79 + ], + "type": "text", + "content": "Figure 20 (e)." + } + ] + } + ], + "index": 1, + "type": "text" + }, + { + "bbox": [ + 66, + 87, + 543, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 87, + 543, + 112 + ], + "spans": [ + { + "bbox": [ + 66, + 87, + 543, + 112 + ], + "type": "text", + "content": "- Scene consistency: models struggle to maintain scene consistency, leading to abrupt transitions in many videos. These sudden changes make videos appear unnatural, as shown in Figure 18 (f)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 126, + 211, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 126, + 211, + 140 + ], + "spans": [ + { + "bbox": [ + 54, + 126, + 211, + 140 + ], + "type": "text", + "content": "K. 
Simulated Adaption Details" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 147, + 543, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 147, + 543, + 220 + ], + "spans": [ + { + "bbox": [ + 54, + 147, + 543, + 220 + ], + "type": "text", + "content": "We use the Kubric (Greff et al., 2022) simulation and rendering engine for creating our simulated videos. Kubric uses PyBullet (Coumans et al., 2010) for running physics simulations and Blender (Community, 2018) for rendering. We set the simulation rate to 240 steps per second and render 2-second videos at 16 fps, resulting in 32 frames per video. Each scene consists of objects from the Google Scanned Objects (GSO) dataset (Downs et al., 2022) and uses environmental lighting from HDRI maps provided by Kubric. We use 930 objects and 458 HDRI maps for training and 103 objects and 51 HDRI maps for testing." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 224, + 543, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 224, + 543, + 261 + ], + "spans": [ + { + "bbox": [ + 54, + 224, + 543, + 261 + ], + "type": "text", + "content": "For each video, we randomly choose 1-6 objects to drop. These objects are placed at a height uniformly sampled from " + }, + { + "bbox": [ + 54, + 224, + 543, + 261 + ], + "type": "inline_equation", + "content": "0.5\\mathrm{m}" + }, + { + "bbox": [ + 54, + 224, + 543, + 261 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 54, + 224, + 543, + 261 + ], + "type": "inline_equation", + "content": "1.5\\mathrm{m}" + }, + { + "bbox": [ + 54, + 224, + 543, + 261 + ], + "type": "text", + "content": ". Below each of these objects, a possibly empty pile of up to 4 objects spawns beneath to create collisions. 
The objects are placed in a spawn region of size " + }, + { + "bbox": [ + 54, + 224, + 543, + 261 + ], + "type": "inline_equation", + "content": "2\\mathrm{m} \\times 2\\mathrm{m}" + }, + { + "bbox": [ + 54, + 224, + 543, + 261 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 54, + 266, + 543, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 266, + 543, + 303 + ], + "spans": [ + { + "bbox": [ + 54, + 266, + 543, + 303 + ], + "type": "text", + "content": "The camera is initially positioned " + }, + { + "bbox": [ + 54, + 266, + 543, + 303 + ], + "type": "inline_equation", + "content": "1\\mathrm{m}" + }, + { + "bbox": [ + 54, + 266, + 543, + 303 + ], + "type": "text", + "content": " behind this region, with its height varying uniformly between " + }, + { + "bbox": [ + 54, + 266, + 543, + 303 + ], + "type": "inline_equation", + "content": "0.4\\mathrm{m}" + }, + { + "bbox": [ + 54, + 266, + 543, + 303 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 54, + 266, + 543, + 303 + ], + "type": "inline_equation", + "content": "0.6\\mathrm{m}" + }, + { + "bbox": [ + 54, + 266, + 543, + 303 + ], + "type": "text", + "content": ". Once all objects are placed, the camera moves back in random increments until all objects are visible within the camera frame. 
The camera uses a focal length of " + }, + { + "bbox": [ + 54, + 266, + 543, + 303 + ], + "type": "inline_equation", + "content": "35\\mathrm{mm}" + }, + { + "bbox": [ + 54, + 266, + 543, + 303 + ], + "type": "text", + "content": ", a sensor width of " + }, + { + "bbox": [ + 54, + 266, + 543, + 303 + ], + "type": "inline_equation", + "content": "32\\mathrm{mm}" + }, + { + "bbox": [ + 54, + 266, + 543, + 303 + ], + "type": "text", + "content": ", and an aspect ratio of " + }, + { + "bbox": [ + 54, + 266, + 543, + 303 + ], + "type": "inline_equation", + "content": "1\\times 1" + }, + { + "bbox": [ + 54, + 266, + 543, + 303 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 54, + 318, + 129, + 331 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 318, + 129, + 331 + ], + "spans": [ + { + "bbox": [ + 54, + 318, + 129, + 331 + ], + "type": "text", + "content": "L. Limitations" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 54, + 338, + 543, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 338, + 543, + 398 + ], + "spans": [ + { + "bbox": [ + 54, + 338, + 543, + 398 + ], + "type": "text", + "content": "In this work, we collect and manually annotate a dataset of 361 real-world videos and design three spatial metrics to evaluate the performance of state-of-the-art image-to-video (I2V) models in a fundamental physical scenario: free fall. Our metrics focus solely on spatial positional relationships, excluding object appearance attributes such as color. To enable more fine-grained evaluations of appearance characteristics, we aim to develop metrics based on Multimodal Large Language Models (MLLMs) or pixel-level analysis in future work." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 54, + 403, + 543, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 403, + 543, + 453 + ], + "spans": [ + { + "bbox": [ + 54, + 403, + 543, + 453 + ], + "type": "text", + "content": "Furthermore, we propose the PSFT and ORO methods to fine-tune the Open-Sora model (Zheng et al., 2024), improving its ability to generate physically plausible videos. Despite these improvements, certain limitations remain, specifically, the generation of blurry objects in some videos. We hope to address these challenges in future research by refining both the dataset and the fine-tuning strategies, aiming to produce videos that better maintain object visuals." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 95, + 45, + 499, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 45, + 499, + 56 + ], + "spans": [ + { + "bbox": [ + 95, + 45, + 499, + 56 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 293, + 731, + 302, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 731, + 302, + 740 + ], + "spans": [ + { + "bbox": [ + 293, + 731, + 302, + 740 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 95, + 541, + 218 + ], + "blocks": [ + { + "bbox": [ + 53, + 95, + 541, + 218 + ], + "lines": [ + { + "bbox": [ + 53, + 95, + 541, + 218 + ], + "spans": [ + { + "bbox": [ + 53, + 95, + 541, + 218 + ], + "type": "image", + "image_path": "2c3255e61507df64f2fe2b8fc33da6df4f8074be9bf5d02291a3cf3bb8a8a94a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 222, + 220, + 373, + 234 + ], + 
"lines": [ + { + "bbox": [ + 222, + 220, + 373, + 234 + ], + "spans": [ + { + "bbox": [ + 222, + 220, + 373, + 234 + ], + "type": "text", + "content": "(a) A white paper roll falls." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 53, + 236, + 541, + 358 + ], + "blocks": [ + { + "bbox": [ + 53, + 236, + 541, + 358 + ], + "lines": [ + { + "bbox": [ + 53, + 236, + 541, + 358 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 541, + 358 + ], + "type": "image", + "image_path": "76661e958af011fe7f1f7ab37c8d6b11f4f98a6f04af442101dbe8308c1127ee.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 222, + 361, + 351, + 374 + ], + "lines": [ + { + "bbox": [ + 222, + 361, + 351, + 374 + ], + "spans": [ + { + "bbox": [ + 222, + 361, + 351, + 374 + ], + "type": "text", + "content": "(c) A black bottle falls." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 53, + 377, + 541, + 499 + ], + "blocks": [ + { + "bbox": [ + 53, + 377, + 541, + 499 + ], + "lines": [ + { + "bbox": [ + 53, + 377, + 541, + 499 + ], + "spans": [ + { + "bbox": [ + 53, + 377, + 541, + 499 + ], + "type": "image", + "image_path": "f96ae24b9af0730f8094994e180d7d5a6e32804e7b580a5dadd002836de5fced.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 222, + 502, + 386, + 515 + ], + "lines": [ + { + "bbox": [ + 222, + 502, + 386, + 515 + ], + "spans": [ + { + "bbox": [ + 222, + 502, + 386, + 515 + ], + "type": "text", + "content": "(b) A transparent bottle falls." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 53, + 518, + 541, + 640 + ], + "blocks": [ + { + "bbox": [ + 53, + 518, + 541, + 640 + ], + "lines": [ + { + "bbox": [ + 53, + 518, + 541, + 640 + ], + "spans": [ + { + "bbox": [ + 53, + 518, + 541, + 640 + ], + "type": "image", + "image_path": "14aef21ca7858494c097f6f67d50528c4b5917b1b19db5da86d4a491cd5a520f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 222, + 643, + 352, + 656 + ], + "lines": [ + { + "bbox": [ + 222, + 643, + 352, + 656 + ], + "spans": [ + { + "bbox": [ + 222, + 643, + 352, + 656 + ], + "type": "text", + "content": "(d) A white bottle falls." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 51, + 674, + 541, + 696 + ], + "lines": [ + { + "bbox": [ + 51, + 674, + 541, + 696 + ], + "spans": [ + { + "bbox": [ + 51, + 674, + 541, + 696 + ], + "type": "text", + "content": "Figure 13. Examples of real world videos and annotations. We present video frames in the first row and mask annotations in the second row." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "spans": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 55, + 73, + 541, + 677 + ], + "blocks": [ + { + "bbox": [ + 55, + 73, + 541, + 677 + ], + "lines": [ + { + "bbox": [ + 55, + 73, + 541, + 677 + ], + "spans": [ + { + "bbox": [ + 55, + 73, + 541, + 677 + ], + "type": "table", + "html": "
AspectRequirements
Camera·The camera must be stabilized using a tripod.\n·The dropping object should remain visible throughout the entire fall.\n·The trajectory of the object should be sufficiently centered in the frame.\n·Ensure the slow-motion setting is configured to 120 fps.\n·Avoid a completely top-down perspective; the frame should include both the floor and the wall for spatial context.\n·It is acceptable to record one long video containing multiple drops at the same location.
Objects·Most objects should be rigid and non-deformable.\n·A limited number of flexible or deformable objects may be included, as such data is also valuable.
Dropping Procedure·Secure the object with a wire using tape, ensuring stability. Multiple tapings may be necessary for proper stabilization.\n·Visibility of the wire in the video is acceptable.\n·No body parts should appear in the frame. If this is challenging, consider having a partner monitor the camera or use screen-sharing software to view the camera feed on a laptop for uninterrupted framing.\n·Record videos in a horizontal orientation to simplify cropping and to help keep the frame free of unnecessary elements.\n·Use a short wire to enhance object stability.\n·The object should remain stationary before being dropped.
Scene Composition·Make the scenes dynamic and engaging. Include interactions with other objects, such as collisions or objects tipping over. Static objects should serve as active elements rather than mere background props.\n·Avoid filming in classroom or laboratory environments.\n·Include a variety of dropping heights.\n·Film in different environments, ensuring at least one setting is outside your apartment.\n·Minimize human shadows in the frame whenever possible.\n·Ensure good lighting and maintain strong contrast between the objects and the back-ground.
", + "image_path": "1211a8336c5089da8a8c3424e1cc2df81e4f22b093cc82bb6236a03de951a3b1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 684, + 541, + 707 + ], + "lines": [ + { + "bbox": [ + 52, + 684, + 541, + 707 + ], + "spans": [ + { + "bbox": [ + 52, + 684, + 541, + 707 + ], + "type": "text", + "content": "Table 3. Key points of real world videos collection guideline. We have detailed requirements for camera, objects, dropping procedure and scene composition to ensure the quality, diversity and minimize ambiguity introduced by environments." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "spans": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 69, + 282, + 223 + ], + "blocks": [ + { + "bbox": [ + 53, + 69, + 282, + 223 + ], + "lines": [ + { + "bbox": [ + 53, + 69, + 282, + 223 + ], + "spans": [ + { + "bbox": [ + 53, + 69, + 282, + 223 + ], + "type": "image", + "image_path": "945c3d2b2e7ba5dd24811fc027b02c812dd3fdb62673324359a47eefccc06ed5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 160, + 224, + 170, + 233 + ], + "lines": [ + { + "bbox": [ + 160, + 224, + 170, + 233 + ], + "spans": [ + { + 
"bbox": [ + 160, + 224, + 170, + 233 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 301, + 69, + 543, + 223 + ], + "blocks": [ + { + "bbox": [ + 301, + 69, + 543, + 223 + ], + "lines": [ + { + "bbox": [ + 301, + 69, + 543, + 223 + ], + "spans": [ + { + "bbox": [ + 301, + 69, + 543, + 223 + ], + "type": "image", + "image_path": "4d595693598e6f902081fbae2160c14f17629bf42e1282931e980a4273a07e54.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 416, + 224, + 427, + 233 + ], + "lines": [ + { + "bbox": [ + 416, + 224, + 427, + 233 + ], + "spans": [ + { + "bbox": [ + 416, + 224, + 427, + 233 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 55, + 276, + 260, + 480 + ], + "blocks": [ + { + "bbox": [ + 55, + 276, + 260, + 480 + ], + "lines": [ + { + "bbox": [ + 55, + 276, + 260, + 480 + ], + "spans": [ + { + "bbox": [ + 55, + 276, + 260, + 480 + ], + "type": "image", + "image_path": "8f522784783a329f5017e6371582ae27e84586d17a8f7bcd9025f339933400e5.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 492, + 161, + 502 + ], + "lines": [ + { + "bbox": [ + 149, + 492, + 161, + 502 + ], + "spans": [ + { + "bbox": [ + 149, + 492, + 161, + 502 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 290, + 274, + 542, + 483 + ], + "blocks": [ + { + "bbox": [ + 51, + 244, + 542, + 268 + ], + "lines": [ + { + "bbox": [ + 51, + 244, + 542, + 268 + ], + "spans": [ + { + "bbox": [ + 51, + 244, + 542, + 268 + ], + "type": "text", + "content": "Figure 14. Video processing interface. 
(a) we annotate starting positions in the original long videos and clip them into multiple clips less than 12 seconds. (b) We drag the cropping box to crop the video size to an aspect ratio of 1:1." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 290, + 274, + 542, + 483 + ], + "lines": [ + { + "bbox": [ + 290, + 274, + 542, + 483 + ], + "spans": [ + { + "bbox": [ + 290, + 274, + 542, + 483 + ], + "type": "image", + "image_path": "733aaf4a714c7ec870de8f5e762d4a545653a15ef5ea24677ad5c2400309aed0.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 408, + 492, + 421, + 502 + ], + "lines": [ + { + "bbox": [ + 408, + 492, + 421, + 502 + ], + "spans": [ + { + "bbox": [ + 408, + 492, + 421, + 502 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 51, + 516, + 543, + 550 + ], + "lines": [ + { + "bbox": [ + 51, + 516, + 543, + 550 + ], + "spans": [ + { + "bbox": [ + 51, + 516, + 543, + 550 + ], + "type": "text", + "content": "Figure 15. Annotation details of real world videos. (a) Word cloud of objects in video captions. Our videos contain a variety of daily life objects. (b) Interface for annotating positive and negative points in the first frame. Red and blue dots indicate positive and negative points respectively. We annotate all objects in the midair and ground." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 53, + 556, + 542, + 692 + ], + "blocks": [ + { + "bbox": [ + 53, + 556, + 542, + 692 + ], + "lines": [ + { + "bbox": [ + 53, + 556, + 542, + 692 + ], + "spans": [ + { + "bbox": [ + 53, + 556, + 542, + 692 + ], + "type": "table", + "html": "
ModelResolutionNumber of FramesFPSGuidance ScaleSampling StepsNoise Scheduler
ClosedSora720 × 72015030---
Kling-V1.5960 × 960150301.0--
Kling-V1960 × 960150301.0--
Runway Gen31280 × 76815630---
OpenCogVideoX-5B-I2V720 × 4804886.050DDIM
DynamiCrafter512 × 32090300.750DDIM
Pyramid-Flow1280 × 768120244.010EulerDiscrete
Open-Sora512 × 51290307.030RFLOW
", + "image_path": "5ff8dcc1fc52105fef32fc975178bc49046b998c818452290cac625a89916132.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 112, + 699, + 482, + 710 + ], + "lines": [ + { + "bbox": [ + 112, + 699, + 482, + 710 + ], + "spans": [ + { + "bbox": [ + 112, + 699, + 482, + 710 + ], + "type": "text", + "content": "Table 4. Inference details for models we evaluate, where “-” indicates the information is not available." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 45, + 499, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 45, + 499, + 56 + ], + "spans": [ + { + "bbox": [ + 96, + 45, + 499, + 56 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 302, + 740 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 105, + 541, + 167 + ], + "blocks": [ + { + "bbox": [ + 53, + 105, + 541, + 167 + ], + "lines": [ + { + "bbox": [ + 53, + 105, + 541, + 167 + ], + "spans": [ + { + "bbox": [ + 53, + 105, + 541, + 167 + ], + "type": "image", + "image_path": "c26bfecf28ebcde71af6d60da63253f75d02e0c5503f1425cbff24859ab09444.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 173, + 349, + 186 + ], + "lines": [ + { + "bbox": [ + 216, + 173, + 349, + 186 + ], + "spans": [ + { + "bbox": [ + 216, + 173, + 349, + 186 + ], + "type": "text", + "content": "(a) A brown bottle falls." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 53, + 191, + 541, + 255 + ], + "blocks": [ + { + "bbox": [ + 53, + 191, + 541, + 255 + ], + "lines": [ + { + "bbox": [ + 53, + 191, + 541, + 255 + ], + "spans": [ + { + "bbox": [ + 53, + 191, + 541, + 255 + ], + "type": "image", + "image_path": "5f5749c22b33d6469cf285b89aba3d78b780ffbd4338568515ee93f4e1fa544d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 261, + 338, + 275 + ], + "lines": [ + { + "bbox": [ + 216, + 261, + 338, + 275 + ], + "spans": [ + { + "bbox": [ + 216, + 261, + 338, + 275 + ], + "type": "text", + "content": "(b) A grey bottle falls." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 53, + 280, + 541, + 342 + ], + "blocks": [ + { + "bbox": [ + 53, + 280, + 541, + 342 + ], + "lines": [ + { + "bbox": [ + 53, + 280, + 541, + 342 + ], + "spans": [ + { + "bbox": [ + 53, + 280, + 541, + 342 + ], + "type": "image", + "image_path": "1f5a8409a2aefddb489c74e7deb33bffefdf80fdb5c072687ddd0946e16f3e00.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 349, + 362, + 363 + ], + "lines": [ + { + "bbox": [ + 216, + 349, + 362, + 363 + ], + "spans": [ + { + "bbox": [ + 216, + 349, + 362, + 363 + ], + "type": "text", + "content": "(c) A grey paper cup falls." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 53, + 368, + 541, + 430 + ], + "blocks": [ + { + "bbox": [ + 53, + 368, + 541, + 430 + ], + "lines": [ + { + "bbox": [ + 53, + 368, + 541, + 430 + ], + "spans": [ + { + "bbox": [ + 53, + 368, + 541, + 430 + ], + "type": "image", + "image_path": "19fe4f4ece7488446d3a1550236f6f8bf176f70d2615cfa7db7c28a0720cf550.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 436, + 334, + 451 + ], + "lines": [ + { + "bbox": [ + 216, + 436, + 334, + 451 + ], + "spans": [ + { + "bbox": [ + 216, + 436, + 334, + 451 + ], + "type": "text", + "content": "(d) A paper cup falls." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 53, + 455, + 541, + 518 + ], + "blocks": [ + { + "bbox": [ + 53, + 455, + 541, + 518 + ], + "lines": [ + { + "bbox": [ + 53, + 455, + 541, + 518 + ], + "spans": [ + { + "bbox": [ + 53, + 455, + 541, + 518 + ], + "type": "image", + "image_path": "7ecc93d403dcb06758ac993647a892395af1264eeca956ba139d2929c18192ba.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 524, + 343, + 537 + ], + "lines": [ + { + "bbox": [ + 216, + 524, + 343, + 537 + ], + "spans": [ + { + "bbox": [ + 216, + 524, + 343, + 537 + ], + "type": "text", + "content": "(e) A white bottle falls." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 53, + 544, + 541, + 605 + ], + "blocks": [ + { + "bbox": [ + 53, + 544, + 541, + 605 + ], + "lines": [ + { + "bbox": [ + 53, + 544, + 541, + 605 + ], + "spans": [ + { + "bbox": [ + 53, + 544, + 541, + 605 + ], + "type": "image", + "image_path": "c8f534dc359b0c6486ea7d10444e18fb31e91a7d173e50458a7fb898ac132de4.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 612, + 329, + 625 + ], + "lines": [ + { + "bbox": [ + 216, + 612, + 329, + 625 + ], + "spans": [ + { + "bbox": [ + 216, + 612, + 329, + 625 + ], + "type": "text", + "content": "(f) A white box falls." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 51, + 643, + 542, + 687 + ], + "lines": [ + { + "bbox": [ + 51, + 643, + 542, + 687 + ], + "spans": [ + { + "bbox": [ + 51, + 643, + 542, + 687 + ], + "type": "text", + "content": "Figure 16. Qualitative examples of Kling-V1 (Kuaishou, 2024). In (a) (b) (c) (f), objects have a tendency to fall. (b) (c) are roughly consistent with the laws of physics. In (a) (f), the shape of the object does not match the first frame. In (d), the paper cup is suspended in midair. In (e), new object is introduced. In (e), the model fails to correctly predict the collision that occurs after the white box falls and the chain of events that follows." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "spans": [ + { + "bbox": [ + 96, + 45, + 499, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 292, + 731, + 303, + 740 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 114, + 542, + 178 + ], + "blocks": [ + { + "bbox": [ + 53, + 114, + 542, + 178 + ], + "lines": [ + { + "bbox": [ + 53, + 114, + 542, + 178 + ], + "spans": [ + { + "bbox": [ + 53, + 114, + 542, + 178 + ], + "type": "image", + "image_path": "d24e9d610f63859bf566728e5b51c496af6f6f54f828b2cc2aae2a5186055f4c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 182, + 394, + 198 + ], + "lines": [ + { + "bbox": [ + 214, + 182, + 394, + 198 + ], + "spans": [ + { + "bbox": [ + 214, + 182, + 394, + 198 + ], + "type": "text", + "content": "(a) A black and grey glove falls." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 53, + 202, + 542, + 266 + ], + "blocks": [ + { + "bbox": [ + 53, + 202, + 542, + 266 + ], + "lines": [ + { + "bbox": [ + 53, + 202, + 542, + 266 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 542, + 266 + ], + "type": "image", + "image_path": "936b8ee3ceeca5f82d0050ae068b79e918e73267eaa429a43c2317f1f2323c25.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 270, + 344, + 285 + ], + "lines": [ + { + "bbox": [ + 214, + 270, + 344, + 285 + ], + "spans": [ + { + "bbox": [ + 214, + 270, + 344, + 285 + ], + "type": "text", + "content": "(b) A black bottle falls." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 53, + 290, + 542, + 354 + ], + "blocks": [ + { + "bbox": [ + 53, + 290, + 542, + 354 + ], + "lines": [ + { + "bbox": [ + 53, + 290, + 542, + 354 + ], + "spans": [ + { + "bbox": [ + 53, + 290, + 542, + 354 + ], + "type": "image", + "image_path": "2d23c7b2c224aa44ab3a8eed9dfe17398040ed64c31737411568651ceb6dcf15.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 358, + 384, + 373 + ], + "lines": [ + { + "bbox": [ + 214, + 358, + 384, + 373 + ], + "spans": [ + { + "bbox": [ + 214, + 358, + 384, + 373 + ], + "type": "text", + "content": "(c) A blue and white box falls." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 53, + 378, + 542, + 441 + ], + "blocks": [ + { + "bbox": [ + 53, + 378, + 542, + 441 + ], + "lines": [ + { + "bbox": [ + 53, + 378, + 542, + 441 + ], + "spans": [ + { + "bbox": [ + 53, + 378, + 542, + 441 + ], + "type": "image", + "image_path": "20194bd551fbcc50f5676b0a019e1d10041918fadd0264051f4f9a7540545bac.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 446, + 348, + 460 + ], + "lines": [ + { + "bbox": [ + 214, + 446, + 348, + 460 + ], + "spans": [ + { + "bbox": [ + 214, + 446, + 348, + 460 + ], + "type": "text", + "content": "(d) A brown bottle falls." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 53, + 465, + 542, + 529 + ], + "blocks": [ + { + "bbox": [ + 53, + 465, + 542, + 529 + ], + "lines": [ + { + "bbox": [ + 53, + 465, + 542, + 529 + ], + "spans": [ + { + "bbox": [ + 53, + 465, + 542, + 529 + ], + "type": "image", + "image_path": "baf79faae3094c985d469cccd5f8af1aef1f088b8c03e4b23c119ec86aebd807.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 533, + 362, + 548 + ], + "lines": [ + { + "bbox": [ + 214, + 533, + 362, + 548 + ], + "spans": [ + { + "bbox": [ + 214, + 533, + 362, + 548 + ], + "type": "text", + "content": "(e) A Coca-Cola can falls." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 53, + 554, + 542, + 616 + ], + "blocks": [ + { + "bbox": [ + 53, + 554, + 542, + 616 + ], + "lines": [ + { + "bbox": [ + 53, + 554, + 542, + 616 + ], + "spans": [ + { + "bbox": [ + 53, + 554, + 542, + 616 + ], + "type": "image", + "image_path": "8cb3aa9d253fc5e51c44a557e5629610fa5ad282a6ce25364e557518e7b8d6c7.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 621, + 323, + 636 + ], + "lines": [ + { + "bbox": [ + 214, + 621, + 323, + 636 + ], + "spans": [ + { + "bbox": [ + 214, + 621, + 323, + 636 + ], + "type": "text", + "content": "(f) A pink box falls." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 52, + 653, + 542, + 677 + ], + "lines": [ + { + "bbox": [ + 52, + 653, + 542, + 677 + ], + "spans": [ + { + "bbox": [ + 52, + 653, + 542, + 677 + ], + "type": "text", + "content": "Figure 17. Qualitative examples of Runway Gen3 (Runway, 2024). In (b) (e), objects have a tendency to fall. In (a) (e) (f), new objects are introduced. In (b) (d), the shape of the object does not match the first frame. In (c), the box is suspended in midair." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "spans": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 304, + 740 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 110, + 542, + 173 + ], + "blocks": [ + { + "bbox": [ + 53, + 110, + 542, + 173 + ], + "lines": [ + { + "bbox": [ + 53, + 110, + 542, + 173 + ], + "spans": [ + { + "bbox": [ + 53, + 110, + 542, + 173 + ], + "type": "image", + "image_path": "a79a3d2c12bca59c6526e7216d60bb7580bc8c63576be8552b2f29dfd5c346d2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 177, + 343, + 191 + ], + "lines": [ + { + "bbox": [ + 214, + 177, + 343, + 191 + ], + "spans": [ + { + "bbox": [ + 214, + 177, + 343, + 191 + ], + "type": "text", + "content": "(a) A black bottle falls." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 53, + 196, + 542, + 260 + ], + "blocks": [ + { + "bbox": [ + 53, + 196, + 542, + 260 + ], + "lines": [ + { + "bbox": [ + 53, + 196, + 542, + 260 + ], + "spans": [ + { + "bbox": [ + 53, + 196, + 542, + 260 + ], + "type": "image", + "image_path": "498fa6c0164db5fd88370c112e6dd2c5829dbe74624f6e825ac28219d5f5aa3d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 265, + 350, + 279 + ], + "lines": [ + { + "bbox": [ + 214, + 265, + 350, + 279 + ], + "spans": [ + { + "bbox": [ + 214, + 265, + 350, + 279 + ], + "type": "text", + "content": "(b) A black helmet falls." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 53, + 284, + 542, + 348 + ], + "blocks": [ + { + "bbox": [ + 53, + 284, + 542, + 348 + ], + "lines": [ + { + "bbox": [ + 53, + 284, + 542, + 348 + ], + "spans": [ + { + "bbox": [ + 53, + 284, + 542, + 348 + ], + "type": "image", + "image_path": "8317550ad7aa20a11f3bf97aa716e1da5f6f471ce296a6b0a5da5eba39db5c52.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 353, + 334, + 367 + ], + "lines": [ + { + "bbox": [ + 214, + 353, + 334, + 367 + ], + "spans": [ + { + "bbox": [ + 214, + 353, + 334, + 367 + ], + "type": "text", + "content": "(c) A paper box falls." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 53, + 372, + 542, + 435 + ], + "blocks": [ + { + "bbox": [ + 53, + 372, + 542, + 435 + ], + "lines": [ + { + "bbox": [ + 53, + 372, + 542, + 435 + ], + "spans": [ + { + "bbox": [ + 53, + 372, + 542, + 435 + ], + "type": "image", + "image_path": "5259adbc56734c97b69c0f5ac5a0debfc574cf0ee9fab5cae058c157503ada7b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 440, + 344, + 454 + ], + "lines": [ + { + "bbox": [ + 214, + 440, + 344, + 454 + ], + "spans": [ + { + "bbox": [ + 214, + 440, + 344, + 454 + ], + "type": "text", + "content": "(d) A white bottle falls." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 53, + 460, + 542, + 523 + ], + "blocks": [ + { + "bbox": [ + 53, + 460, + 542, + 523 + ], + "lines": [ + { + "bbox": [ + 53, + 460, + 542, + 523 + ], + "spans": [ + { + "bbox": [ + 53, + 460, + 542, + 523 + ], + "type": "image", + "image_path": "fb0eb57bfd7df70674700e7d467e475c936b995b0451b6a6746c321bb30fd4dc.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 528, + 362, + 543 + ], + "lines": [ + { + "bbox": [ + 214, + 528, + 362, + 543 + ], + "spans": [ + { + "bbox": [ + 214, + 528, + 362, + 543 + ], + "type": "text", + "content": "(e) A grey paper cup falls." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 53, + 548, + 483, + 611 + ], + "blocks": [ + { + "bbox": [ + 53, + 548, + 483, + 611 + ], + "lines": [ + { + "bbox": [ + 53, + 548, + 483, + 611 + ], + "spans": [ + { + "bbox": [ + 53, + 548, + 483, + 611 + ], + "type": "image", + "image_path": "dad22c2ccf02d28f88996ada3f6458c8f7ba92408c341e39e6da0ed00ee05fbb.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 615, + 330, + 629 + ], + "lines": [ + { + "bbox": [ + 214, + 615, + 330, + 629 + ], + "spans": [ + { + "bbox": [ + 214, + 615, + 330, + 629 + ], + "type": "text", + "content": "(f) A white box falls." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 51, + 647, + 543, + 681 + ], + "lines": [ + { + "bbox": [ + 51, + 647, + 543, + 681 + ], + "spans": [ + { + "bbox": [ + 51, + 647, + 543, + 681 + ], + "type": "text", + "content": "Figure 18. Qualitative examples of CogVideoX-5B-I2V (Yang et al., 2024c). In (a) - (f), objects have a tendency to fall. However, in all the videos, there are violations of physics. In (a) (b), the objects are divided into two parts. In (c) (d) (e), the shape of the object does not match the first frame. In (c), the trajectory is not a vertical fall. In (f), scene changes suddenly, which does not match the first frame." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "spans": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 302, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 302, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 302, + 740 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 120, + 542, + 183 + ], + "blocks": [ + { + "bbox": [ + 53, + 120, + 542, + 183 + ], + "lines": [ + { + "bbox": [ + 53, + 120, + 542, + 183 + ], + "spans": [ + { + "bbox": [ + 53, + 120, + 542, + 183 + ], + "type": "image", + "image_path": "e39efd16222ce5ec5787e1ab30db7573f7cb53089f14019eb71cfee5130fa917.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 188, + 332, + 201 + ], + "lines": [ + { + "bbox": [ + 214, + 188, + 332, + 201 + ], + "spans": [ + { + "bbox": [ + 214, + 188, + 332, + 201 + ], + "type": "text", + "content": "(a) A black box falls." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 53, + 208, + 542, + 271 + ], + "blocks": [ + { + "bbox": [ + 53, + 208, + 542, + 271 + ], + "lines": [ + { + "bbox": [ + 53, + 208, + 542, + 271 + ], + "spans": [ + { + "bbox": [ + 53, + 208, + 542, + 271 + ], + "type": "image", + "image_path": "a17478f38168df434239dbbc27ca455093b6c1e40467cd5487d213bb5d83095d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 276, + 342, + 290 + ], + "lines": [ + { + "bbox": [ + 214, + 276, + 342, + 290 + ], + "spans": [ + { + "bbox": [ + 214, + 276, + 342, + 290 + ], + "type": "text", + "content": "(b) A card holder falls." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 53, + 295, + 542, + 360 + ], + "blocks": [ + { + "bbox": [ + 53, + 295, + 542, + 360 + ], + "lines": [ + { + "bbox": [ + 53, + 295, + 542, + 360 + ], + "spans": [ + { + "bbox": [ + 53, + 295, + 542, + 360 + ], + "type": "image", + "image_path": "f5955939eff061d366f9461ca64daddccce438bc7cf26e0ca0177e4bea14e76c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 364, + 344, + 378 + ], + "lines": [ + { + "bbox": [ + 214, + 364, + 344, + 378 + ], + "spans": [ + { + "bbox": [ + 214, + 364, + 344, + 378 + ], + "type": "text", + "content": "(c) A white bottle falls." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 53, + 384, + 542, + 447 + ], + "blocks": [ + { + "bbox": [ + 53, + 384, + 542, + 447 + ], + "lines": [ + { + "bbox": [ + 53, + 384, + 542, + 447 + ], + "spans": [ + { + "bbox": [ + 53, + 384, + 542, + 447 + ], + "type": "image", + "image_path": "3064ab2bd099f6c34a0b0143af9b37cafbc9b07d4aff556a41357fd47bdbe8d3.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 452, + 332, + 465 + ], + "lines": [ + { + "bbox": [ + 214, + 452, + 332, + 465 + ], + "spans": [ + { + "bbox": [ + 214, + 452, + 332, + 465 + ], + "type": "text", + "content": "(d) A white box falls." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 53, + 471, + 542, + 534 + ], + "blocks": [ + { + "bbox": [ + 53, + 471, + 542, + 534 + ], + "lines": [ + { + "bbox": [ + 53, + 471, + 542, + 534 + ], + "spans": [ + { + "bbox": [ + 53, + 471, + 542, + 534 + ], + "type": "image", + "image_path": "f9b6d84ca868eb7b320e5bf66a6830342a093523d4a8c01e2b0d2c6c9b32e833.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 539, + 405, + 554 + ], + "lines": [ + { + "bbox": [ + 214, + 539, + 405, + 554 + ], + "spans": [ + { + "bbox": [ + 214, + 539, + 405, + 554 + ], + "type": "text", + "content": "(e) An orange and white box falls." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 53, + 559, + 542, + 622 + ], + "blocks": [ + { + "bbox": [ + 53, + 559, + 542, + 622 + ], + "lines": [ + { + "bbox": [ + 53, + 559, + 542, + 622 + ], + "spans": [ + { + "bbox": [ + 53, + 559, + 542, + 622 + ], + "type": "image", + "image_path": "b68b172758804959f5687238c0b53d284f904543618dda06d81c04419d71a2ce.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 627, + 301, + 640 + ], + "lines": [ + { + "bbox": [ + 214, + 627, + 301, + 640 + ], + "spans": [ + { + "bbox": [ + 214, + 627, + 301, + 640 + ], + "type": "text", + "content": "(f) A shoe falls." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 52, + 658, + 541, + 670 + ], + "lines": [ + { + "bbox": [ + 52, + 658, + 541, + 670 + ], + "spans": [ + { + "bbox": [ + 52, + 658, + 541, + 670 + ], + "type": "text", + "content": "Figure 19. Qualitative examples of DynamiCrafter (?). In all the videos, objects do not have a tendency to fall, suspended in the midair." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "spans": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 114, + 542, + 178 + ], + "blocks": [ + { + "bbox": [ + 53, + 114, + 542, + 178 + ], + "lines": [ + { + "bbox": [ + 53, + 114, + 542, + 178 + ], + "spans": [ + { + "bbox": [ + 53, + 114, + 542, + 178 + ], + "type": "image", + "image_path": "b39f635b4b2874364c63137dcdc9fc15f4eb68ba57cc4f852a463fac4a5c56f0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 182, + 343, + 197 + ], + "lines": [ + { + "bbox": [ + 214, + 182, + 343, + 197 + ], + "spans": [ + { + "bbox": [ + 214, + 182, + 343, + 197 + ], + "type": "text", + "content": "(a) A black bottle falls." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 53, + 202, + 542, + 266 + ], + "blocks": [ + { + "bbox": [ + 53, + 202, + 542, + 266 + ], + "lines": [ + { + "bbox": [ + 53, + 202, + 542, + 266 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 542, + 266 + ], + "type": "image", + "image_path": "c3078aa13d861c89ebf70c3db430662cdcd8c83d8880aa9a934a258e5c3598e5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 270, + 392, + 285 + ], + "lines": [ + { + "bbox": [ + 214, + 270, + 392, + 285 + ], + "spans": [ + { + "bbox": [ + 214, + 270, + 392, + 285 + ], + "type": "text", + "content": "(b) A green and white box falls." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 53, + 289, + 542, + 353 + ], + "blocks": [ + { + "bbox": [ + 53, + 289, + 542, + 353 + ], + "lines": [ + { + "bbox": [ + 53, + 289, + 542, + 353 + ], + "spans": [ + { + "bbox": [ + 53, + 289, + 542, + 353 + ], + "type": "image", + "image_path": "47bbef5b7ad9b2e303cbcdd98429261eea928bc17ba376000894eba09e78ab5b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 358, + 338, + 373 + ], + "lines": [ + { + "bbox": [ + 214, + 358, + 338, + 373 + ], + "spans": [ + { + "bbox": [ + 214, + 358, + 338, + 373 + ], + "type": "text", + "content": "(c) A grey bottle falls." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 53, + 378, + 542, + 441 + ], + "blocks": [ + { + "bbox": [ + 53, + 378, + 542, + 441 + ], + "lines": [ + { + "bbox": [ + 53, + 378, + 542, + 441 + ], + "spans": [ + { + "bbox": [ + 53, + 378, + 542, + 441 + ], + "type": "image", + "image_path": "d20954189b98d66653a95abacce5f2c333474ea3a756f9e105766842d1c52aaf.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 446, + 352, + 460 + ], + "lines": [ + { + "bbox": [ + 214, + 446, + 352, + 460 + ], + "spans": [ + { + "bbox": [ + 214, + 446, + 352, + 460 + ], + "type": "text", + "content": "(d) An orange tube falls." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 53, + 465, + 542, + 529 + ], + "blocks": [ + { + "bbox": [ + 53, + 465, + 542, + 529 + ], + "lines": [ + { + "bbox": [ + 53, + 465, + 542, + 529 + ], + "spans": [ + { + "bbox": [ + 53, + 465, + 542, + 529 + ], + "type": "image", + "image_path": "52a4ded90059019d5599737a9ccf91373f0a89472a4470044d57fa2d382b574c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 533, + 343, + 547 + ], + "lines": [ + { + "bbox": [ + 214, + 533, + 343, + 547 + ], + "spans": [ + { + "bbox": [ + 214, + 533, + 343, + 547 + ], + "type": "text", + "content": "(e) A white bottle falls." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 53, + 553, + 542, + 616 + ], + "blocks": [ + { + "bbox": [ + 53, + 553, + 542, + 616 + ], + "lines": [ + { + "bbox": [ + 53, + 553, + 542, + 616 + ], + "spans": [ + { + "bbox": [ + 53, + 553, + 542, + 616 + ], + "type": "image", + "image_path": "1bfe20845da001136df0ce41999f5d2267a843826b8f310807bc262f0f0570d5.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 621, + 337, + 635 + ], + "lines": [ + { + "bbox": [ + 214, + 621, + 337, + 635 + ], + "spans": [ + { + "bbox": [ + 214, + 621, + 337, + 635 + ], + "type": "text", + "content": "(f) A plastic box falls." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 52, + 653, + 542, + 676 + ], + "lines": [ + { + "bbox": [ + 52, + 653, + 542, + 676 + ], + "spans": [ + { + "bbox": [ + 52, + 653, + 542, + 676 + ], + "type": "text", + "content": "Figure 20. Qualitative examples of Pyramid-Flow (Jin et al., 2024). In (b) (d) (e), objects have a tendency to fall. In (a) (b) (e) (f), new objects are introduced. In (c), scene changes, which does not match the first frame.. In (d), the tube becomes blurry." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "spans": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 114, + 542, + 178 + ], + "blocks": [ + { + "bbox": [ + 53, + 114, + 542, + 178 + ], + "lines": [ + { + "bbox": [ + 53, + 114, + 542, + 178 + ], + "spans": [ + { + "bbox": [ + 53, + 114, + 542, + 178 + ], + "type": "image", + "image_path": "161db114927f1a531685c756c1a59ca048d49f95035d117b63e02913d1aeda55.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 182, + 382, + 197 + ], + "lines": [ + { + "bbox": [ + 214, + 182, + 382, + 197 + ], + "spans": [ + { + "bbox": [ + 214, + 182, + 382, + 197 + ], + "type": "text", + "content": "(a) A bottle full of water falls." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 53, + 202, + 542, + 266 + ], + "blocks": [ + { + "bbox": [ + 53, + 202, + 542, + 266 + ], + "lines": [ + { + "bbox": [ + 53, + 202, + 542, + 266 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 542, + 266 + ], + "type": "image", + "image_path": "00dd072f248c8d96ba525c8f9026f6dbe513577c2833c0a39bcc58836058a3ae.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 270, + 349, + 284 + ], + "lines": [ + { + "bbox": [ + 214, + 270, + 349, + 284 + ], + "spans": [ + { + "bbox": [ + 214, + 270, + 349, + 284 + ], + "type": "text", + "content": "(b) A brown bottle falls." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 53, + 290, + 542, + 353 + ], + "blocks": [ + { + "bbox": [ + 53, + 290, + 542, + 353 + ], + "lines": [ + { + "bbox": [ + 53, + 290, + 542, + 353 + ], + "spans": [ + { + "bbox": [ + 53, + 290, + 542, + 353 + ], + "type": "image", + "image_path": "20691688d65a9c34668843431f6da68506d385837a8adf32d934a8338cfbb623.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 358, + 362, + 373 + ], + "lines": [ + { + "bbox": [ + 214, + 358, + 362, + 373 + ], + "spans": [ + { + "bbox": [ + 214, + 358, + 362, + 373 + ], + "type": "text", + "content": "(c) A grey paper cup falls." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 53, + 378, + 542, + 441 + ], + "blocks": [ + { + "bbox": [ + 53, + 378, + 542, + 441 + ], + "lines": [ + { + "bbox": [ + 53, + 378, + 542, + 441 + ], + "spans": [ + { + "bbox": [ + 53, + 378, + 542, + 441 + ], + "type": "image", + "image_path": "20dc724f7197dd626747f2db293d23a957a1d7672a436a2fd07469a0a937b08a.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 446, + 334, + 460 + ], + "lines": [ + { + "bbox": [ + 214, + 446, + 334, + 460 + ], + "spans": [ + { + "bbox": [ + 214, + 446, + 334, + 460 + ], + "type": "text", + "content": "(d) A paper box falls." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 53, + 465, + 542, + 529 + ], + "blocks": [ + { + "bbox": [ + 53, + 465, + 542, + 529 + ], + "lines": [ + { + "bbox": [ + 53, + 465, + 542, + 529 + ], + "spans": [ + { + "bbox": [ + 53, + 465, + 542, + 529 + ], + "type": "image", + "image_path": "5f2dbd497dd96b551059198818fd41a1cc13a1d01d40d7814fd8343102c994a3.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 533, + 344, + 547 + ], + "lines": [ + { + "bbox": [ + 214, + 533, + 344, + 547 + ], + "spans": [ + { + "bbox": [ + 214, + 533, + 344, + 547 + ], + "type": "text", + "content": "(e) A white bottle falls." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 53, + 553, + 542, + 616 + ], + "blocks": [ + { + "bbox": [ + 53, + 553, + 542, + 616 + ], + "lines": [ + { + "bbox": [ + 53, + 553, + 542, + 616 + ], + "spans": [ + { + "bbox": [ + 53, + 553, + 542, + 616 + ], + "type": "image", + "image_path": "5794ff6b8e6a1c23e1b350538a38f18063fb742919db3216ae76aa58e6f391f5.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 621, + 330, + 635 + ], + "lines": [ + { + "bbox": [ + 214, + 621, + 330, + 635 + ], + "spans": [ + { + "bbox": [ + 214, + 621, + 330, + 635 + ], + "type": "text", + "content": "(f) A white box falls." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 51, + 653, + 542, + 676 + ], + "lines": [ + { + "bbox": [ + 51, + 653, + 542, + 676 + ], + "spans": [ + { + "bbox": [ + 51, + 653, + 542, + 676 + ], + "type": "text", + "content": "Figure 21. Qualitative examples of Open-Sora (Zheng et al., 2024). In all the videos, objects do not have a tendency to fall, suspended in the midair. In (b) (d), scene changes suddenly, which does not match the first frame. In (e), new object is introduced." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "spans": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 114, + 542, + 178 + ], + "blocks": [ + { + "bbox": [ + 53, + 114, + 542, + 178 + ], + "lines": [ + { + "bbox": [ + 53, + 114, + 542, + 178 + ], + "spans": [ + { + "bbox": [ + 53, + 114, + 542, + 178 + ], + "type": "image", + "image_path": "0cc35a21a9c1a24bb99fd5ae428c1b943237dcc4bf44654420f09f88cbbe622f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 182, + 350, + 197 + ], + "lines": [ + { + "bbox": [ + 214, + 182, + 350, + 197 + ], + "spans": [ + { + "bbox": [ + 214, + 182, + 350, + 197 + ], + "type": "text", + "content": "(a) A brown bottle falls." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 53, + 202, + 542, + 266 + ], + "blocks": [ + { + "bbox": [ + 53, + 202, + 542, + 266 + ], + "lines": [ + { + "bbox": [ + 53, + 202, + 542, + 266 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 542, + 266 + ], + "type": "image", + "image_path": "20453f58b53c0dbbefd7d26b5854ca263406b127771a6983ee453bcedd34a9b8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 270, + 342, + 285 + ], + "lines": [ + { + "bbox": [ + 214, + 270, + 342, + 285 + ], + "spans": [ + { + "bbox": [ + 214, + 270, + 342, + 285 + ], + "type": "text", + "content": "(b) A grey eraser falls." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 53, + 290, + 542, + 353 + ], + "blocks": [ + { + "bbox": [ + 53, + 290, + 542, + 353 + ], + "lines": [ + { + "bbox": [ + 53, + 290, + 542, + 353 + ], + "spans": [ + { + "bbox": [ + 53, + 290, + 542, + 353 + ], + "type": "image", + "image_path": "d0de139686427ffdf841921b45e326470ebe6a127e77cc98801376148a86c3b4.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 358, + 362, + 373 + ], + "lines": [ + { + "bbox": [ + 214, + 358, + 362, + 373 + ], + "spans": [ + { + "bbox": [ + 214, + 358, + 362, + 373 + ], + "type": "text", + "content": "(c) A grey paper cup falls." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 53, + 378, + 542, + 441 + ], + "blocks": [ + { + "bbox": [ + 53, + 378, + 542, + 441 + ], + "lines": [ + { + "bbox": [ + 53, + 378, + 542, + 441 + ], + "spans": [ + { + "bbox": [ + 53, + 378, + 542, + 441 + ], + "type": "image", + "image_path": "2c9f209b0aeac72570d8c4fde360b1cf6b4dfcb2b6a0e8d4e853f7b3a78fa4d6.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 446, + 379, + 460 + ], + "lines": [ + { + "bbox": [ + 214, + 446, + 379, + 460 + ], + "spans": [ + { + "bbox": [ + 214, + 446, + 379, + 460 + ], + "type": "text", + "content": "(d) A transparent bottle falls." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 53, + 465, + 542, + 529 + ], + "blocks": [ + { + "bbox": [ + 53, + 465, + 542, + 529 + ], + "lines": [ + { + "bbox": [ + 53, + 465, + 542, + 529 + ], + "spans": [ + { + "bbox": [ + 53, + 465, + 542, + 529 + ], + "type": "image", + "image_path": "f52bf9a1a1b538e95d70fbf8e8072fe5c66e0fb960e1c0adc65706d6253840a0.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 533, + 387, + 548 + ], + "lines": [ + { + "bbox": [ + 214, + 533, + 387, + 548 + ], + "spans": [ + { + "bbox": [ + 214, + 533, + 387, + 548 + ], + "type": "text", + "content": "(e) A red wrapping paper falls." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 53, + 554, + 542, + 616 + ], + "blocks": [ + { + "bbox": [ + 53, + 554, + 542, + 616 + ], + "lines": [ + { + "bbox": [ + 53, + 554, + 542, + 616 + ], + "spans": [ + { + "bbox": [ + 53, + 554, + 542, + 616 + ], + "type": "image", + "image_path": "a00eb2f6afd73e7890435740d404820471ec3c29a424ac89fa8f08f0f78a9a90.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 621, + 342, + 635 + ], + "lines": [ + { + "bbox": [ + 214, + 621, + 342, + 635 + ], + "spans": [ + { + "bbox": [ + 214, + 621, + 342, + 635 + ], + "type": "text", + "content": "(f) A white bottle falls." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 51, + 653, + 543, + 677 + ], + "lines": [ + { + "bbox": [ + 51, + 653, + 543, + 677 + ], + "spans": [ + { + "bbox": [ + 51, + 653, + 543, + 677 + ], + "type": "text", + "content": "Figure 22. Qualitative examples of our method (Open-Sora + PSFT + ORO). In all the videos, objects have a tendency to fall. However, the consistency of objects is still insufficient. In some frames, objects become blurry. Objects sometimes disappear after collision." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "spans": [ + { + "bbox": [ + 96, + 45, + 500, + 57 + ], + "type": "text", + "content": "PISA Experiments: Exploring Physics Post-Training for Video Diffusion Models by Watching Stuff Drop" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "spans": [ + { + "bbox": [ + 291, + 731, + 303, + 740 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_11xxx/2503.11701/f561bad2-8e9b-4fb7-9083-b32d2bfd8f1f_content_list.json b/data/2025/2503_11xxx/2503.11701/f561bad2-8e9b-4fb7-9083-b32d2bfd8f1f_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..229b85d006535ca8c03ef9d2df0ffe148fd0dd43 --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/f561bad2-8e9b-4fb7-9083-b32d2bfd8f1f_content_list.json @@ -0,0 +1,2781 @@ +[ + { + "type": "text", + "text": "A Survey of Direct Preference Optimization", + "text_level": 1, + "bbox": [ + 122, + 66, + 872, + 101 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shunyu Liu, Wenkai Fang, Zetian Hu, Junjie Zhang, Yang Zhou, Kongcheng Zhang, Rongcheng Tu, Ting-En Lin, Fei Huang, Mingli Song, Yongbin Li, and Dacheng Tao, Fellow, IEEE", + "bbox": [ + 102, + 114, + 893, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract—Large Language Models (LLMs) have demonstrated unprecedented generative capabilities, yet their alignment with human values remains critical for ensuring helpful and harmless 
deployments. While Reinforcement Learning from Human Feedback (RLHF) has emerged as a powerful paradigm for aligning LLMs with human preferences, its reliance on complex reward modeling introduces inherent trade-offs in computational efficiency and training stability. In this context, Direct Preference Optimization (DPO) has recently gained prominence as a streamlined alternative that directly optimizes LLMs using human preferences, thereby circumventing the need for explicit reward modeling. Owing to its theoretical elegance and computational efficiency, DPO has rapidly attracted substantial research efforts exploring its various implementations and applications. However, this field currently lacks systematic organization and comparative analysis. In this survey, we conduct a comprehensive overview of DPO and introduce a novel taxonomy, categorizing previous works into four key dimensions: data strategy, learning framework, constraint mechanism, and model property. We further present a rigorous empirical analysis of DPO variants across standardized benchmarks. Additionally, we discuss real-world applications, open challenges, and future directions for DPO. This work delivers both a conceptual framework for understanding DPO and practical guidance for practitioners, aiming to advance robust and generalizable alignment paradigms. 
All collected resources are available and will be continuously updated at https://github.com/liushunyu/awesome-direct-preference-optimization.", + "bbox": [ + 104, + 172, + 892, + 345 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Index Terms—Alignment, Direct Preference Optimization, Large Language Models, Reinforcement Learning from Human Feedback.", + "bbox": [ + 104, + 356, + 877, + 369 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 73, + 430, + 228, + 445 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The rapid advancement of Large Language Models (LLMs) has revolutionized artificial intelligence [1, 2, 3, 4, 5, 6, 7, 8], enabling unprecedented generative capabilities across diverse applications, such as dialogue systems [9, 10], code generation [11, 12, 13], and medical diagnosis [14, 15, 16, 17]. Models like OpenAI-o1 [18] and DeepSeekR1 [19] have demonstrated remarkable proficiency in understanding and generating human-like text, outperforming traditional language processing techniques [20]. However, their immense power also introduces significant risks: LLMs may inadvertently produce harmful content (e.g., jailbreak suggestion) [21], exhibit hallucination behaviors (e.g., misinformation) [22], or propagate sociocultural stereotypes (e.g., biased recommendations) [23]. 
Ensuring that these models align with human values (producing outputs that are helpful, harmless, and honest) has thus become a cornerstone of responsible AI development [24].", + "bbox": [ + 71, + 454, + 491, + 702 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The critical challenge of aligning LLMs with human values stems from the inherent complexity of encoding abstract", + "bbox": [ + 73, + 702, + 491, + 733 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This research is supported by the RIE2025 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) (Award I2301E0026), administered by A\\*STAR, as well as supported by Alibaba Group and NTU Singapore through Alibaba-NTU Global e-Sustainability CorpLab (ANGEL). (Corresponding author: Dacheng Tao.) Shunyu Liu, Junjie Zhang, Rongcheng Tu and Dacheng Tao are with Nanyang Technological University, Singapore (e-mail: shunyu.liu@ntu.edu.sg; junjie.zhang@ntu.edu.sg; turongcheng@gmail.com; dacheng.tao@ntu.edu.sg). Wenkai Fang, Yang Zhou, Kongcheng Zhang, and Mingli Song are with the College of Computer Science and Technology, Zhejiang University, China (e-mail: wenkfang@zju.edu.cn; imzhouyang@zju.edu.cn; zhangkc@zju.edu.cn; brooksong@zju.edu.cn). Zetian Hu is with the School of Aerospace Engineering, Tsinghua University, China (e-mail: huzt22@mails.tsinghua.edu.cn). Ting-En Lin, Fei Huang, and Yongbin Li are with the Tongyi Lab, Alibaba Group, China (e-mail: ting-en.lte@alibaba-inc.com; f.huang@alibaba-inc.com; shuide.lyb@alibaba-inc.com).", + "bbox": [ + 71, + 746, + 491, + 941 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ethical principles into concrete model behaviors [25, 26, 27]. Traditional approaches, such as rule-based filtering or supervised learning on curated datasets, often prove inadequate due to their inability to generalize across diverse contexts and adapt to evolving societal norms [28]. 
The emergence of preference-based alignment paradigms addresses these limitations by framing the problem as optimizing for human feedback rather than inflexible heuristics [29, 30, 31, 32]. This shift recognizes that LLM decision-making often involves nuanced trade-offs between competing values, requiring flexible frameworks capable of incorporating subjective human preferences [33].", + "bbox": [ + 501, + 431, + 924, + 606 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Building upon these insights, Reinforcement Learning from Human Feedback (RLHF) [34, 35] has emerged as the predominant alignment paradigm, leveraging human preferences to guide model optimization. In the RLHF pipeline, human annotators first rank the outputs generated by the language model, and these comparisons are used to train a reward model that quantifies human preferences. The language model is then fine-tuned using RL guided by this reward model, enabling the language model to align with human values by maximizing the predicted rewards. The success of RLHF in aligning models like ChatGPT [36, 37] and Claude [38, 39] underscores its practical utility. By translating subjective human preferences into an objective reward signal, RLHF facilitates the optimization of model behavior for value alignment. However, this RLHF paradigm suffers from critical limitations of computational complexity and training instability. Training a separate reward model demands substantial computational resources and high-quality human preference data, which scales poorly across different domains. 
Moreover, the RL phase often struggles with optimization challenges, such as reward hacking [40] and mode collapse [41].", + "bbox": [ + 503, + 607, + 923, + 926 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "These limitations have spurred interest in alternative", + "bbox": [ + 527, + 926, + 923, + 941 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 911, + 32, + 919, + 42 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.11701v1 [cs.LG] 12 Mar 2025", + "bbox": [ + 22, + 260, + 57, + 705 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/eec766babe90c18e92b263abf2f3723da32c85a958aae6f3eb98f4722ac37fa7.jpg", + "image_caption": [ + "Fig. 1: A taxonomy of DPO. We categorize existing DPO works into four branches: data strategy, learning framework, constraint mechanism, and model property. Different colored boxes indicate different categories and their corresponding representative references." + ], + "image_footnote": [], + "bbox": [ + 81, + 54, + 919, + 597 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "alignment methods that bypass reward modeling while preserving the benefits of preference-based learning. Direct Preference Optimization (DPO) [74, 210] represents a groundbreaking shift in this direction. Unlike RLHF, DPO reframes alignment as a supervised learning problem, directly optimizing the LLM policy using preference data without explicit reward modeling. By leveraging a closed-form mapping between reward functions and optimal policies, DPO eliminates the need for iterative RL training, reducing computational overhead and improving stability. Due to its inherent advantages, DPO has rapidly gained increasing attention from research communities. Existing studies vary widely in data strategies (e.g., point-wise v.s. pair-wise feedback) [67, 211], learning frameworks (e.g., offline v.s. 
online learning) [121, 122, 126], constraint mechanisms (e.g., different divergence constraints) [169, 171], and model properties (e.g., length bias) [191, 195]. Recent advancements in DPO variants have demonstrated remarkable efficacy in enhancing model alignment with human preferences, achieving unprecedented success across diverse domains [32].", + "bbox": [ + 71, + 641, + 495, + 933 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "These developments position DPO-based approaches as a compelling alternative to conventional RLHF paradigms for preference alignment tasks. However, despite its promise, the DPO research landscape remains fragmented.", + "bbox": [ + 503, + 641, + 924, + 700 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Several surveys related to DPO have been published in recent years, yet they exhibit notable limitations in their scope and analysis of DPO. (1) Scope limitations. While an early survey of [212] presents a comprehensive overview of preference-based RL methods, it predates the advent of DPO and does not address its applications to modern LLMs. Recent surveys on alignment [24, 26, 213, 214] provide broad overviews of LLM alignment techniques but only offer cursory summaries of DPO-related approaches without in-depth analysis. Similarly, surveys on learning from human feedback [30, 215, 216, 217] also only briefly mention DPO as a potential alternative. (2) Taxonomy deficiencies. Gao et al. [29] and Winata et al. [32] introduce a simplified taxonomy for preference learning, while overlooking technical distinctions within its broad categorization. In contrast, Wang et al. [31] attempt to classify preference learning across dimensions", + "bbox": [ + 503, + 708, + 923, + 941 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "such as reinforcement learning, reward modeling, feedback, and optimization. 
However, this taxonomy suffers from significant conceptual overlaps (e.g. reinforcement learning inherently involves optimization). A recent work by Xiao et al. [210] categorizes DPO studies through isolated research questions, which, while useful for problem identification, fragments the methodological connections. Our survey addresses these gaps by presenting the first comprehensive analysis specifically focused on DPO. The main contributions of this survey are summarized as follows:", + "bbox": [ + 71, + 53, + 491, + 198 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- In this survey, we introduce a novel taxonomy that categorizes existing DPO works into four key dimensions based on different components of the DPO loss: data strategy, learning framework, constraint mechanism, and model property, as shown in Fig. 1. This taxonomy provides a systematic framework for understanding the methodological evolution of DPO and highlights the key distinctions between different variations.", + "- We conduct a rigorous empirical analysis of DPO variants across standardized benchmarks, revealing critical insights into their performance in diverse scenarios. This analysis offers a comprehensive evaluation of DPO variants and provides practical guidance for practitioners.", + "- We discuss real-world applications of DPO and highlight its potential to democratize alignment research by enabling efficient and scalable preference learning across diverse domains. We also outline open challenges and future directions for DPO research, emphasizing the need for robust and generalizable alignment paradigms." + ], + "bbox": [ + 83, + 202, + 491, + 479 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The remainder of this survey is organized as follows. Section 2 introduces the background and formulation of DPO. Section 3 presents a taxonomy of DPO, categorizing existing works based on key dimensions. 
Section 4 describes standardized benchmarks for evaluating DPO methods and presents empirical results. Section 5 discusses real-world applications of DPO and highlights its potential. Section 6 outlines open challenges and future directions for DPO research. Finally, Section 7 concludes the survey.", + "bbox": [ + 71, + 483, + 491, + 614 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 BACKGROUND AND FORMULATION", + "text_level": 1, + "bbox": [ + 73, + 641, + 383, + 655 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Preference learning aims to train language model policies to generate responses that better align with human preferences. Specifically, we denote the language model policy as $\\pi(y|x)$ , where $x$ represents the input prompt and $y$ is a candidate response (completion). A language model can be viewed as an autoregressive function that sequentially predicts tokens based on prior context. Mathematically, this is expressed as: $\\pi(y|x) = \\prod_{t=1}^{T} \\pi(y_t | y_{ 0$ is a hyperparameter that controls the strength of the Kullback-Leibler (KL) divergence penalty. Here, the term $\\log \\pi_{\\theta}(\\cdot |x) / \\pi_{\\mathrm{ref}}(\\cdot |x)$ represents the KL divergence between the current policy $\\pi_{\\theta}$ and a reference policy $\\pi_{\\mathrm{ref}}$ . In practice, the reference policy $\\pi_{\\mathrm{ref}}$ is set to the SFT model $\\pi_{\\mathrm{sft}}$ , ensuring that the updated policy remains close to the initial model.", + "bbox": [ + 501, + 781, + 923, + 869 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To optimize the above objective, Proximal Policy Optimization (PPO) [219] has emerged as a promising RL algorithm for LLMs. 
PPO stabilizes training by constraining policy updates within a trust region via a clipped objective, which prevents significant deviations from the previous", + "bbox": [ + 501, + 869, + 924, + 944 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "policy. However, PPO requires an additional critic model to estimate value functions for advantage calculation, thereby introducing extra computational and memory overhead. To address this, recent methods, such as RLOO [220], ReMax [221], GRPO [222], and Reinforce++ [223], introduce critic-free advantage estimation to reduce resource demands while maintaining stable optimization, making them more scalable for large-scale LLM training.", + "bbox": [ + 71, + 53, + 491, + 170 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 Direct Preference Optimization", + "text_level": 1, + "bbox": [ + 73, + 185, + 341, + 200 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "DPO offers an alternative that streamlines the training process by directly optimizing the policy with preference data [74, 224, 225, 226, 227, 228, 229], thereby eliminating the need for explicit reward modeling in RLHF. The key idea of DPO is a closed-form solution of Eq. 3 that connects reward with the optimal policies. 
Specifically, the optimal policy corresponding to a given $r$ is defined as follows:", + "bbox": [ + 71, + 203, + 490, + 306 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\pi^ {*} (y | x) = \\frac {1}{Z (x)} \\pi_ {\\mathrm {r e f}} (y | x) \\exp \\left(\\frac {1}{\\beta} r (x, y)\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 133, + 311, + 491, + 344 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the partition function $Z(x)$ is defined as:", + "bbox": [ + 71, + 348, + 406, + 363 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nZ (x) = \\sum_ {y} \\pi_ {\\mathrm {r e f}} (y | x) \\exp \\left(\\frac {1}{\\beta} r (x, y)\\right). \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 148, + 368, + 491, + 404 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "By rearranging the above equation, the reward $r$ can be recovered from the optimal policy $\\pi^{*}$ :", + "bbox": [ + 71, + 409, + 491, + 439 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nr (x, y) = \\beta \\log \\frac {\\pi^ {*} (y | x)}{\\pi_ {\\operatorname {r e f}} (y | x)} + \\beta \\log Z (x). \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 148, + 444, + 491, + 476 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Notice that the partition function $Z(x)$ depends only on the prompt $x$ . By substituting this expression into the preference model of Eq. 1, the preference probability model that $y_{w}$ is preferred over $y_{l}$ becomes:", + "bbox": [ + 71, + 479, + 491, + 540 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nP \\left(y _ {w} \\succ y _ {l} | x\\right) = \\sigma \\left(\\beta \\log \\frac {\\pi^ {*} \\left(y _ {w} \\mid x\\right)}{\\pi_ {\\text {r e f}} \\left(y _ {w} \\mid x\\right)} - \\beta \\log \\frac {\\pi^ {*} \\left(y _ {l} \\mid x\\right)}{\\pi_ {\\text {r e f}} \\left(y _ {l} \\mid x\\right)}\\right). 
\\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 544, + 491, + 588 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Based on the above preference probability model, DPO directly optimizes the language mode policy $\\pi_{\\theta}$ by minimizing the following negative log-likelihood loss function:", + "bbox": [ + 71, + 588, + 491, + 633 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {D P O}} (\\theta) = \\\\ - \\mathbb {E} _ {(x, y _ {w}, y _ {l}) \\sim \\mathcal {D}} \\left[ \\log \\sigma \\left(\\beta \\log \\frac {\\pi_ {\\theta} (y _ {w} | x)}{\\pi_ {\\text {r e f}} (y _ {w} | x)} - \\beta \\log \\frac {\\pi_ {\\theta} (y _ {l} | x)}{\\pi_ {\\text {r e f}} (y _ {l} | x)}\\right) \\right], \\tag {8} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 643, + 491, + 712 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the KL constraint is implicitly integrated through the use of the reference model $\\pi_{\\mathrm{ref}}$ . By minimizing this DPO loss, we directly train the policy to satisfy human preferences without resorting to a separate reward modeling stage or using reinforcement learning optimization as in RLHF, significantly reducing implementation complexity while improving training stability.", + "bbox": [ + 71, + 717, + 491, + 821 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3 Other Preference Optimization", + "text_level": 1, + "bbox": [ + 71, + 835, + 339, + 851 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In addition to DPO, several concurrent preference optimization methods [190, 230, 231] have been proposed that offer alternative approaches to RLHF. These methods explore different strategies for optimizing LLMs to align with human preference without RL. 
Below, we provide a brief introduction to these approaches.", + "bbox": [ + 71, + 854, + 491, + 944 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3.1 Sequence Likelihood Calibration", + "text_level": 1, + "bbox": [ + 504, + 53, + 784, + 66 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Zhao et al. [230] propose Sequence Likelihood Calibration with Human Feedback (SLiC-HF) to directly align LLMs with human preferences. Specifically, the loss function of SLiC-HF is defined as follows:", + "bbox": [ + 503, + 69, + 921, + 127 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {S L i C - H F}} (\\theta) = \\max (0, \\delta - \\log \\pi_ {\\theta} (y _ {w} | x) + \\log \\pi_ {\\theta} (y _ {l} | x)) \\\\ - \\lambda \\log \\pi_ {\\theta} (y ^ {*} | x), \\tag {9} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 137, + 921, + 172 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the first term is the rank calibration loss with $\\delta$ as a margin hyperparameter, and the second term is the cross-entropy regularization loss with $\\lambda$ as a regularization weight. $y^{*}$ is obtained from either high-quality supervised responses in the SFT dataset or the top-ranked candidate response generated by the SFT model.", + "bbox": [ + 503, + 176, + 923, + 265 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3.2 Rank Responses to Align Human Feedback", + "text_level": 1, + "bbox": [ + 504, + 272, + 862, + 286 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Yuan et al. [190] introduce Rank Responses to align Human Feedback (RRHF) for LLMs. RRHF extends pair-wise ranking by considering the list-wise ranking order of multiple responses, thus better utilizing the preference information. 
For an input prompt $x$ and $N$ candidate responses $\\{y_i\\}_{i=1}^N$ , it optimizes the model to assign higher probabilities to higher-ranked responses via a ranking loss and directly supervises the best response using cross-entropy as follows:", + "bbox": [ + 501, + 287, + 923, + 406 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {R R H F}} (\\theta) = \\sum_ {r _ {i} < r _ {j}} \\max \\left(0, \\frac {\\log \\pi_ {\\theta} (y _ {i} | x)}{| | y _ {i} | |} - \\frac {\\log \\pi_ {\\theta} (y _ {j} | x)}{| | y _ {j} | |}\\right) \\\\ - \\lambda \\log \\pi_ {\\theta} \\left(y ^ {*} \\mid x\\right), \\tag {10} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 412, + 921, + 468 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $r_i = r_\\phi(x, y_i)$ represents the reward of the response $y_i$ and $y^* = \\arg \\max_{y_i} r_i$ is the response with the highest reward. Although RRHF avoids the need for reinforcement learning in RLHF, it still utilizes a reward model $r_\\phi$ to rank candidate responses based on human preferences.", + "bbox": [ + 503, + 473, + 921, + 547 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3.3 Preference Ranking Optimization", + "text_level": 1, + "bbox": [ + 504, + 554, + 790, + 569 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Similarly, Song et al. [231] propose Preference Ranking Optimization (PRO) to align LLMs with human preferences by leveraging multiple responses $\\{y_{i}\\}_{i = 1}^{N}$ with the human-annotated order $y_{1} > y_{2} > \\dots >y_{N}$ . 
The loss function of PRO is defined as follows:", + "bbox": [ + 503, + 571, + 923, + 643 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {P R O}} (\\theta) = - \\sum_ {i = 1} ^ {N - 1} \\log \\frac {\\exp \\left(\\frac {1}{\\| y _ {i} \\|} \\log \\pi_ {\\theta} \\left(y _ {i} | x\\right) / \\mathcal {T} _ {i} ^ {i}\\right)}{\\sum_ {j = i} ^ {N} \\exp \\left(\\frac {1}{\\| y _ {j} \\|} \\log \\pi_ {\\theta} \\left(y _ {j} | x\\right) / \\mathcal {T} _ {i} ^ {j}\\right)}, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 516, + 646, + 923, + 705 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the dynamic penalty temperature is defined as $\\mathcal{T}_i^j = 1 / (r_\\phi (x,y^j) - r_\\phi (x,y^i))$ and $\\mathcal{T}_i^i = \\min_{i < j}\\mathcal{T}_i^j$ . This temperature ensures that the probability gap between higher-ranked and lower-ranked responses is adaptively scaled according to their reward differences, thereby stabilizing the optimization process.", + "bbox": [ + 503, + 705, + 921, + 795 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 A TAXONOMY OF DPO", + "text_level": 1, + "bbox": [ + 504, + 805, + 720, + 820 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we introduce a novel taxonomy that categorizes existing DPO works based on four key dimensions: data strategy, learning framework, constraint mechanism, and model property. As illustrated in Fig. 
1, these four dimensions are derived from different components of the DPO loss, providing a systematic framework for understanding the methodological evolution of DPO and highlighting the key distinctions between different variations.", + "bbox": [ + 501, + 825, + 923, + 941 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Data Strategy of DPO", + "text_level": 1, + "bbox": [ + 73, + 51, + 272, + 68 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The data strategy constitutes the foundational pillar of DPO, focusing on how to leverage diverse types of preference data for training LLMs. As shown in Fig. 2, our taxonomy identifies three principal axes of data strategy: quality, feedback, and granularity.", + "bbox": [ + 71, + 73, + 491, + 148 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1.1 Data Quality", + "text_level": 1, + "bbox": [ + 73, + 162, + 215, + 178 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The quality of preference data is a critical factor in determining the effectiveness of DPO training. High-quality data ensures that LLMs effectively learn to align with human preferences, while low-quality data may introduce noise and bias, leading to suboptimal model performance. We categorize data quality considerations into three key aspects: heterogeneity, distinguishability, and noise.", + "bbox": [ + 71, + 181, + 491, + 285 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(a) Data Heterogeneity. Conventional DPO methods assume uniform human preferences when annotating data, thereby overlooking the diversity among annotators. This assumption often skews the model toward the preferences of the majority while neglecting minority viewpoints, potentially leading to biases and unfair treatment of underrepresented groups. To address this issue, Chidambaram et al. 
[42] propose EM-DPO, which learns the distribution of different preference types and their corresponding response strategies. Building on this, they introduce the MinMax-DPO algorithm, which selects a strategy by minimizing the maximum regret across subgroups, ensuring a more balanced representation of preferences among all groups. MallowsPO [43] decomposes the implicit rewards in DPO into prompt dispersion and response scaling rewards. It introduces a novel objective function to capture human preferences for diverse responses to the same prompt. GRPO [44] formulates an objective function that minimizes the loss for the worst-case group, thereby ensuring fairness by prioritizing the disadvantaged groups in the optimization process. GDPO [45] models the language generation process as a combination of belief distribution prediction and belief-based response generation. The corresponding GDPO loss function consists of belief calibration loss and belief-conditioned preference alignment loss. The former encourages the model to capture the diversity of beliefs across groups, while the latter ensures that generated responses align with the given belief.", + "bbox": [ + 71, + 286, + 493, + 680 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(b) Data Distinguishability. A key limitation of DPO is its inability to account for the distinguishability of preference between responses [46, 50, 51, 56, 57]. In some cases, the preferred response is only marginally better than the dispreferred one, while in others, the dispreferred response contains harmful or misleading content, making it significantly worse. Thus, optimization should focus more on cases with substantial preference differences while reducing the effort spent on minor differences. However, most existing methods treat all samples equally, ignoring this data distinguishability. 
To address this, ODPO [46] introduces a monotonically increasing offset function, requiring the reward of the preferred response to exceed that of the dispreferred one by a certain margin. This ensures stronger updates for larger preference gaps. Similarly, Ada-DPO [54] introduces an instance-specific nonlinear scaling parameter, assigning larger weights to strong preference pairs and smaller weights to ambiguous ones based on the reward differences, thereby capturing", + "bbox": [ + 71, + 680, + 493, + 944 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/7d0b0c1f1f41595c2153dd474b54b2117d70052e28f86633cea08504dba7746a.jpg", + "image_caption": [ + "Fig. 2: An overview of DPO data strategy." + ], + "image_footnote": [], + "bbox": [ + 511, + 55, + 919, + 373 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "different levels of data distinguishability. DPO-rc [48] also incorporates the preference reward difference as a coefficient in the loss function. $\\alpha$ -DPO [49] introduces an adaptive preference distribution to obtain dynamic reward margins based on the distribution difference between the policy and reference models. $\\beta$ -DPO [51] analyzes the optimal $\\beta$ parameter for datasets with different reward margins, which dynamically adjusts $\\beta$ based on batch-level reward differences. They also introduce $\\beta$ -guided data filtering to prioritize valuable training data. Curri-DPO [53] sorts preference pairs by reward differences and trains progressively from large to small differences, enabling curricular learning. Similarly, MPO [47] utilizes a reward model to score responses generated by the SFT model, constructing a preference dataset and partitioning it based on preference differences to learn from simple to complex tasks. 
sDPO [55] computes reward accuracy for different datasets based on an initial target model and partitions the dataset in descending order of accuracy, allowing the model to first optimize on simpler samples. Ma et al. [58] propose a preference dataset construction method that adjusts update weights based on response accuracy, assigning lower weights when the model demonstrates higher proficiency. Furthermore, fDPO [52] enhances DPO training by filtering out samples where the generated response of the model policy surpasses the preferred dataset response in reward score.", + "bbox": [ + 501, + 412, + 924, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(c) Data Noise. Human-generated preference annotations often contain inconsistencies, errors, or noise, negatively affecting the performance of DPO. Such noisy data can mislead models, impairing their ability to accurately capture true preferences and generalize effectively to unseen data. Im and Li [64] analyze how noisy feedback influences the generalization performance of preference optimization, showing that increased noise results in higher generalization risks. Specifically, standard DPO loss functions can yield biased estimates under noisy conditions. To address this", + "bbox": [ + 503, + 796, + 923, + 941 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "issue, rDPO [59] proposes to enhance DPO robustness against noisy annotations and improve overall training performance. Zhang et al. [63] introduce a noise-aware strategy leveraging annotator confidence and stability to identify and downweight noisy samples during training. They also propose an adaptive reward margin, emphasizing clean samples to improve learning effectiveness. Complementary to these approaches, PerpCorrect [60] employs a data-driven method to correct noisy annotations directly in the dataset. 
It trains a proxy language model on both clean and noisy samples, distinguishing noise through perplexity differences to improve dataset quality. To systematically explore noise effects, Gao et al. [65] artificially inject various noise types (e.g., Gaussian noise) into datasets, controlling noise intensity via hyperparameters. Their analysis highlights how noise impacts model alignment, guiding future research towards mitigating such negative effects. To address the vulnerability of DPO in noisy environments, ROPO [61] introduces a regularization term to enhance noise tolerance. Additionally, ROPO employs a robust-guided rejection sampling technique. This technique supplements the dataset with samples that contribute minimally to the loss, thereby improving the overall data quality. Kim et al. [62] propose the SPA framework, using model-generated responses and associated confidence scores to detect noise in annotations. SPA further incorporates smoothing techniques into the loss function to alleviate the noise problem. Finally, Wu et al. [66] categorize noise into two types: point noise (single annotation errors) and pairwise noise (errors between annotated pairs). While DPO naturally handles point noise well, it struggles with pairwise noise. Their proposed Dr. DPO introduces a novel loss function explicitly designed for robustness against both point and pairwise noise.", + "bbox": [ + 76, + 53, + 491, + 534 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1.2 Preference Feedback", + "text_level": 1, + "bbox": [ + 76, + 546, + 274, + 559 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Preference feedback refers to the label signals provided by annotators regarding their preferences for different responses. It can be categorized into point-wise, pair-wise, and list-wise feedback. Point-wise feedback evaluates each response independently, assigning a score or labeling it as positive or negative. 
Pair-wise feedback compares two responses to determine which one is preferred, while list-wise feedback ranks multiple responses.", + "bbox": [ + 76, + 563, + 490, + 679 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(a) Point-Wise Feedback. Point-wise feedback is the basic form of feedback. It refers to the type of feedback where individual outputs or samples are evaluated independently, rather than through comparisons with other outputs. This form of feedback is characterized by its simplicity and directness, focusing on the quality or relevance of a single response or item. The predominant methodology in RLHF [35] employs point-wise reward signals generated by reward models to optimize policy models. Similarly, KTO [67] directly maximizes the utility of model generations using loss functions based on prospect theory rather than the log-likelihood of preferences. It requires only a binary signal indicating whether an output is desirable or undesirable for a given input. Furthermore, BCO [68] builds upon the concepts introduced in KTO and explores a new approach to aligning with binary signals. While KTO focuses on optimizing human utility, BCO introduces a binary classifier framework incorporating reward shift and distribution matching that implicitly", + "bbox": [ + 76, + 680, + 491, + 941 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "minimizes the DPO loss. Chen et al. [72] and GPO [73] adopt explicit rewards using Noise Contrastive Alignment (NCA) and General Preference Model (GRM) respectively, and then directly optimize language model policies from point-wise preference data with rewards. However, some methods leverage implicit reward signals to refine model behaviors. To ensure that the learned implicit rewards are comparable to the ground-truth rewards, Cal-DPO [69] introduces a calibration term to the preference optimization objective, which prevents the likelihood of chosen responses from decreasing during training. 
ULMA [71] unifies human demonstration and point-wise preference data into a single framework and handles positive and negative samples with a hybrid objective function. Unlike them, DRO [211] adopts a simple mean-squared objective to optimize the model policy and value function jointly for a single trajectory. Additionally, AOT [70] casts the distributional preference constraint as an optimal transport problem with a convex cost function. The key idea is to minimize the violation of stochastic dominance using a smooth, convex cost function.", + "bbox": [ + 508, + 53, + 919, + 343 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(b) Pair-Wise Feedback. Pair-wise feedback focuses on comparing pairs of data or actions to determine their relative quality or preference. Building upon the theoretical framework of RLHF, DPO implements this paradigm through the utilization of pair-wise preference data, thereby fitting an implicit reward model. Azar et al. [75] introduces a general theoretical framework to unify existing RLHF and DPO methods. The proposed Identity-Preference Optimization (IPO) directly optimizes policies from preferences without relying on reward modeling or the Bradley-Terry assumption, thereby avoiding overfitting issues observed in DPO. Subsequently, DPO-RK and DPO-R [76] integrate the Rao-Kupper and Davidson models into the DPO training objective respectively, thereby extending the capabilities of DPO by explicitly modeling ties in pairwise comparisons. BMC [77] further addresses a key limitation of the weak correlation between winning and losing responses in pairwise data. Specifically, BMC uses \"Bridging\" to enhance the correlation between winning and losing responses by increasing the consistency and informativeness of pairwise preference signals. 
However, previous attempts for aligning LLMs primarily focus on optimizing the model's output preferences given an instruction, which struggles to effectively perceive the fine-grained constraints within complex instructions. Thus IOPO [78] extends traditional alignment methods by considering both input and output preferences to better understand the constraints within the instructions. As current methods rely heavily on paired preference data (i.e., explicitly labeled preferred vs. dispreferred examples), they can be limiting in scenarios where such paired data is unavailable or insufficient. SAPO [80] addresses this issue based on the concept of self-play, which enhances data exploration and exploitation by automatically generating negative samples and integrating off-policy learning. Furthermore, PMPO [79] extends the EM algorithm to incorporate both preferred and dispreferred outcomes. By introducing the probability distribution of dis-preferred outcomes, PMPO can optimize using both types of samples, even when only negative feedback is available. Similarly, D2O [81] avoids harmful information by maximizing the discrepancy between the generated responses and the negative samples. NPO [82]", + "bbox": [ + 508, + 345, + 919, + 941 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "and SimNPO [83] achieve the goal of forgetting the negative impact by regulating the model's prediction probabilities on negative datasets to be as minimal as possible, where SimNPO further eliminates the reference model bias issue inherent in NPO.", + "bbox": [ + 76, + 53, + 488, + 125 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "(c) List-Wise Feedback. List-wise feedback refers to the type of feedback where multiple outputs or responses generated by the model for a given input are evaluated collectively as a list. 
This approach considers the relative ranking or ordering among the outputs, rather than focusing on individual outputs in isolation. Panacea [84] reframes alignment as a Multi-Dimensional Preference Optimization (MDPO) problem and introduces a method that aims to learn the entire Pareto front to accommodate diverse user preferences. In short, Panacea is designed to adapt a single model to list-wise preferences in a Pareto-optimal manner. LiPO [85] and LIRE [86] also treat LM alignment as a list-wise ranking problem, drawing on the rich literature of Learning-To-Rank (LTR). Specifically, LiPO introduces a specific method LiPO-λ, which leverages a list-wise ranking objective that weights each preference pair based on the difference in ranking metrics; while LIRE optimizes the response probability by calculating the exponential probability distribution and uses the reward model to directly guide the optimization process. To better capture the relative proximity within ordinal multiple responses, OPO [87] utilizes the Normalized Discounted Cumulative Gain (NDCG), a widely used ranking metric, to optimize the model's generation probability to match the permutation of responses based on these labels. Similarly, DRPO [88] leverages NDCG as a key metric to optimize the ranking of model outputs. However, DRPO incorporates novel elements like diffNDCG and Adaptive Rank Policy Score to dynamically adjust the score margins between preferred and non-preferred responses based on their ranking positions. mDPO [232] extends preference optimization to multi-sample comparisons and introduces a framework that evaluates and optimizes the collective properties of sample groups. It not only addresses the limitations of single pair-wise methods but also provides a more robust optimization framework, especially for characteristics like diversity and bias. 
Furthermore, RPO [90] introduces a contrastive weighting mechanism that constructs a contrast matrix within each mini-batch to compare preferred and less-preferred responses across prompts. The weights of these comparisons are dynamically adjusted based on the semantic similarity between prompts. Additionally, TODO [91] integrates a tie ranking system into list-wise preference modeling, significantly improving the capture of nuances of human preferences, especially in the presence of noisy or inconsistent labels and frequent ties.", + "bbox": [ + 76, + 125, + 490, + 782 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.1.3 Preference Granularity", + "text_level": 1, + "bbox": [ + 76, + 794, + 284, + 808 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Preference granularity refers to the granularity of preference labels, which determines the level at which preferences are assigned to data. It can be categorized into token-level, step-level, sentence-level, and turn-level granularity, ranging from fine-grained focus on individual tokens to broader preferences over entire interaction turns.", + "bbox": [ + 76, + 811, + 488, + 897 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "(a) Token-Level Granularity. Token-level alignment operates at the character/subword unit of text generation, providing the finest-grained control over model outputs", + "bbox": [ + 76, + 898, + 488, + 941 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Theoretically, Rafailov et al. [92] demonstrate that DPO can represent any dense reward function by reparameterizing it as an optimal advantage function, which allows DPO to optimize policies in the token-level MDP effectively TDPO [93] refines the alignment process from the sentence level to the token level and introduces forward KL divergence constraints. 
TDPO utilizes the Bradley-Terry model to convert sentence-level preference comparisons into a token-level reward system, which allows the model to dynamically adjust its strategy at each token generation step. Furthermore, TIS-DPO[94] estimates the importance weights of tokens based on the differences in prediction probabilities from contrastive LLMs, performing token-level importance sampling on existing data to approximate optimal distribution by assigning weights to each token based on its reward. Moreover, $\\mathrm{D}^2\\mathrm{PO}$ [99] proposes a temporal decay mechanism that dynamically adjusts the contribution of each token-level reward based on its position in the sequences. Unlike these, SparsePO [95] directly learns sparse masks during the training process and controls which tokens are more important for preferences through the sparsity of the masks, thereby achieving dynamic optimization. RTO [96] and SePO [97] first learn a token-level reward function from preference data using DPO, and then RTO optimizes PPO based on this reward signal, while SePO selects key tokens through the estimated reward function. To tackle the need for large-scale annotated data in training, EPO [98] proposes a hierarchical framework that decomposes complex tasks into manageable subgoals using separate LLMs for subgoal prediction and low-level action generation, leveraging environment feedback to automatically generate reward signals and preference data for aligning LLMs.", + "bbox": [ + 506, + 53, + 919, + 518 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To conclude, token-level granularity optimizes models at individual token positions to maximize expected objectives, preserving semantic precision and capturing local syntactic dependencies. However, it increases computational complexity, as processing numerous tokens extends training time, and its sensitivity to noise means errors in a single token can affect the entire sequence. 
Thus, careful loss function design and regularization are essential for stability.", + "bbox": [ + 506, + 520, + 921, + 635 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "(b) Step-level Granularity. Step-level granularity focuses on the intermediate steps or stages in a process, particularly effective for complex problem-solving tasks requiring multiple intermediate steps. Step-DPO [100] and SCDPO [101] treat individual reasoning steps as the basic units for preference optimization, where preference pairs of correct and incorrect steps are generated using LLMs. Furthermore, CPO [102] and MCTS-DPO [103] first utilize more powerful inference structures to generate multiple candidate thoughts at each reasoning step following the Tree-of-Thought (ToT) and Monte Carlo Tree Search (MCTS) respectively, and construct preference pairs based on the selected and unselected intermediate steps. Then they finetune LLMs to generate reasoning steps preferred by ToT during inference using DPO. TPO [104] proposes a preference learning algorithm specifically designed for preference trees that have multiple branches and multi-step responses, and introduces the adaptive step reward mechanism to address the issue of small reward margins caused by shared subtrajectories. It adjusts the reward values for each step based on semantic similarity, helping the model better distinguish", + "bbox": [ + 506, + 636, + 921, + 941 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "between preference pairs. RDPO [105] extends traditional preference datasets to incorporate a rationale field, which explains why a particular response is preferred. RDPO introduces rationale information into the DPO loss function by maximizing the likelihood of both the preference and the rationale, which allows the model to better understand the logic behind preferences during training. 
To address the challenges of sparse rewards and training instability, DAPO [106] uses a critic function to generate dense signals for policy optimization and trains the actor and critic independently to avoid instability.", + "bbox": [ + 71, + 53, + 491, + 213 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To conclude, step-level alignment demonstrates unique advantages in multi-step reasoning tasks by decomposing holistic preferences into intermediate decision points. The primary strength of step-level granularity lies in its capacity to decompose complex objectives into verifiable subgoals, enhancing both interpretability and robustness. For instance, in mathematical reasoning, LLMs can receive feedback on equation derivation steps before final answers, reducing error propagation. However, this granularity still have two key challenges: first, the need for precise step segmentation, which may require domain-specific heuristics or auxiliary models to delineate reasoning boundaries; second, the risk of local optima, where over-optimization of individual steps degrades global coherence.", + "bbox": [ + 71, + 214, + 491, + 417 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(c) Sentence-level Granularity. Sentence-level granularity aligns preferences at the complete utterance level, balancing fine-grained control and computational efficiency. This granularity, represented by the original DPO framework, operates on full response sequences as atomic units for preference comparison. MAPO [107] uses a well-trained translation model to calculate alignment scores between answers in nondominant and dominant languages and then employs preference optimization methods to enhance reasoning consistency. EURUS [108] structures each instruction as a preference tree, containing pairs of correct and incorrect actions to facilitate preference learning. 
Similarly, IRPO [109] focuses on improving the reasoning capabilities of LLMs through an iterative preference optimization on constructed preference pairs such that the winning response has a higher reward than the losing response. FACTALIGN [110] proposes a fine-grained, sentence-level alignment algorithm called fKTO, which extends the KTO method to leverage fine-grained factuality assessments at the sentence level.", + "bbox": [ + 71, + 417, + 491, + 694 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To conclude, the key strength of sentence-level granularity lies in its capacity to preserve holistic semantics while maintaining tractable optimization complexity. Nevertheless, we must carefully consider task requirements. While suitable for short-form generation and classification tasks, sentence-level methods may insufficiently capture fine-grained stylistic nuances or long-range dependencies critical in generation and reasoning domains.", + "bbox": [ + 71, + 694, + 491, + 811 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(d) Turn-level Granularity. Turn-level granularity focuses on the optimization of model behavior at the level of conversational turns, which is particularly relevant for dialogue systems and interactive agents. This granularity level treats each turn of a conversation as a unit for preference alignment, allowing the LLMs to receive feedback on their responses within the context of a single turn. M-DPO [111] introduces a multi-turn direct preference learning framework to enhance the mathematical reasoning capabilities of LLMs when", + "bbox": [ + 71, + 811, + 491, + 941 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "integrated with external tools. It leverages feedback from code interpreters and optimizes trajectory-level preferences using signals generated by the Bradley-Terry model to improve model performance in multi-turn reasoning tasks. 
ETO [112] presents a novel trial-and-error learning method that optimizes LLM agents' policies by contrasting successful and failed trajectories that contain multi-turn interaction. To address the challenges of coarse granularity and training noise in previous methods, SDPO [113] optimizes specific key segments within interactions to improve multi-turn dialogues while minimizing training noise. Specifically, it extracts key segments from the positive sessions that contribute to higher goal and relationship scores and pairs them with corresponding segments from the negative sessions to calculate an adapted DPO loss. Similarly, AgentQ [114] combines MCTS with self-critique mechanisms to provide process-level supervision by ranking actions, and then iterative fine-tuning using DPO. This approach enables LLMs to effectively learn from both successful and unsuccessful trajectories, enhancing their generalization and decision-making capabilities in complex, multi-turn reasoning tasks within interactive environments. DMPO [115] enhances the existing DPO method by replacing the policy constraint with a State-Action Occupancy Measure (SAOM) constraint and incorporating length normalization into the Bradley-Terry model, effectively addressing challenges in multi-turn scenarios. Compared to traditional policy constraints, SAOM constraints better guide the agent to select actions that align with expert trajectories, especially in unexplored states, thereby reducing compounding errors.", + "bbox": [ + 501, + 53, + 924, + 489 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To conclude, turn-level alignment offers critical advantages for interactive systems by optimizing contextually grounded responses while preserving conversational flow. However, in multi-turn dialogue tasks, the turn-level granularity may introduce additional training noise. For example, some correct turns in negative samples may be mistakenly treated as incorrect turns in the loss calculation. 
Additionally, since each turn needs to be processed independently, this can lead to reduced training efficiency.", + "bbox": [ + 503, + 489, + 926, + 623 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.2 Learning Framework of DPO", + "text_level": 1, + "bbox": [ + 504, + 642, + 756, + 657 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The learning framework of DPO focuses on how the language model policy learns from preference data. In this section, we present an overview of the learning framework in DPO, as shown in Fig. 3, which encompasses the learning paradigm and the learning objectives.", + "bbox": [ + 503, + 661, + 923, + 736 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.2.1 Learning Paradigm", + "text_level": 1, + "bbox": [ + 504, + 750, + 692, + 763 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The learning paradigm in DPO determines how preference data is acquired during model training and falls into three distinct categories: offline learning, where the model learns from pre-collected preference datasets; online Learning, where the model updates based on newly generated data; and active Learning, where the model selectively queries annotators obtain preference data.", + "bbox": [ + 501, + 766, + 923, + 869 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(a) Offline Learning. The original DPO framework [74] itself is an offline learning paradigm, where the model learns from a static, pre-collected dataset of preference pairs. Recent research has explored different approaches to merging preference optimization and supervised fine-tuning", + "bbox": [ + 503, + 869, + 924, + 944 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ab1cb06bdd578e7bee29a0deff07d4285154eb2afd39c5194af16ae72fc055ce.jpg", + "image_caption": [ + "Fig. 3: An overview of DPO learning framework." 
+ ], + "image_footnote": [], + "bbox": [ + 81, + 55, + 483, + 349 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "into a single training phase [190]. CPO [116] incorporates a behavior cloning regularizer through KL divergence minimization between the model and preferred data distribution, which effectively combines into adding a negative log-likelihood term on preferred data alongside the contrastive preference loss. Taking a more direct approach, ORPO [117] proposes a monolithic framework that directly augments the standard negative log-likelihood loss with an odds ratio term comparing chosen and rejected responses, eliminating the need for a separate reference policy while preserving SFT's domain adaptation capabilities. ULMA [71] proposes a hybrid method that applies standard SFT loss on positive samples while using a ranking-based DPO loss on negative samples. PAFT [118] introduces a parallel training paradigm where SFT and preference alignment are performed concurrently on the same pre-trained model and then merged using parameter fusion techniques, avoiding the sequential pipeline that can lead to catastrophic forgetting.", + "bbox": [ + 71, + 383, + 491, + 648 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Several advances explore curriculum learning strategies to enhance DPO performance and training efficiency. CurriDPO [53] introduces curriculum learning by ordering multiple preference pairs from easy to hard based on the rating difference between chosen and rejected responses, where pairs with larger rating gaps are presented first, followed by progressively more challenging pairs with smaller rating differences. 
sDPO [55] implements curriculum learning by partitioning preference datasets into sequential chunks measured by reward accuracy and applying them incrementally.", + "bbox": [ + 71, + 648, + 491, + 796 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To avoid substantial computational and data annotation costs for preference alignment, fine-tuning-free alignment methods have gained popularity. Linear Alignment [119] works by directly estimating the optimal policy through a one-step update to the output distribution during inference without requiring parameter tuning or feedback data. ICDPO [120] reinterprets DPO's reward-policy relationship to create a fine-tuning-free alignment method that harnesses in-context learning, treating models before and after demonstration exposure as amateur and expert policies, respectively,", + "bbox": [ + 71, + 796, + 491, + 944 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "then computing their log probability ratio to score and rank candidate responses.", + "bbox": [ + 503, + 53, + 921, + 82 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "(b) Online Learning. DPO faces significant limitations when relying solely on static, pre-collected preference datasets. These datasets, generated by different models, cause a distribution shift that leads to ineffective off-policy learning as the model evolves [145, 152]. By contrast, online DPO employs an iterative framework that continuously updates the policy with real-time feedback, ensuring on-policy learning and reducing misalignment [143, 144, 233].", + "bbox": [ + 501, + 82, + 923, + 198 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "As online DPO progresses, researchers have introduced more flexible frameworks to tackle key challenges. For instance, Yuan et al. [123] proposed a self-rewarding language model: the model generates prompts and responses, then serves as its own judge via LLM-as-a-Judge prompting, scoring on a 5-point scale. 
OAIF [121] uses an LLM as an online annotator for real-time feedback, and OFSDPO [122] addresses catastrophic forgetting by using two Low-Rank Adaptive (LoRA) modules with different optimization speeds. BPO [124] constructs a dynamic trust region around the behavior LLM, adjusting it as preference data is collected, unlike methods that rely on fixed reference models. Furthermore, researchers have refined sampling strategies for online DPO. RSO [126] and RS-DPO [125] employ rejection sampling based on reward gaps. ROPO [61] recovers useful information from discarded queries via robustness-guided rejection sampling. Shi et al. [127] introduced DPO-Mix-R and DPO-Mix-P, demonstrating faster convergence by mixing online samplers with uniform samplers. OPTUNE [128] selectively regenerates responses with low reward scores while reusing high-reward responses. Iterative RPO [109] and DPO-ST [129] enhance CoT reasoning by selecting correct and incorrect answers to form preference pairs at each iteration. Xie et al. [103] used MCTS to collect preference data during training. Researchers have also explored advanced optimization techniques. APO [130] incorporates momentum-based acceleration, using an extrapolation step between the current and previous policies to update the policy. Xiong et al. [131] proposed a two-agent, non-symmetric online DPO framework with a main agent for optimal policy learning and an enhancer agent for exploration. COMAL [132] formulates alignment as a two-player zero-sum game, updating its policy toward a regularized Nash equilibrium in each iteration. PCO [133] iteratively trains the model on preference data with pairwise cringe Loss.", + "bbox": [ + 501, + 198, + 923, + 709 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Recent efforts push for greater autonomy by letting models generate their own feedback [62]. 
SeRA [134] introduces a self-reviewed preference bootstrapping method, using an implicit reward margin to select informative pairs, and employs an ensemble reward approach across iterations. CREAM [135] mitigates self-improving biases by applying a consistency regularization on the preference rankings of consecutive iterations. D2PO [136] combines human-labeled gold data with concurrently updated, discriminator-labeled data. DLMA [137] uses contrastive prompts to compute self-reward scores via log ratio differences, then integrates these scores directly into the DPO objective. Addressing exploration and uncertainty in online DPO has also been a focus [234]. XPO [138] encourages exploration by adding a bonus for responses outside the initial policy's support, and SELM [139] uses an optimism term in reward fitting to", + "bbox": [ + 501, + 709, + 923, + 944 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "actively seek high-reward responses. ETO [112] alternates exploration and training phases to collect failure trajectories, while VPO [140] applies optimism by regularizing the reward model to favor higher-value responses. Xiong et al. [111] extended DPO from single-turn to multi-turn tasks, balancing KL-regularized and non-regularized objectives, and COPO [141] incorporates a count-based bonus to encourage novel responses with low visitation counts.", + "bbox": [ + 71, + 53, + 491, + 167 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Finally, a growing body of work aims to merge online and offline techniques. HyPO [142] uses offline preference data for DPO training while regularizing via online data. 
MPO [47] combines the strengths of DPO and PPO in a two-stage process: it first trains DPO on an easier dataset, then uses this model as a reference for PPO training on more challenging samples.", + "bbox": [ + 71, + 169, + 491, + 271 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "(c) Active Learning. Active learning in DPO is a strategic approach that aims to reduce the annotation cost and improve sample efficiency by selectively querying annotators for the most informative preference examples. Unlike offline learning that uses a fixed dataset or online learning that generates new data continuously, active learning intelligently selects which data points should be labeled based on model uncertainty or other informativeness criteria.", + "bbox": [ + 71, + 272, + 491, + 387 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Muldrew et al. [146] introduced APL, an iterative data acquisition and fine-tuning loop in which batches of prompt/completion pairs are strategically selected using acquisition functions: a predictive entropy-based approach to measure model uncertainty for prompts and a preference certainty measure based on the implicit Bradley-Terry model for completion pairs in DPO. Unlike two-step selection processes in APL that separately select uncertain input prompts and corresponding completions, divAPO [147] integrates both stages into a single selection phase. divAPO maximizes the preference model certainty by simultaneously evaluating the informativeness of input prompts and completion pairs, while also considering the data distribution of the input prompts. Ji et al. [148] proposed ADPO, which selectively queries human preferences only for responses where the model exhibits high uncertainty while using pseudo-labels for confident cases. Das et al. 
[149] also employed active learning on RLHF, which actively selects the context-action pairs that maximize exploration and minimize uncertainty in the reward model.", + "bbox": [ + 76, + 388, + 493, + 680 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "3.2.2 Learning Objective", + "text_level": 1, + "bbox": [ + 73, + 691, + 261, + 705 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In what follows, we present the learning objective in DPO, which determines how the model policy is optimized based on preference data. We first discuss multi-objective learning in DPO, which aims to optimize multiple objectives simultaneously. Then, we introduce self-play learning, which leverages self-generated data for preference alignment.", + "bbox": [ + 71, + 708, + 491, + 796 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "(a) Multi-Objective Learning. Multi-objective learning in DPO addresses the challenge of simultaneously optimizing the language model for multiple, potentially competing preference dimensions, such as helpfulness, harmlessness, and truthfulness. This approach aims to find a balanced policy that satisfies multiple human values rather than optimizing for a single objective, which more closely mirrors the complexity of real-world human preferences.", + "bbox": [ + 71, + 797, + 491, + 912 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "MODPO [150] achieves the sequential optimization of multiple preference objectives by incorporating language", + "bbox": [ + 71, + 912, + 491, + 943 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "modeling directly into reward modeling, using a margin-based loss to maintain performance on previously optimized dimensions. SPO [151] takes a similar iterative constrained optimization approach, optimizing each preference dimension while preventing the degradation of prior alignments through regularization terms. 
MOSLIM [152] takes a different approach by introducing a multi-head classification reward model that assigns different preference dimensions to separate classification heads, enabling simultaneous optimization of multiple preferences without requiring multiple reward or policy models. HPO [153] incorporates auxiliary objectives through offline RL, where the model uses a weighted maximum likelihood objective that combines a preference alignment term with an advantage-weighted term for maximizing arbitrary auxiliary rewards like readability and safety. CPO [154] introduces explicit preference tokens during training that specify desired scores for different objectives, transforming the multi-objective optimization into a conditional optimization problem. DRDO [155] simultaneously models rewards and preferences through a combination of reward distillation and a contrastive log-unlikelihood term in its loss function.", + "bbox": [ + 501, + 53, + 924, + 372 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "(b) Self-Play Learning. Self-play learning in DPO represents an approach where the language model interacts with itself or its previous iterations to generate its own preference data for training, reducing or eliminating the need for human annotations [139, 164]. This method enables continuous self-improvement by leveraging the model's own judgment capabilities to identify and learn from better responses, creating a form of autonomous preference learning.", + "bbox": [ + 503, + 373, + 926, + 489 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "SPIN [156] involves a self-play mechanism where the LLM generates synthetic data from its prior iterations, then fine-tunes itself to distinguish these self-generated responses from those of human-annotated data. 
The method resembles a two-player game, where the model's current iteration tries to improve its responses to better match the target distribution, while the previous iteration attempts to generate responses as close to human data as possible. SPPO [157] treats LLM alignment as a constant-sum two-player game and iteratively refines itself by competing against its previous iteration. Instead of maintaining two competing policies or a reward model, SPO [158] uses a single policy to sample multiple trajectories and uses the proportion of wins in pairwise comparisons as the reward signal. BoNBoN [159] Alignment likewise relies on sampling responses from a base model, but it selects the best ones among n candidates and fine-tunes itself to approximate that best-of-n distribution.", + "bbox": [ + 503, + 489, + 924, + 737 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Some works approach the alignment problem by leveraging Nash equilibrium [132]. Nash-MD [160] learns a preference model from pairwise human feedback and then computes a Nash equilibrium policy that consistently produces preferred responses. Its self-play approach updates the policy by having it compete against itself (or a slight variant of itself) under the learned preference model, which measures how often one response is preferred to another. DNO [161] extends this concept by implementing a batched on-policy algorithm where the current policy generates multiple outputs that are compared both against each other and against a teacher model's outputs. 
IPO-MD [162] combines the strengths of IPO and Nash-MD, where the model generates data using a mixture policy between the online and reference", + "bbox": [ + 503, + 738, + 924, + 941 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 906, + 32, + 923, + 42 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "policies, and uses a preference model to annotate pairs of generations, making the optimization equivalent to finding a Nash equilibrium through self-play. SRPO [163] modifies Nash-MD by introducing a self-improvement policy that refines model outputs through iterative revisions, enabling offline optimization without a learned reward function.", + "bbox": [ + 71, + 53, + 491, + 141 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "3.3 Constraint Mechanism of DPO", + "text_level": 1, + "bbox": [ + 73, + 161, + 341, + 175 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The constraint mechanism of DPO derives from its reformulation of RLHF, which includes a KL divergence constraint between the current policy and a reference policy. As shown in Fig. 4, we re-examine the constraint mechanism of DPO from the perspective of the reference model and different divergence constraints. We also explore various DPO variants with different safety constraints.", + "bbox": [ + 71, + 180, + 491, + 282 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "3.3.1 Reference Model", + "text_level": 1, + "bbox": [ + 73, + 297, + 250, + 311 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The reference model in DPO functions as an anchor to ensure policy updates remain within a controlled range, preventing excessive deviation from initial behaviors. Typically, the reference model is initialized using the SFT model that serves as the starting point for preference optimization. The choice of reference model significantly impacts optimization dynamics. 
A static reference model ensures stable training but may limit adaptability. In the following subsections, we introduce two advanced approaches: reference-free DPO eliminates reliance on the reference model, while dynamic-reference DPO employs an evolving reference model.", + "bbox": [ + 71, + 314, + 491, + 474 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(a) Reference-Free DPO. To reduce the computational and memory costs associated with a reference model, many algorithms have explored training modes that do not require loading the reference model. Xu et al. [116] replaces the reference model with a uniform prior distribution, adding an SFT loss term on preferred data to maintain consistency with the desired behavior. ORPO [117] integrates an odds ratio-based penalty with traditional SFT loss, increasing the probability of preferred responses while decreasing undesirable ones, thereby enabling single-stage training without a separate reference model. SimPO [166] directly uses the average log probability as implicit rewards. This removes the requirement for a separate reference model, significantly improving computational and memory efficiency. SimPER [167] also directly optimizes reverse perplexity for preferred versus rejected responses, creating a preference optimization approach that does not require a separate reference model, thus simplifying training. Despite these advancements, [168] argue that a reference model remains crucial. They compared two reference-free variants using posterior probabilities and likelihood functions as rewards, respectively, and found the original DPO consistently outperformed both. Their results indicate that a strong, well-aligned reference policy can significantly enhance DPO performance.", + "(b) Dynamic-Reference DPO. 
Offline DPO methods often suffer from reward over-optimization, meaning that as the trained model deviates from the reference model, the quality of generated samples tends to degrade. To address this issue, Gorbatovski et al. [165] proposed dynamically updating the reference model using the current model parameters during training, preventing excessive divergence and maintaining high-quality outputs. Curri-DPO [53] and sDPO [55] adopt" + ], + "bbox": [ + 71, + 476, + 491, + 944 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/cddebe4de78a02102cffc5e93cf429138280b5deac316fd35c18e11122d7aa8e.jpg", + "image_caption": [ + "Fig. 4: An overview of DPO constraint mechanism." + ], + "image_footnote": [], + "bbox": [ + 511, + 53, + 916, + 329 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "curriculum learning by sorting data samples from simpler to more complex based on predefined metrics. At each training iteration, the model from the previous step serves as the updated reference model to provide constraints, facilitating progressive learning. Similarly, MPO [47] partitions datasets according to task difficulty, employing a two-stage training procedure. The model trained in the initial stage serves as the reference for the subsequent stage. Additionally, M-DPO [89] compares the performance of a fixed reference model versus a dynamic reference model, finding that the latter yields superior results.", + "bbox": [ + 503, + 357, + 924, + 518 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "3.3.2 Divergence Constraint", + "text_level": 1, + "bbox": [ + 504, + 531, + 718, + 545 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Divergence constraints in DPO play a crucial role in constraining model optimization, balancing alignment performance and model stability. 
In the following subsections, we introduce two modifications to the divergence constraint: one for enhancing diversity and the other for improving generalization.", + "bbox": [ + 503, + 547, + 924, + 635 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(a) Diversity. Standard DPO typically uses reverse KL divergence equivalent to RLHF. However, the mode-seeking nature of reverse KL divergence reduces the diversity of the generated outputs. To overcome this limitation, f-DPO [169] explores various divergences, including forward KL divergence, reverse KL divergence, Jensen-Shannon divergence, and $\\alpha$ -divergence, to achieve a better trade-off between alignment performance and diversity. Slocum et al. [170] further proposes splitting the KL divergence term into entropy and cross-entropy terms. This decoupling allows independent control of generation diversity and closeness to the reference model, preserving output diversity without degrading overall model quality.", + "(b) Generalization. Over-optimization in DPO can negatively impact generalization, causing reduced performance on inputs outside the training distribution. To mitigate this, Huang et al. [178] introduce $\\chi^2$ -divergence as a more aggressive form of regularization compared to KL divergence, alleviating the over-optimization problem. DPO-Kernels [171] employs data-driven methods to select optimal kernel-divergence pairs dynamically, improving task adaptability" + ], + "bbox": [ + 503, + 636, + 926, + 944 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 906, + 32, + 919, + 42 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "and robustness. FlipGuard [172] introduces a customized reward characterization to monitor model performance. If performance drops relative to earlier versions, FlipGuard constrains the model's updates to ensure alignment with previous stable behavior. 
FPO [173] leverages the feature-level constraints using Sparse Autoencoders (SAEs) to improve computational efficiency and training stability. SPO [176] integrates a natural preference loss with a KL divergence-based regularization term computed over the entire model output distribution. By adjusting this divergence term, SPO prevents unwanted shifts beyond the preference dataset, ensuring stable alignment. EXO [175] argues that minimizing the forward KL divergence in DPO introduces bias when approximating the optimal policy. They establish a generalized alignment objective and reveal the equivalence between maximizing KL regularization rewards and minimizing the reverse KL divergence relative to the optimal policy. QDPO [177] utilizes divergence between the quantized model and the full-precision model for preference optimization, effectively addressing the token-flipping issue. Token-flipping refers to the phenomenon where quantization errors skew token distributions, leading to incorrect token selection. GPO [174] constructs a framework that unifies different DPO-related algorithms through theoretical derivations, enabling a deeper understanding of the regularization mechanisms in the DPO family of algorithms.", + "bbox": [ + 71, + 53, + 491, + 433 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "3.3.3 Safety Constraint", + "text_level": 1, + "bbox": [ + 73, + 446, + 250, + 460 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Safety constraints in DPO aim to prevent LLMs from generating harmful, biased, or unethical outputs. However, traditional alignment algorithms often fail to address safety concerns. To enhance the safety alignment, recent studies have introduced several specialized mechanisms based on DPO. SafeDPO [179] introduces a streamlined approach for safety alignment by implicitly optimizing safety objectives within a single stage of policy learning. 
SACPO [180] addresses safety constraints by explicitly formulating language model alignment as a constrained optimization problem, using DPO to optimize the model under safety constraints. Zhang et al. [184] propose creating a backtracking preference dataset that identifies and reverses unsafe outputs, enhancing the safety and robustness of the model. C-DPO [181] integrates dual gradient descent into DPO to balance safety and utility efficiently. This approach achieves a robust trade-off between helpfulness and harmlessness, offering explicit safety guarantees. ADPO [182] introduces adversarial techniques into DPO. It specifically trains models to reduce the probability of unsafe outputs by deliberately generating harmful responses using controlled toxic tokens. Finally, Lee et al. [183] explore the internal mechanisms through which DPO reduces harmful outputs. Their findings suggest that DPO does not remove harmful behaviors learned during pretraining but instead teaches models to bypass or suppress these behaviors. This insight helps explain certain safety vulnerabilities like jailbreaks.", + "bbox": [ + 71, + 465, + 491, + 859 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "3.4 Model Property of DPO", + "text_level": 1, + "bbox": [ + 71, + 878, + 285, + 893 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "DPO has shown great promise in aligning LLMs with human preferences by directly optimizing model outputs based on preference data. During this process, the underlying models", + "bbox": [ + 71, + 898, + 491, + 944 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/e5c5a6a0d780ea4b5c9a8a90db2723f6fdb6c09e14347e19016d55dcced76086.jpg", + "image_caption": [ + "Fig. 5: An overview of DPO model property." 
+ ], + "image_footnote": [], + "bbox": [ + 514, + 55, + 916, + 327 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "exhibit certain properties that are crucial for understanding their behavior and effectiveness. These properties can be broadly categorized into two aspects: the generation property and the optimization property, as shown in Fig. 5. In the following sections, we explore these two properties in more detail, analyzing their implications for model alignment.", + "bbox": [ + 503, + 356, + 923, + 446 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "3.4.1 Generation Property", + "text_level": 1, + "bbox": [ + 504, + 458, + 702, + 473 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The generation property of DPO primarily concerns issues related to distribution shifts and length biases. DPO is sensitive to distribution shifts between the base model outputs and the preference data, which may reduce diversity and generalization. Additionally, DPO has a tendency to favor longer responses, a phenomenon known as morbidity, which can negatively impact performance and user experience.", + "bbox": [ + 501, + 474, + 923, + 578 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "(a) Distribution Shift. In RLHF, the reward model is trained on a static set of preference data collected offline. During fine-tuning, the generated responses often differ from this original training data, resulting in a distribution shift. This shift can cause inaccurate reward predictions and lead to over-optimization. The implicit reward model in DPO also suffers from this distribution shift issue. Moreover, Lin et al. [188] have shown that the implicit reward model in DPO performs poorly on Out-Of-Distribution (OOD) data compared to explicit reward models. 
Experimental results indicate that DPO can transfer probability mass to the highreward response regions covered by the preference data, but it may also cause the distribution of responses generated by the model to deviate significantly from that of the reference policy, resulting in responses that do not meet expectations [189]. To address these problems, many researchers are now exploring online DPO approaches [109, 121, 122, 125], aiming to mitigate OOD by continuously updating preference data during training.", + "bbox": [ + 503, + 578, + 923, + 853 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Existing DPO methods also face significant limitations due to their dependence on specific training tasks. Their optimal solutions lack robustness when applied to OOD tasks. Thus, SRPO [163] reframes alignment as a self-improvement process, which optimizes a self-improvement policy and a generative policy using a min-max objective, ensuring", + "bbox": [ + 501, + 854, + 924, + 944 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "robustness by making the solution independent of training tasks. Zhang et al. [139] also identify notable issues in DPO when handling OOD tasks. First, DPO tends to overly favor novel content it has not seen during training. Second, it easily gets stuck in local optima, limiting exploration. To address these problems, they propose Self-Exploring Language Models (SELM), incorporating an optimism term to encourage broader exploration of new responses.", + "bbox": [ + 71, + 53, + 491, + 169 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Another significant challenge of DPO is preference drift, where human preferences evolve, changing data distributions over time. Traditional DPO algorithms typically overlook such temporal shifts, mistakenly interpreting them as noise. 
To address this, NS-DPO [185] propose to assign higher weights to recent data, allowing models to better adjust to evolving preferences.", + "bbox": [ + 71, + 169, + 491, + 272 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "(b) Length Bias. Length bias in DPO refers to the tendency of model-generated outputs to become excessively long during training. This issue is similar to the length bias observed in RLHF [197] and is particularly pronounced in DPO. Length bias affects response quality and overall model performance. To mitigate this issue, researchers have developed several solutions, which can be categorized into three main approaches: length regularization, length normalization, and length sampling.", + "bbox": [ + 71, + 272, + 491, + 404 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Length regularization is a common approach to controlling length bias in DPO. By introducing regularization terms into the objective function, the model can constrain response length and reduce morbidity, thereby alleviating the length bias problem. R-DPO [191] introduces a length-based penalty term to the DPO objective function, explicitly discouraging morbidity. $\\mathrm{D}^2\\mathrm{PO}$ [99] introduces a dynamic weighting mechanism by incorporating a temporal decay factor. Unlike previous methods that apply uniform reward contributions across sequences, $\\mathrm{D}^2\\mathrm{PO}$ adjusts the influence of each reward based on its position in the response. Higher weights are assigned to rewards associated with earlier tokens, as they are more critical for model alignment, while later rewards gradually receive lower weights. 
This adaptive approach prevents overfitting to less relevant tokens, thereby addressing length bias in DPO.", + "bbox": [ + 71, + 404, + 491, + 636 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Length normalization aims to eliminate the loss bias caused by response length differences, allowing the model to evaluate texts of varying lengths more fairly. This approach prevents the model from developing an unreasonable preference for either long or short responses [198]. RRHF [190] and SimPO [166] first propose to apply length normalization to responses, ensuring that the loss remains unaffected by response length. LN-DPO [194] further integrates SimPO-like length normalization into DPO, demonstrating that this approach enhances response quality while mitigating morbidity. LD-DPO [195] achieves length desensitization by reparameterizing the likelihood in DPO. Specifically, it decomposes the likelihood of the longer response in a preference pair into the product of the likelihood of the public-length portion and the likelihood of the excessive portion. It then introduces a hyperparameter to mitigate the morbidity preference. This adjustment smooths the relationship between likelihood and response length, reducing its impact on optimization. For multi-turn dialogue tasks, DMPO [115] introduces length normalization for the number of turns in multi-turn preference optimization.", + "bbox": [ + 71, + 636, + 493, + 944 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "An alternative approach to controlling length bias in DPO is through sampling-based methods. SamPO [192] introduces a down-sampling method to compute regularized KL divergences. By balancing token-level probability distributions between preferred and rejected responses, SamPO reduces length bias in DPO training. Yuan et al. 
[193] propose Length-Instruction Fine-Tuning (LIFT), a method to improve instruction-following models' ability to adhere to length constraints by augmenting existing training data with explicit length instructions and using DPO for training. This enables the model to generalize across prompts requiring different response lengths. For long-context tasks, LongPO [196] enables short-context LLMs to self-evolve for long-context tasks by learning from self-generated short-to-long preference data, which includes paired responses for long-context inputs and their compressed short-context counterparts. LongPO incorporates a short-to-long KL constraint to prevent degradation of short-context performance during long-context alignment, achieving strong performance on both short- and long-context tasks.", + "bbox": [ + 501, + 53, + 924, + 345 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "3.4.2 Optimization Property", + "text_level": 1, + "bbox": [ + 504, + 356, + 714, + 372 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The optimization property of DPO involves likelihood collapse and alignment tax. While DPO aims to increase the likelihood of preferred responses and decrease dispreferred ones, the actual optimization process does not explicitly enforce this balance. Moreover, alignment improvements often come at the cost of the original capabilities of LLMs, known as alignment tax.", + "bbox": [ + 501, + 373, + 924, + 474 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "(a) Likelihood Collapse. Likelihood collapse refers to the unintended reduction in the likelihood of both preferred and dispreferred responses during DPO training [92]. This phenomenon can lead to unintentional unalignment, where the model's outputs deviate from human preferences, potentially producing undesirable or harmful responses. This phenomenon is also referred to as likelihood displacement in prior studies [204]. 
Additionally, the gradients associated with increasing the likelihood of preferred responses and decreasing that of dispreferred responses can become entangled, hindering effective learning. This entanglement complicates the optimization process, making it challenging to achieve the desired alignment [203]. Theoretical analyses have further elucidated the underlying causes of likelihood collapse. In particular, Feng et al. [202] developed an analytical framework grounded in field theory. Their analysis of the gradient vector field of the DPO loss function revealed that the loss function decreases the probability of generating human-disliked data at a faster rate than it increases the probability of generating human-like data.", + "bbox": [ + 503, + 476, + 924, + 767 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Several strategies have been proposed to address likelihood collapse. Pal et al. [200] introduce DPO-Positive (DPOP), which adds a penalty term to maintain a high log-likelihood for preferred examples. Similarly, LLaMA [235] augments DPO training with a negative log-likelihood term to stabilize training and preserve the log-likelihood of chosen responses [109]. Flex-DPO [201] adaptively adjusts parameters to slow the decline in the likelihood of dispreferred responses and balance gradients for both chosen and rejected outputs. D'Oosterlinck et al. [199] propose Anchored Preference Optimization (APO), which provides fine-grained control over probability updates: APO-zero increases the", + "bbox": [ + 503, + 767, + 924, + 941 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 906, + 32, + 923, + 42 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/522902a82b6177e867e4aff6c3488ea5e1a2da6b4a249cebc164f52549476e7d.jpg", + "table_caption": [ + "TABLE 1: An overview of datasets (upper row) and benchmarks (lower row) for DPO." + ], + "table_footnote": [], + "table_body": "
DatasetTask DescriptionData Size (Training & Test)Data SourceData StructureEvaluation Metric
UltraFeedback [237]Instruction-Following, Helpful64K&-AIList-
SafeRLHF [238]Harmless, Helpful73.9K&8.21KHuman&AIPair-
HelpSteer [239]Helpful35.3K&1.8KHumanPoint-
PRM800K [240]Mathematical Reasoning800K&-HumanPoint-
SHP-2 [241]Q&A From Reddit3600K&241KHumanPair-
Nectar [242]Conversations183K&-AIList-
OpenOrca [243]Conversations2940K&-AISample-
Capybara [244]Multi-Turn Conversations16K&-Human&AISample-
Step-DPO [100]Mathematical Reasoning10.8K&-Human&AIPair-
BeaverTails [245]Harmless, Helpful330K&36KHuman&AIPoint-
IMDb [246]Movie Reviews25K&25KHumanSampleAccuracy
Reddit TL;DR [247]Summarization1330K&-HumanSampleWin Rate
Anthropic-HH [248]Harmless, Helpful161K&8.55KAIPairWin Rate
GSM8K [249]Mathematical Reasoning7.47K&1.32KHumanSampleAccuracy
AlpacaEval2 [250]Automatic Evaluation-&0.8KAISampleWin Rate
MT-Bench [251]Multi-Turn Question-&3.3KHumanPairWin Rate
AdvBench [252]Harmful Behaviors-&0.5KHumanSampleAttack Success
Arena-Hard [253]Updating Evaluation-&0.5KAISampleWin Rate
TruthfulQA [254]Truthful-&0.8KHumanPairAccuracy
IFEval [255]Instruction-Following-&0.5KHumanSampleAccuracy
BBH [256]Multistep Reasoning-&23 TasksHumanSampleAccuracy
MATH [257]Mathematical Reasoning7.5K&5KHumanSampleAccuracy
GPQA [258]Biology, Physics, and Chemistry-&0.45KHumanSampleAccuracy
MUSR [259]Multistep Reasoning-&0.76KAISampleAccuracy
MMLU-Pro [260]Language Understanding-&12KHuman&AISampleAccuracy
", + "bbox": [ + 76, + 61, + 919, + 377 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "probability of winning outputs and decreases that of losing outputs, whereas APO-down decreases both, but with a stronger decline for losing outputs.", + "bbox": [ + 71, + 388, + 491, + 434 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Another notable challenge related to likelihood collapse is likelihood over-optimization, where the performance of a model on a proxy metric (such as its own likelihood estimates) improves, while its true performance does not. Zhang and Ranganath [236] show that reductions in the likelihood loss of DPO do not necessarily translate into higher win rates. Similarly, Shi et al. [205] further investigates the problem of likelihood over-optimization in DPO, demonstrating that higher completion likelihoods do not necessarily correlate with better model performance and may even degrade it. This study identifies key indicators of over-optimization and highlights the need to balance likelihood optimization with output diversity. e-DPO [187] also shows that DPO can lead to degenerate policies due to overfitting, and proposes a solution using reward model distillation to regularize the implicit reward of the language model. The method trains the language model to match the probability distribution induced by a reward model and introduces a pessimistic extension to handle uncertainty in the reward model, thereby improving the robustness of DPO.", + "bbox": [ + 71, + 439, + 491, + 733 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "(b) Alignment Tax. Alignment tax refers to the unintended consequence where improving a model's preference alignment degrades its general capabilities acquired during pretraining [206]. Thakkar et al. [207] demonstrate the sensitivity of DPO to training data composition, showing significantly worse performance degradation than SFT when using mixed-preference datasets. Furthermore, Chen et al. 
[209] identify that DPO struggles with optimizing ranking tasks. While DPO improves ranking accuracy, it disproportionately harms generative capabilities. Pentyala et al. [118] also observe capability forgetting during sequential training, where DPO objectives conflict with previously learned SFT patterns. To address this, researchers propose model merging strategies that balance alignment and performance.", + "bbox": [ + 71, + 737, + 495, + 944 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "PAFT [118] separately trains SFT and DPO objectives on a pretrained model using distinct datasets, then merges the parameters through weighted averaging. Additionally, Lu et al. [208] propose online merging optimizers, which integrate model merging into each optimization step of DPO to balance human preferences and basic capabilities. By merging gradients with parameter differences between SFT and pretrained models, these optimizers effectively enhance alignment while mitigating alignment tax.", + "bbox": [ + 503, + 388, + 924, + 523 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "4 BENCHMARKS AND ANALYSIS", + "text_level": 1, + "bbox": [ + 504, + 544, + 779, + 559 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this section, we provide a comprehensive overview of existing benchmarks and evaluation for DPO methods. We first introduce the key datasets and benchmarks used to train or evaluate DPO models. We then present a comparative analysis of the performance of different DPO methods on these benchmarks, highlighting their strengths and limitations.", + "bbox": [ + 503, + 566, + 924, + 655 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "4.1 Datasets and Benchmarks", + "text_level": 1, + "bbox": [ + 504, + 674, + 741, + 689 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A diverse range of datasets and benchmarks has been specifically curated to facilitate research in DPO. 
Table 1 summarizes these datasets and benchmarks, highlighting their task descriptions, dataset sizes, data sources, data structures, and evaluation metrics. These datasets and benchmarks span a broad range of tasks, such as harmlessness and helpfulness evaluation and mathematical reasoning. They also exhibit significant diversity in scale, ranging from smaller, specialized datasets to large-scale collections such as SHP-2, which contains over 3.6 million samples. Additionally, datasets differ in their sources: some rely purely on human annotations, others on AI-generated content, and many adopt a hybrid approach combining human and AI-generated data. The data structures employed across these datasets include single-sample without preference label, point-wise annotations, pair-wise comparisons, and list-wise comparisons. Common evaluation metrics include accuracy", + "bbox": [ + 501, + 694, + 924, + 944 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/6f8f061ab915d05683da8b9488cffb80716ed95531ead04552cb45454c8c4d61.jpg", + "table_caption": [ + "TABLE 2: Experimental results of different DPO variants on Open LLM Leaderboard. The underline indicates the best performance." + ], + "table_footnote": [], + "table_body": "
ModelMistral-7B-BaseLLaMA-3-8B-Base
IFEvalBBHMATHGPQAMUSRMMLU-ProAVERAGEIFEvalBBHMATHGPQAMUSRMMLU-ProAVERAGE
SFT3.441.19.228.842.027.725.429.046.315.328.641.331.031.9
RRHF [190]10.040.61.726.446.326.125.231.046.813.931.436.830.531.7
SLiC-HF [230]11.044.09.929.242.628.127.541.749.517.530.439.731.735.1
DPO [74]11.143.77.128.543.826.726.834.348.217.231.940.131.533.9
IPO [75]9.442.89.729.739.727.826.535.349.015.932.841.431.934.4
CPO [116]8.042.79.628.942.127.326.432.446.916.830.639.131.832.9
KTO [67]12.943.712.028.946.128.328.640.248.318.031.040.131.134.8
ORPO [117]28.446.413.530.241.429.531.640.049.116.830.738.432.034.5
R-DPO [191]10.043.07.628.739.327.226.036.448.817.231.640.631.534.4
SimPO [166]11.143.18.428.939.527.226.440.848.615.831.040.531.834.7
ModelMistral-7B-InstructLLaMA-3-8B-Instruct
IFEvalBBHMATHGPQAMUSRMMLU-ProAVERAGEIFEvalBBHMATHGPQAMUSRMMLU-ProAVERAGE
SFT48.446.210.929.147.627.134.950.749.326.931.037.935.738.6
RRHF [190]45.245.310.128.544.226.233.351.349.327.229.639.535.338.7
SLiC-HF [230]39.446.211.428.749.026.833.641.650.926.331.339.235.337.4
DPO [74]49.045.611.026.946.126.834.248.950.125.829.438.736.038.2
IPO [75]42.645.311.827.849.327.234.050.449.526.329.637.935.738.2
CPO [116]38.846.010.128.548.426.933.150.649.126.831.338.135.838.6
KTO [67]46.245.710.927.846.027.334.043.150.126.331.238.135.037.3
ORPO [117]37.645.111.228.246.926.532.643.050.626.929.339.135.137.3
R-DPO [191]46.845.99.928.746.227.634.250.950.325.329.839.035.738.5
SimPO [166]45.445.910.428.345.027.133.748.849.225.029.339.235.137.8
", + "bbox": [ + 76, + 61, + 919, + 324 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "(for tasks like mathematical reasoning found in GSM8K and MATH), win rates derived from pairwise comparisons (such as MT-Bench and Anthropic-HH), and attack success rates used for assessing adversarial robustness (AdvBench).", + "bbox": [ + 71, + 337, + 491, + 398 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "4.2 Results", + "text_level": 1, + "bbox": [ + 73, + 412, + 171, + 426 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "To demonstrate the effectiveness of different DPO variants, we conduct experiments on the Open LLM Leaderboard. We compare different DPO variants using Mistral-7B-Base, Mistral-7B-Instruct [261], LLaMA-3-8B-Base, and LLaMA-3-8B-Instruct [235] as starting points. The overall experimental setup follows Meng et al. [166], ensuring a reproducible evaluation of different DPO variants. For Mistral-7B-Base and LLaMA-3-8B-Base, the SFT models are trained based on the UltraChat-200k dataset [262], and subsequently applied different DPO variants on the SFT models using the UltraFeedback dataset [237]. For Mistral-7B-Instruct and LLaMA-3-8B-Instruct, which have already undergone instruction-tuning, the preference dataset is regenerated by collecting responses from the SFT models using prompts from the UltraFeedback dataset [237].", + "bbox": [ + 71, + 431, + 491, + 648 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The experimental results, as summarized in Table 2, highlight the performance of different DPO variants across various benchmarks. For the Mistral-7B-Base and LLaMA-3-8B-Base models, ORPO consistently achieves the highest average scores, indicating its effectiveness in aligning models with human preferences. Notably, ORPO outperforms other methods on IFEval, BBH, and MATH, demonstrating its superiority in instruction-following and mathematical reasoning tasks. 
Meanwhile, SLiC-HF and KTO also achieve competitive results, particularly in BBH and GPQA, suggesting that these methods effectively leverage preference data for enhanced performance. For the Mistral-7B-Instruct and LLaMA-3-8B-Instruct models, the improvements across different DPO variants are more nuanced. While DPO and R-DPO show strong performance in IFEval and MMLU-Pro, IPO and CPO demonstrate robustness in handling complex reasoning tasks like MATH and GPQA. Overall, the results indicate that different DPO variants exhibit varying strengths across benchmarks, with some methods excelling in base models while others are more effective for instruct models.", + "bbox": [ + 71, + 650, + 493, + 941 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "5 APPLICATIONS", + "text_level": 1, + "bbox": [ + 504, + 335, + 658, + 351 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In this section, we discuss the applications of DPO in various domains, including different LLM-based applications, diffusion models, and multi-modal LLMs. We provide an overview of the key challenges and opportunities in each domain and highlight the potential impact of DPO on real-world applications.", + "bbox": [ + 503, + 354, + 926, + 445 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "5.1 LLM-based Applications", + "text_level": 1, + "bbox": [ + 504, + 462, + 728, + 478 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "DPO has emerged as a powerful paradigm for aligning LLMs with human preferences across diverse applications [116, 235, 263, 264]. In code generation, DPO enhances control over code quality by optimizing based on preferences from automated tests [265, 266, 267]. In mathematical reasoning, DPO reduces errors in complex problem-solving by emphasizing step-level preference optimization [100, 101, 129, 268]. 
Multilingual applications leverage DPO to synchronize cross-lingual preferences, thereby improving translation accuracy and cultural relevance [107, 269]. Recommendation systems utilize DPO to refine personalization by incorporating user preference data to optimize item rankings, thereby enhancing the model's ability to distinguish preferred items from less preferred ones [270, 271]. These examples highlight the adaptability of DPO in achieving human-aligned outputs across diverse tasks.", + "bbox": [ + 501, + 479, + 924, + 715 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "5.2 Diffusion Models", + "text_level": 1, + "bbox": [ + 504, + 733, + 671, + 747 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In the realm of diffusion models, DPO has been adapted to better align generated content with user expectations [272, 273, 274, 275]. By optimizing preferences over image-text pairs, DPO enhances the semantic accuracy of generated images and mitigates the production of undesirable or biased content. Studies have demonstrated that diffusion models fine-tuned with DPO respond more accurately to complex prompts compared to those trained with traditional techniques. Moreover, the efficiency of DPO allows for the fine-tuning of large-scale models using limited preference data, addressing significant computational challenges in training diffusion models [276, 277, 278]. 
While scaling DPO for high-resolution and dynamic content generation remains", + "bbox": [ + 501, + 751, + 924, + 944 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "challenging, its ability to simplify reward modeling makes it a promising method for controlled content creation [279].", + "bbox": [ + 71, + 53, + 491, + 84 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "5.3 Multi-Modal LLMs", + "text_level": 1, + "bbox": [ + 73, + 102, + 246, + 116 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For multi-modal LLMs, DPO plays a crucial role in aligning preferences across different data types, thereby improving coherence in tasks such as visual question answering and image captioning [89, 280, 281, 282, 283]. By optimizing alignment between textual responses and visual inputs, DPO reduces hallucinations in multi-modal interactions, ensuring outputs remain faithful to the given context. Although reconciling different types of feedback can be challenging, DPO offers a practical framework for lightweight adaptation, making it well-suited to preference-intensive multi-modal applications [280, 284, 285].", + "bbox": [ + 71, + 121, + 491, + 282 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "6 CHALLENGES AND FUTURE DIRECTIONS", + "text_level": 1, + "bbox": [ + 73, + 304, + 433, + 319 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In this section, we discuss the key challenges and future directions in DPO research. We identify several critical issues that need to be addressed to further advance the field. 
Moreover, we propose several promising research directions that can help overcome these challenges and accelerate the adoption of DPO in the future.", + "bbox": [ + 71, + 324, + 491, + 412 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "6.1 Efficient Preference Optimization", + "text_level": 1, + "bbox": [ + 73, + 431, + 361, + 446 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Efficient preference optimization remains a pivotal challenge, as current DPO methods hinge on the availability of high-quality preference data, yet the manual collection of human annotations is both time-consuming and labor-intensive while automatically model-generated datasets often suffer from issues such as limited diversity, inherent biases, and insufficient fidelity to human judgment [121, 122, 128, 129]. Moreover, even though DPO circumvents the intricacies of reward model engineering common in RL, it does not fully leverage the exploratory strengths that RL methods offer, as evidenced by recent advances in reasoning approaches where RL-based training has achieved notable successes [18, 19]. 
This opens up an avenue for future research to not only enhance data efficiency through advanced learning techniques but also to integrate novel exploration mechanisms [138, 141], potentially through hybrid models that amalgamate the direct preference optimization benefits of DPO with the robust exploratory capabilities characteristic of RL.", + "bbox": [ + 71, + 450, + 493, + 715 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "6.2 Multi-Modal Preference Optimization", + "text_level": 1, + "bbox": [ + 73, + 733, + 385, + 748 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Multi-Modal Preference Optimization presents another frontier, given that existing DPO frameworks have primarily targeted text-based modalities while many real-world applications demand the alignment of diverse human preferences across text, images, audio, and even video [280, 284, 285, 286, 287]. In scenarios where cross-modal cues might conflict, such as the need for concise text paired with richly detailed imagery, the challenge lies in constructing a unified preference representation space that can intelligently and automatically recalibrate the priority of different modalities based on the contextual demands of the task at hand [89, 282, 283]. 
Future directions in this area could involve the development of innovative multi-modal preference encoding architectures,", + "bbox": [ + 71, + 752, + 495, + 944 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "which are capable of disentangling compound preferences into modality-specific and cross-modal components that align conflicting preferences while also adapting dynamically to changing inputs.", + "bbox": [ + 503, + 53, + 924, + 113 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "6.3 Continuous Preference Optimization", + "text_level": 1, + "bbox": [ + 504, + 136, + 816, + 151 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Continuous preference optimization addresses the dynamic nature of human preferences that evolve over time or vary with different phases of a task, a factor that static DPO models often fail to capture [123, 135, 137, 185]. As social norms and individual preferences shift, there is an increasing need for systems that can continuously recalibrate their alignment strategies in real time while simultaneously mitigating the risk of catastrophic forgetting. Future research in this domain may focus on meta-learning approaches that enable models to learn not only from the current state of preferences but also how to efficiently adapt when these preferences change. 
By integrating online learning frameworks with mechanisms for detecting temporal shifts and contextual variability in user behavior, researchers can pave the way toward systems that remain consistently relevant and effective in the face of evolving societal and individual expectations.", + "bbox": [ + 503, + 157, + 924, + 392 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "6.4 Interpretable Preference Optimization", + "text_level": 1, + "bbox": [ + 504, + 416, + 823, + 431 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Interpretable preference optimization is critical for building trust in models that implicitly align human values, as the opaque nature of current DPO complicates the ability to audit and control the alignment process. In practice, human preferences are multi-dimensional [150, 151, 154], encompassing aspects such as factual accuracy, fairness, creativity, and beyond, and there is a pressing need to decompose these complex preferences into interpretable components that can be individually examined and fine-tuned. Future research could leverage advances in explainable techniques to develop models that not only achieve fine-grained alignment across diverse values but also provide transparent insights into how different preference dimensions interact to shape final decisions. This level of interpretability would allow stakeholders to balance competing values more effectively, ensuring that the alignment process remains both accountable and adaptable as societal norms continue to evolve.", + "bbox": [ + 503, + 436, + 926, + 686 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "7 CONCLUSION", + "text_level": 1, + "bbox": [ + 504, + 713, + 648, + 728 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In recent years, DPO has emerged as a promising paradigm for aligning LLMs with human preferences by directly optimizing model policies using preference data. 
Despite its potential, the DPO research landscape remains fragmented, with a lack of systematic organization and comparative analysis. In this survey, we present a comprehensive overview of DPO and introduce a novel taxonomy that categorizes existing works into four key dimensions: data strategy, learning framework, constraint mechanism, and model property. We have also discussed the key benchmarks, evaluation results, and applications of DPO, highlighting the challenges and future directions in this field. By providing a systematic analysis of the existing DPO methods, we aim to facilitate further research and development in this area.", + "bbox": [ + 503, + 737, + 924, + 944 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 906, + 32, + 923, + 42 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 75, + 51, + 187, + 66 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Wayne Xin Zhao et al. A survey of large language models. arXiv, 2023.", + "[2] Humza Naveed et al. A comprehensive overview of large language models. arXiv, 2023.", + "[3] Yupeng Chang et al. A survey on evaluation of large language models. TIIS, 2024.", + "[4] Shervin Minaee et al. Large language models: A survey. arXiv, 2024.", + "[5] Shukang Yin et al. A survey on multimodal large language models. arXiv, 2023.", + "[6] Duzhen Zhang et al. Mm-llms: Recent advances in multimodal large language models. ACL, 2024.", + "[7] Jingyi Zhang et al. Vision-language models for vision tasks: A survey. TPAMI, 2024.", + "[8] Zhehui Wang et al. Enabling energy-efficient deployment of large language models on memristor crossbar: A synergy of large and small. TPAMI, 2024.", + "[9] Hongru Wang et al. A survey of the evolution of language model-based dialogue systems. arXiv, 2023.", + "[10] Zihao Yi et al. A survey on recent advances in llm-based multi-turn dialogue systems. 
arXiv, 2024.", + "[11] Jiawei Liu et al. Is your code generated by chatgpt really correct? rigorous evaluation of large language models for code generation. NeurIPS, 2023.", + "[12] Daya Guo et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. arXiv, 2024.", + "[13] Xue Jiang et al. Self-planning code generation with large language models. TOSEM, 2024.", + "[14] Dave Van Veen et al. Adapted large language models can outperform medical experts in clinical text summarization. Nature Medicine, 2024.", + "[15] Jesutofunmi A Omiye et al. Large language models in medicine: the potentials and pitfalls: a narrative review. Annals of Internal Medicine, 2024.", + "[16] Karan Singhal et al. Toward expert-level medical question answering with large language models. Nature Medicine, 2025.", + "[17] Fenglin Liu et al. Aligning, autoencoding and prompting large language models for novel disease reporting. TPAMI, 2025.", + "[18] Aaron Jaech et al. Openai o1 system card. arXiv, 2024.", + "[19] Daya Guo et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv, 2025.", + "[20] Julia Hirschberg and Christopher D Manning. Advances in natural language processing. Science, 2015.", + "[21] Xiaowei Huang et al. A survey of safety and trustworthiness of large language models through the lens of verification and validation. Artificial Intelligence Review, 2024.", + "[22] Yue Zhang et al. Siren's song in the ai ocean: a survey on hallucination in large language models. arXiv, 2023.", + "[23] Isabel O Gallegos et al. Bias and fairness in large language models: A survey. Computational Linguistics, 2024.", + "[24] Yufei Wang et al. Aligning large language models with human: A survey. arXiv, 2023.", + "[25] Yang Liu et al. Trustworthy llms: A survey and guideline for evaluating large language models' alignment. arXiv, 2023.", + "[26] Tianhao Shen et al. Large language model alignment: A survey. 
arXiv, 2023.", + "[27] Hannah Rose Kirk et al. The benefits, risks and bounds of personalizing the alignment of large language models to individuals. Nature Machine Intelligence, 2024.", + "[28] Usman Anwar et al. Foundational challenges in assuring alignment and safety of large language models. arXiv, 2024.", + "[29] Bofei Gao et al. Towards a unified view of preference learning for large language models: A survey. arXiv, 2024.", + "[30] Ruili Jiang et al. A survey on human preference learning for large language models. arXiv, 2024.", + "[31] Zhichao Wang et al. A comprehensive survey of llm alignment techniques: Rlhf, rlaif, ppo, dpo and more. arXiv, 2024.", + "[32] Genta Indra Winata et al. Preference tuning with human feedback on language, speech, and vision tasks: A survey. arXiv, 2024.", + "[33] Yue Huang et al. Position: TrustLLM: Trustworthiness in large language models. ICML, 2024.", + "[34] Paul F Christiano et al. Deep reinforcement learning from human preferences. NeurIPS, 2017.", + "[35] Long Ouyang et al. Training language models to follow instructions with human feedback. NeurIPS, 2022.", + "[36] Nisan Stiennon et al. Learning to summarize with human" + ], + "bbox": [ + 76, + 75, + 491, + 941 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "feedback. NeurIPS, 2020.", + "[37] Josh Achiam et al. Gpt-4 technical report. arXiv, 2023.", + "[38] Yuntao Bai et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv, 2022.", + "[39] Anthropic. The claude 3 model family: Opus, sonnet, haiku, 2024.", + "[40] Yuchun Miao et al. Inform: Mitigating reward hacking in rlhf via information-theoretic reward modeling. NeurIPS, 2024.", + "[41] Stephen Casper et al. Open problems and fundamental limitations of reinforcement learning from human feedback. arXiv, 2023.", + "[42] Keertana Chidambaram et al. Direct preference optimization with unobserved preference heterogeneity. 
arXiv, 2024.", + "[43] Haoxian Chen et al. Mallowspo: Fine-tune your llm with preference dispersions. arXiv, 2024.", + "[44] Shyam Sundhar Ramesh et al. Group robust preference optimization in reward-free rlhf. arXiv, 2024.", + "[45] Binwei Yao et al. No preference left behind: Group distributional preference optimization. *ICLR*, 2025.", + "[46] Afra Amini et al. Direct preference optimization with an offset. ACL Findings, 2024.", + "[47] Qi Gou and Cam-Tu Nguyen. Mixed preference optimization: Reinforcement learning with data selection and better reference model. arXiv, 2024.", + "[48] Shiqi Wang et al. Reward difference optimization for sample reweighting in offline RLHF. EMNLP Findings, 2024.", + "[49] Junkang Wu et al. $\\alpha$ -dpo: Adaptive reward margin is what direct preference optimization needs. arXiv, 2024.", + "[50] Hiroki Furuta et al. Geometric-averaged preference optimization for soft preference labels. NeurIPS, 2024.", + "[51] Junkang Wu et al. Beta-dpo: Direct preference optimization with dynamic beta. NeurIPS, 2024.", + "[52] Tetsuro Morimura et al. Filtered direct preference optimization, EMNLP, 2024.", + "[53] Pulkit Pattnaik et al. Enhancing alignment using curriculum learning & ranked preferences. EMNLP, 2024.", + "[54] Ilgee Hong et al. Adaptive preference scaling for reinforcement learning with human feedback. NeurIPS, 2024.", + "[55] Dahiyun Kim et al. Sdpo: Don't use your data all at once. arXiv, 2024.", + "[56] Runsheng Yu et al. Direct alignment of language models via quality-aware self-refinement. arXiv, 2024.", + "[57] Lou Jieming et al. Gap-aware preference optimization: Enhancing model alignment with perception margin. OpenReview, 2024.", + "[58] Jingyuan Ma et al. Plug-and-play training framework for preference optimization. arXiv, 2024.", + "[59] Sayak Ray Chowdhury et al. Provably robust DPO: Aligning language models with noisy feedback. ICML, 2024.", + "[60] Keyi Kong et al. 
Perplexity-aware correction for robust alignment with noisy preferences. NeurIPS, 2024.", + "[61] Xize Liang et al. Ropo: Robust preference optimization for large language models. arXiv, 2024.", + "[62] Dongyoung Kim et al. Spread preference annotation: Direct preference judgment for efficient LLM alignment. ICLR, 2025.", + "[63] Lingfan Zhang et al. Combating inherent noise for direct preference optimization. OpenReview, 2025.", + "[64] Shawn Im and Yixuan Li. Understanding generalization of preference optimization under noisy feedback. OpenReview, 2025.", + "[65] Yang Gao et al. Impact of preference noise on the alignment performance of generative language models. COLM, 2024.", + "[66] Junkang Wu et al. Towards robust alignment of language models: Distributionally robustifying direct preference optimization. ICLR, 2024.", + "[67] Kawin Ethayarajh et al. Model alignment as prospect theoretic optimization. ICML, 2024.", + "[68] Seungjae Jung et al. Binary classifier optimization for large language model alignment. arXiv, 2024.", + "[69] Teng Xiao et al. Cal-dpo: Calibrated direct preference optimization for language model alignment. NeurIPS, 2024.", + "[70] Igor Melnyk et al. Distributional preference alignment of llms via optimal transport. NeurIPS, 2024.", + "[71] Tianchi Cai et al. Ulma: Unified language model alignment with human demonstration and point-wise preference. arXiv, 2023.", + "[72] Huayu Chen et al. Noise contrastive alignment of language models with explicit rewards. NeurIPS, 2024.", + "[73] Yifan Zhang et al. General preference modeling with preference representations for aligning language models. arXiv, 2024.", + "[74] Rafael Rafailov et al. Direct preference optimization: Your language model is secretly a reward model. NeurIPS, 2023." 
+ ], + "bbox": [ + 508, + 55, + 921, + 930 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[75] Mohammad Gheshlaghi Azar et al. A general theoretical paradigm to understand learning from human preferences. AISTATS, 2024.", + "[76] Jinghong Chen et al. On extending direct preference optimization to accommodate ties. arXiv, 2024.", + "[77] Yuxin Jiang et al. Bridging and modeling correlations in pairwise data for direct preference optimization. arXiv, 2024.", + "[78] Xinghua Zhang et al. Iopo: Empowering llms with complex instruction following via input-output preference optimization. arXiv, 2024.", + "[79] Abbas Abdolmaleki et al. Preference optimization as probabilistic inference. ICLR, 2024.", + "[80] Yueqin Yin et al. Self-augmented preference optimization: Off-policy paradigms for language model alignment. arXiv, 2024.", + "[81] Shitong Duan et al. Negating negatives: Alignment with human negative samples via distributional dispreference optimization. arXiv, 2024.", + "[82] Ruiqi Zhang et al. Negative preference optimization: From catastrophic collapse to effective unlearning. COLM, 2024.", + "[83] Chongyu Fan et al. Simplicity prevails: Rethinking negative preference optimization for llm unlearning. arXiv, 2024.", + "[84] Yifan Zhong et al. Panacea: Pareto alignment via preference adaptation for llms. NeurIPS, 2024.", + "[85] Tianqi Liu et al. Lipo: Listwise preference optimization through learning-to-rank. arXiv, 2024.", + "[86] Mingye Zhu et al. LIRE: listwise reward enhancement for preference alignment. ACL, 2024.", + "[87] Yang Zhao et al. Ordinal preference optimization: Aligning human preferences via ndcg. arXiv, 2024.", + "[88] Jiacong Zhou et al. Optimizing preference alignment with differentiable ndcg ranking. arXiv, 2024.", + "[89] Fei Wang et al. 
mDPO: Conditional preference optimization for multimodal large language models. EMNLP, 2024.", + "[90] Yueqin Yin et al. Relative preference optimization: Enhancing llm alignment through contrasting responses across identical and diverse prompts. arXiv, 2024.", + "[91] Yuxiang Guo et al. Todo: Enhancing llm alignment with ternary preferences. ICLR, 2024.", + "[92] Rafael Rafailov et al. From r to $q^*$ : Your language model is secretly a q-function. COLM, 2024.", + "[93] Yongcheng Zeng et al. Token-level direct preference optimization. ICML, 2024.", + "[94] Aiwei Liu et al. Tis-dpo: Token-level importance sampling for direct preference optimization with estimated weights. ICLR, 2024.", + "[95] Fenia Christopoulou et al. Sparsepo: Controlling preference alignment of llms via sparse token masks. arXiv, 2024.", + "[96] Han Zhong et al. Dpo meets ppo: Reinforced token optimization for rlhf. arXiv, 2024.", + "[97] Kailai Yang et al. Selective preference optimization via token-level reward function estimation. arXiv, 2024.", + "[98] Qi Zhao et al. EPO: hierarchical LLM agents with environment preference optimization. EMNLP, 2024.", + "[99] Ruichen Shao et al. Earlier tokens contribute more: Learning direct preference optimization from temporal decay perspective. *ICLR*, 2025.", + "[100] Xin Lai et al. Step-dpo: Step-wise preference optimization for long-chain reasoning of llms. arXiv, 2024.", + "[101] Zimu Lu et al. Step-controlled dpo: Leveraging stepwise error for enhanced mathematical reasoning. arXiv, 2024.", + "[102] Xuan Zhang et al. Chain of preference optimization: Improving chain-of-thought reasoning in llms. NeurIPS, 2024.", + "[103] Yuxi Xie et al. Monte carlo tree search boosts reasoning via iterative preference learning. arXiv, 2024.", + "[104] Weibin Liao et al. Tpo: Aligning large language models with multi-branch & multi-step preference trees. arXiv, 2024.", + "[105] Hoang Anh Just et al. Data-centric human preference optimization with rationales. 
arXiv, 2024.", + "[106] Jiacai Liu et al. Improving multi-step reasoning abilities of large language models with direct advantage policy optimization. arXiv, 2024.", + "[107] Shuaijie She et al. MAPO: advancing multilingual reasoning through multilingual-alignment-as-preference optimization. ACL, 2024.", + "[108] Lifan Yuan et al. Advancing llm reasoning generalists with preference trees. arXiv, 2024.", + "[109] Richard Yuanzhe Pang et al. Iterative reasoning preference optimization. NeurIPS, 2024.", + "[110] Chao-Wei Huang and Yun-Nung Chen. Factalign: Long-form" + ], + "bbox": [ + 76, + 54, + 491, + 931 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "factuality alignment of large language models. arXiv, 2024.", + "[111] Wei Xiong et al. Building math agents with multi-turn iterative preference learning. *ICLR*, 2025.", + "[112] Yifan Song et al. Trial and error: Exploration-based trajectory optimization for lIm agents. ACL, 2024.", + "[113] Aobo Kong et al. Sdpo: Segment-level direct preference optimization for social agents. arXiv, 2025.", + "[114] Pranav Putta et al. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv, 2024.", + "[115] Wentao Shi et al. Direct multi-turn preference optimization for language agents. EMNLP, 2024.", + "[116] Haoran Xu et al. Contrastive preference optimization: Pushing the boundaries of LLM performance in machine translation. ICML, 2024.", + "[117] Jiwoo Hong et al. ORPO: Monolithic preference optimization without reference model. EMNLP, 2024.", + "[118] Shiva Kumar Pentyala et al. Paft: A parallel training paradigm for effective llm fine-tuning. arXiv, 2024.", + "[119] Songyang Gao et al. Linear alignment: A closed-form solution for aligning human preferences without tuning and feedback. ICML, 2024.", + "[120] Feifan Song et al. Icdpo: Effectively borrowing alignment capability of others via in-context direct preference optimization. 
arXiv, 2024.", + "[121] Shangmin Guo et al. Direct language model alignment from online ai feedback. arXiv, 2024.", + "[122] Biqing Qi et al. Online dpo: Online direct preference optimization with fast-slow chasing. arXiv, 2024.", + "[123] Weizhe Yuan et al. Self-rewarding language models. ICML, 2024.", + "[124] Wenda Xu et al. BPO: Staying close to the behavior LLM creates better online LLM alignment. EMNLP, 2024.", + "[125] Saeed Khaki et al. RS-DPO: A hybrid rejection sampling and direct preference optimization method for alignment of large language models. NAACL, 2024.", + "[126] Tianqi Liu et al. Statistical rejection sampling improves preference optimization. ICLR, 2024.", + "[127] Ruizhe Shi et al. The crucial role of samplers in online direct preference optimization. *ICLR*, 2025.", + "[128] Lichang Chen et al. Optune: Efficient online preference tuning. arXiv, 2024.", + "[129] Tianduo Wang et al. Self-training with direct preference optimization improves chain-of-thought reasoning. ACL, 2024.", + "[130] Jiafan He et al. Accelerated preference optimization for large language model alignment. arXiv, 2024.", + "[131] Wei Xiong et al. Iterative preference learning from human feedback: Bridging theory and practice for RLHF under KL-constraint. ICML, 2024.", + "[132] Yixin Liu et al. Comal: A convergent meta-algorithm for aligning llms with general preferences. arXiv, 2024.", + "[133] Jing Xu et al. Some things are more cringe than others: Iterative preference optimization with the pairwise cringe loss. arXiv, 2024.", + "[134] Jongwoo Ko et al. Sera: Self-reviewing and alignment of large language models using implicit reward margins. *ICLR*, 2025.", + "[135] Zhaoyang Wang et al. Cream: Consistency regularized self-rewarding language models. *ICLR*, 2025.", + "[136] Prasann Singhal et al. D2PO: Discriminator-guided DPO with response evaluation models. COLM, 2024.", + "[137] Aiwei Liu et al. 
Direct large language model alignment through self-rewarding contrastive prompt distillation. ACL, 2024.", + "[138] Tengyang Xie et al. Exploratory preference optimization: Provably sample-efficient exploration in rlhf with general function approximation. *ICLR*, 2025.", + "[139] Shenao Zhang et al. Self-exploring language models: Active preference elicitation for online alignment. arXiv, 2024.", + "[140] Shicong Cen et al. Value-incentivized preference optimization: A unified approach to online and offline rlhf. *ICLR*, 2025.", + "[141] Chenjia Bai et al. Online preference alignment for language models via count-based exploration. *ICLR*, 2025.", + "[142] Yuda Song et al. The importance of online data: Understanding preference fine-tuning via coverage. NeurIPS, 2024.", + "[143] Yaojie Shen et al. Aipo: Improving training objective for iterative preference optimization. arXiv, 2024.", + "[144] Yunhao Tang et al. Understanding the performance gap between online and offline alignment algorithms. arXiv, 2024.", + "[145] Shusheng Xu et al. Is DPO superior to PPO for LLM alignment? A comprehensive study. ICML, 2024.", + "[146] William Muldrew et al. Active preference learning for large" + ], + "bbox": [ + 506, + 54, + 921, + 931 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "language models. ICML, 2024.", + "[147] Seola Choi et al. Active preference optimization via maximizing learning capacity. OpenReview, 2024.", + "[148] Kaixuan Ji et al. Reinforcement learning from human feedback with active queries. arXiv, 2024.", + "[149] Nirjhar Das et al. Active preference optimization for sample efficient rlhf. arXiv, 2024.", + "[150] Zhanhui Zhou et al. Beyond one-preference-fits-all alignment: Multi-objective direct preference optimization. ACL Findings, 2024.", + "[151] Xingzhou Lou et al. 
Spo: Multi-dimensional preference sequential alignment with implicit reward modeling. arXiv, 2024.", + "[152] Yu Zhang et al. MOSLIM: Align with diverse preferences in prompts through reward classification. OpenReview, 2025.", + "[153] Anirudhan Badrinath et al. Hybrid preference optimization: Aug-mentation direct preference optimization with auxiliary objectives. arXiv, 2024.", + "[154] Yiju Guo et al. Controllable preference optimization: Toward controllable multi-objective alignment. EMNLP, 2024.", + "[155] Abhijnan Nath et al. Simultaneous reward distillation and preference learning: Get you a language model who can do both. arXiv, 2024.", + "[156] Zixiang Chen et al. Self-play fine-tuning converts weak language models to strong language models. ICML, 2024.", + "[157] Yue Wu et al. Self-play preference optimization for language model alignment. ICLR, 2025.", + "[158]Gokul Swamy et al. A minimaximalist approach to reinforcement learning from human feedback. ICML, 2024.", + "[159] Lin Gui et al. Bonbon alignment for large language models and the sweetness of best-of-n sampling. NeurIPS, 2024.", + "[160] Remi Munos et al. Nash learning from human feedback. ICML, 2024.", + "[161] Corby Rosset et al. Direct nash optimization: Teaching language models to self-improve with general preferences. arXiv, 2024.", + "[162] Daniele Calandriello et al. Human alignment of large language models through online preference optimisation. ICML, 2024.", + "[163] Eugene Choi et al. Self-improving robust preference optimization. *ICLR*, 2025.", + "[164] Haoyan Yang et al. Dynamic noise preference optimization for llm self-improvement via synthetic data. arXiv, 2025.", + "[165] Alexey Gorbatovski et al. Learn your reference model for real good alignment. arXiv, 2024.", + "[166] Yu Meng et al. Simpo: Simple preference optimization with a reference-free reward. NeurIPS, 2024.", + "[167] Teng Xiao et al. SimPER: A minimalist approach to preference alignment without hyperparameters. 
*ICLR*, 2025.", + "[168] Yixin Liu et al. Understanding reference policies in direct preference optimization. arXiv, 2024.", + "[169] Chaoqi Wang et al. Beyond reverse kl: Generalizing direct preference optimization with diverse divergence constraints. *ICLR*, 2023.", + "[170] Stewart Slocum et al. Diverse preference learning for capabilities and alignment. ICLR, 2025.", + "[171] Amitava Das et al. Dpo kernels: A semantically-aware, kernel-enhanced, and divergence-rich paradigm for direct preference optimization. arXiv, 2025.", + "[172] Mingye Zhu et al. FlipGuard: Defending preference alignment against update regression with constrained optimization. EMNLP, 2024.", + "[173] Qingyu Yin et al. Direct preference optimization using sparse feature-level constraints. arXiv, 2024.", + "[174] Yunhao Tang et al. Generalized preference optimization: A unified approach to offline alignment. ICML, 2024.", + "[175] Haozhe Ji et al. Towards efficient exact optimization of language model alignment. ICML, 2024.", + "[176] Arsalan Sharifnassab et al. Soft preference optimization: Aligning language models to expert distributions. arXiv, 2024.", + "[177] Janghwan Lee et al. Improving conversational abilities of quantized large language models via direct preference alignment. ACL, 2024.", + "[178] Audrey Huang et al. Correcting the mythos of kl-regularization: Direct alignment without overoptimization via chi-squared preference optimization. arXiv, 2025.", + "[179] Geon-Hyeong Kim et al. SafeDPO: A simple approach to direct preference optimization with enhanced safety. OpenReview, 2025.", + "[180] Akifumi Wachi et al. Stepwise alignment for constrained language model policy optimization. NeurIPS, 2024.", + "[181] Zixuan Liu et al. Enhancing llm safety via constrained direct" + ], + "bbox": [ + 75, + 55, + 491, + 931 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "preference optimization. 
arXiv, 2024.", + "[182] San Kim and Gary Geunbae Lee. Adversarial dpo: Harnessing harmful data for reducing toxicity with minimal impact on coherence and evasiveness in dialogue agents. arXiv, 2024.", + "[183] Andrew Lee et al. A mechanistic understanding of alignment algorithms: a case study on dpo and toxicity. ICML, 2024.", + "[184] Yiming Zhang et al. Backtracking improves generation safety. ICLR, 2025.", + "[185] Seongho Son et al. Right now, wrong then: Non-stationary direct preference optimization under preference drift. arXiv, 2024.", + "[186] Eugene Choi et al. Self-improving robust preference optimization. *ICLR*, 2025.", + "[187] Adam Fisch et al. Robust preference optimization through reward model distillation. arXiv, 2024.", + "[188] Yong Lin et al. On the limited generalization capability of the implicit reward model induced by direct preference optimization. EMNLP Findings, 2024.", + "[189] Fahim Tajwar et al. Preference fine-tuning of llms should leverage suboptimal, on-policy data. ICML, 2024.", + "[190] Hongyi Yuan et al. Rrrh: Rank responses to align language models with human feedback. NeurIPS, 2023.", + "[191] Ryan Park et al. Disentangling length from quality in direct preference optimization. ACL Findings, 2024.", + "[192] Junru Lu et al. Eliminating biased length reliance of direct preference optimization via down-sampled KL divergence. EMNLP, 2024.", + "[193] Weizhe Yuan et al. Following length constraints in instructions. arXiv, 2024.", + "[194] Kian Ahrabian et al. The hitchhiker's guide to human alignment with* po. arXiv, 2024.", + "[195] Wei Liu et al. Length desensitization in directed preference optimization. arXiv, 2024.", + "[196] Guanzheng Chen et al. LongPO: Long context self-evolution of large language models through short-to-long preference optimization. ICLR, 2025.", + "[197] Prasann Singhal et al. A long way to go: Investigating length correlations in RLHF. COLM, 2024.", + "[198] Kyle Richardson et al. 
Understanding the logic of direct preference alignment through logic. arXiv, 2024.", + "[199] Karel D'Oosterlinck et al. Anchored preference optimization and contrastive revisions: Addressing underspecification in alignment. arXiv, 2024.", + "[200] Arka Pal et al. Smaug: Fixing failure modes of preference optimisation with dpo-positive. arXiv, 2024.", + "[201] Yuzi Yan et al. 3d-properties: Identifying challenges in DPO and charting a path forward. ICLR, 2025.", + "[202] Duanyu Feng et al. Towards analyzing and understanding the limitations of dpo: A theoretical perspective. arXiv, 2024.", + "[203] Hui Yuan et al. A common pitfall of margin-based language model alignment: Gradient entanglement. *ICLR*, 2025.", + "[204] Noam Razin et al. Unintentional unalignment: Likelihood displacement in direct preference optimization. arXiv, 2024.", + "[205] Zhengyan Shi et al. Understanding likelihood over-optimisation in direct alignment algorithms. arXiv, 2024.", + "[206] Yong Lin et al. Mitigating the alignment tax of RLHF. EMNLP, 2024.", + "[207] Megh Thakkar et al. A deep dive into the trade-offs of parameter-efficient preference alignment techniques. ACL, 2024.", + "[208] Keming Lu et al. Online merging optimizers for boosting rewards and mitigating tax in alignment. arXiv, 2024.", + "[209] Angelica Chen et al. Preference learning algorithms do not learn preference rankings. NeurIPS, 2024.", + "[210] Wenyi Xiao et al. A comprehensive survey of direct preference optimization: Datasets, theories, variants, and applications. arXiv, 2024.", + "[211] Pierre Harvey Richemond et al. Offline regularised reinforcement learning for large language models alignment. arXiv, 2024.", + "[212] Christian Wirth et al. A survey of preference-based reinforcement learning methods. JMLR, 2017.", + "[213] Jiaming Ji et al. Ai alignment: A comprehensive survey. arXiv, 2023.", + "[214] Xinpeng Wang et al. On the essence and prospect: An investigation of alignment approaches for big models. 
*IJCAI*, 2024.", + "[215] Hannah Rose Kirk et al. The past, present and better future of feedback learning in large language models for subjective human preferences and values. EMNLP, 2023.", + "[216] Patrick Fernandes et al. Bridging the gap: A survey on integrating" + ], + "bbox": [ + 506, + 55, + 921, + 931 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "(human) feedback for natural language generation. TACL, 2023.", + "[217] Timo Kaufmann et al. A survey of reinforcement learning from human feedback. arXiv, 2023.", + "[218] Ralph Allan Bradley and Milton E Terry. Rank analysis of incomplete block designs: I. the method of paired comparisons. Biometrika, 1952.", + "[219] John Schulman et al. Proximal policy optimization algorithms. arXiv, 2017.", + "[220] Arash Ahmadian et al. Back to basics: Revisiting reinforce style optimization for learning from human feedback in llms. ACL, 2024.", + "[221] Ziniu Li et al. ReMax: A simple, effective, and efficient reinforcement learning method for aligning large language models. ICML, 2024.", + "[222] Zhihong Shao et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv, 2024.", + "[223] Jian Hu. Reinforce++: A simple and efficient approach for aligning large language models. arXiv, 2025.", + "[224] Chris Lu et al. Discovering preference optimization algorithms with and for large language models. NeurIPS, 2024.", + "[225] Hanyang Zhao et al. RainbowPO: A unified framework for combining improvements in preference optimization. ICLR, 2025.", + "[226] Hamish Ivison et al. Unpacking dpo and ppo: Disentangling best practices for learning from preference feedback. NeurIPS, 2024.", + "[227] Amir Saeidi et al. Insights into alignment: Evaluating dpo and its variants across multiple tasks. arXiv, 2024.", + "[228] Andi Nika et al. 
Reward model learning vs. direct policy optimization: a comparative analysis of learning from human preferences. ICML, 2024.", + "[229] Ziniu Li et al. When is rl better than dpo in rlhf? a representation and optimization perspective. *ICLR Tiny Papers*, 2024.", + "[230] Yao Zhao et al. Slic-hf: Sequence likelihood calibration with human feedback. arXiv, 2023.", + "[231] Feifan Song et al. Preference ranking optimization for human alignment. AAAI, 2024.", + "[232] Chaoqi Wang et al. Preference optimization with multi-sample comparisons. arXiv, 2024.", + "[233] Ziniu Li et al. Policy optimization in rlhf: The impact of out-of-preference data. arXiv, 2023.", + "[234] Lei Li et al. Improving reasoning ability of large language models via iterative uncertainty-based preference optimization. OpenReview, 2025.", + "[235] Abhimanyu Dubey et al. The llama 3 herd of models. arXiv, 2024.", + "[236] Lily H Zhang and Rajesh Ranganath. Win rate is all that can matter from preference data alone. OpenReview, 2025.", + "[237] Ganqu Cui et al. Ultrafeedback: Boosting language models with high-quality feedback. ICML, 2023.", + "[238] Jiaming Ji et al. Pku-saferlhf: Towards multi-level safety alignment for llms with human preference. arXiv, 2024.", + "[239] Zhilin Wang et al. Helpsteer: Multi-attribute helpfulness dataset for steerlm. arXiv, 2023.", + "[240] Hunter Lightman et al. Let's verify step by step. ICLR, 2023.", + "[241] Kawin Ethayarajh et al. Understanding dataset difficulty with v-usable information. ICML, 2022.", + "[242] Banghua Zhu et al. Starling-7b: Improving llm helpfulness & harmlessness with rlaif, 2023.", + "[243] Wing Lian et al. Openorca: An open dataset of gpt augmented flan reasoning traces, 2023.", + "[244] Luigi Daniele and Suphavadeeprasit. Amplify-instruct: Synthetically generated diverse multi-turn conversations for efficient llm training., 2023.", + "[245] Jiaming Ji et al. 
Beavertails: Towards improved safety alignment of llm via a human-preference dataset. NeurIPS, 2023.", + "[246] Andrew Maas et al. Learning word vectors for sentiment analysis. ACL, 2011.", + "[247] Michael Volske et al. Tl; dr: Mining reddit to learn automatic summarization. EMNLP Workshop, 2017.", + "[248] Deep Ganguli et al. Red teaming language models to reduce harms: Methods, scaling behaviors, and lessons learned. arXiv, 2022.", + "[249] Karl Cobbe et al. Training verifiers to solve math word problems. arXiv, 2021.", + "[250] Yann Dubois et al. Length-controlled alpacaeval: A simple way to debias automatic evaluators. arXiv, 2024.", + "[251] Lianmin Zheng et al. Judging llm-as-a-judge with mt-bench and chatbot arena. NeurIPS, 2023.", + "[252] Andy Zou et al. Universal and transferable adversarial attacks on" + ], + "bbox": [ + 76, + 55, + 491, + 931 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "aligned language models. arXiv, 2023.", + "[253] Tianle Li et al. From live data to high-quality benchmarks: The arena-hard pipeline. 2024.", + "[254] Stephanie Lin et al. Truthfulqa: Measuring how models mimic human falsehoods. arXiv, 2021.", + "[255] Jeffrey Zhou et al. Instruction-following evaluation for large language models. arXiv, 2023.", + "[256] Mirac Suzgun et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv, 2022.", + "[257] Dan Hendrycks et al. Measuring mathematical problem solving with the math dataset. arXiv, 2021.", + "[258] David Rein et al. Gpqa: A graduate-level google-proof q&a benchmark. COLM, 2024.", + "[259] Zayne Sprague et al. Musr: Testing the limits of chain-of-thought with multistep soft reasoning. arXiv, 2023.", + "[260] Yubo Wang et al. Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. NeurIPS, 2024.", + "[261] Fengqing Jiang et al. Identifying and mitigating vulnerabilities in llm-integrated applications. 
arXiv, 2023.", + "[262] Ning Ding et al. Enhancing chat language models by scaling high-quality instructional conversations. arXiv, 2023.", + "[263] Qiyu Wu et al. Word alignment as preference for machine translation. EMNLP, 2024.", + "[264] Yinghao Hu et al. Fine-tuning large language models for improving factuality in legal question answering. COLING, 2025.", + "[265] Leonidas Gee et al. Code-optimise: Self-generated preference data for correctness and efficiency. arXiv, 2024.", + "[266] Yibo Miao et al. Aligning codellms with direct preference optimization. arXiv, 2024.", + "[267] Kechi Zhang et al. Codedpo: Aligning code models with self generated and verified source code. arXiv, 2024.", + "[268] Guoxin Chen et al. Step-level value preference optimization for mathematical reasoning. EMNLP, 2024.", + "[269] Wen Lai et al. LLMs beyond English: Scaling the multilingual capability of LLMs with cross-lingual feedback. ACL Findings, 2024.", + "[270] Yuxin Chen et al. On softmax direct preference optimization for recommendation. NeurIPS, 2024.", + "[271] Zhuoxi Bai et al. Finetuning large language model for personalized ranking. arXiv, 2024.", + "[272] Yi Gu et al. Diffusion-rpo: Aligning diffusion models through relative preference optimization. arXiv, 2024.", + "[273] Shivanshu Shekhar et al. See-dpo: Self entropy enhanced direct preference optimization. arXiv, 2024.", + "[274] Shufan Li et al. Aligning diffusion models by optimizing human utility. NeurIPS, 2024.", + "[275] Navonil Majumder et al. Tango 2: Aligning diffusion-based text-to-audio generations through direct preference optimization. ACM MM, 2024.", + "[276] Bram Wallace et al. Diffusion model alignment using direct preference optimization. CVPR, 2024.", + "[277] Shentao Yang et al. A dense reward view on aligning text-to-image diffusion with preference. ICML, 2024.", + "[278] Kai Yang et al. Using human feedback to fine-tune diffusion models without any reward model. 
CVPR, 2024.", + "[279] Buhua Liu et al. Alignment of diffusion models: Fundamentals, challenges, and future. arXiv, 2024.", + "[280] Shengzhi Li et al. Multi-modal preference alignment remedies degradation of visual instruction tuning on language models. ACL, 2024.", + "[281] Ziqi Liang et al. AlignCap: Aligning speech emotion captioning to human preferences. EMNLP, 2024.", + "[282] Elmira Amirloo et al. Understanding alignment in multimodal llms: A comprehensive study. arXiv, 2024.", + "[283] Jinlan Fu et al. Chip: Cross-modal hierarchical direct preference optimization for multimodal llms. arXiv, 2025.", + "[284] Ruohong Zhang et al. Direct preference optimization of video large multimodal models from language model reward. arXiv, 2024.", + "[285] Yuxi Xie et al. V-DPO: Mitigating hallucination in large vision language models via vision-guided direct preference optimization. EMNLP Findings, 2024.", + "[286] Peng Xu et al. Lvlm-ehub: A comprehensive evaluation benchmark for large vision-language models. TPAMI, 2025.", + "[287] Zhongzhan Huang et al. A causality-aware paradigm for evaluating creativity of multimodal large language models. TPAMI, 2025." 
+ ], + "bbox": [ + 506, + 55, + 921, + 929 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 19 + } +] \ No newline at end of file diff --git a/data/2025/2503_11xxx/2503.11701/f561bad2-8e9b-4fb7-9083-b32d2bfd8f1f_model.json b/data/2025/2503_11xxx/2503.11701/f561bad2-8e9b-4fb7-9083-b32d2bfd8f1f_model.json new file mode 100644 index 0000000000000000000000000000000000000000..38ef40e9ed04d6163cf226786a81a491a51f0548 --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/f561bad2-8e9b-4fb7-9083-b32d2bfd8f1f_model.json @@ -0,0 +1,5795 @@ +[ + [ + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.921, + 0.043 + ], + "angle": 0, + "content": "1" + }, + { + "type": "title", + "bbox": [ + 0.123, + 0.067, + 0.873, + 0.102 + ], + "angle": 0, + "content": "A Survey of Direct Preference Optimization" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.116, + 0.895, + 0.152 + ], + "angle": 0, + "content": "Shunyu Liu, Wenkai Fang, Zetian Hu, Junjie Zhang, Yang Zhou, Kongcheng Zhang, Rongcheng Tu, Ting-En Lin, Fei Huang, Mingli Song, Yongbin Li, and Dacheng Tao, Fellow, IEEE" + }, + { + "type": "text", + "bbox": [ + 0.105, + 0.173, + 0.893, + 0.346 + ], + "angle": 0, + "content": "Abstract—Large Language Models (LLMs) have demonstrated unprecedented generative capabilities, yet their alignment with human values remains critical for ensuring helpful and harmless deployments. While Reinforcement Learning from Human Feedback (RLHF) has emerged as a powerful paradigm for aligning LLMs with human preferences, its reliance on complex reward modeling introduces inherent trade-offs in computational efficiency and training stability. In this context, Direct Preference Optimization (DPO) has recently gained prominence as a streamlined alternative that directly optimizes LLMs using human preferences, thereby circumventing the need for explicit reward modeling. 
Owing to its theoretical elegance and computational efficiency, DPO has rapidly attracted substantial research efforts exploring its various implementations and applications. However, this field currently lacks systematic organization and comparative analysis. In this survey, we conduct a comprehensive overview of DPO and introduce a novel taxonomy, categorizing previous works into four key dimensions: data strategy, learning framework, constraint mechanism, and model property. We further present a rigorous empirical analysis of DPO variants across standardized benchmarks. Additionally, we discuss real-world applications, open challenges, and future directions for DPO. This work delivers both a conceptual framework for understanding DPO and practical guidance for practitioners, aiming to advance robust and generalizable alignment paradigms. All collected resources are available and will be continuously updated at https://github.com/liushunyu/awesome-direct-preference-optimization." + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.357, + 0.878, + 0.371 + ], + "angle": 0, + "content": "Index Terms—Alignment, Direct Preference Optimization, Large Language Models, Reinforcement Learning from Human Feedback." + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.261, + 0.058, + 0.707 + ], + "angle": 270, + "content": "arXiv:2503.11701v1 [cs.LG] 12 Mar 2025" + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.431, + 0.23, + 0.446 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.455, + 0.493, + 0.703 + ], + "angle": 0, + "content": "The rapid advancement of Large Language Models (LLMs) has revolutionized artificial intelligence [1, 2, 3, 4, 5, 6, 7, 8], enabling unprecedented generative capabilities across diverse applications, such as dialogue systems [9, 10], code generation [11, 12, 13], and medical diagnosis [14, 15, 16, 17]. 
Models like OpenAI-o1 [18] and DeepSeekR1 [19] have demonstrated remarkable proficiency in understanding and generating human-like text, outperforming traditional language processing techniques [20]. However, their immense power also introduces significant risks: LLMs may inadvertently produce harmful content (e.g., jailbreak suggestion) [21], exhibit hallucination behaviors (e.g., misinformation) [22], or propagate sociocultural stereotypes (e.g., biased recommendations) [23]. Ensuring that these models align with human values (producing outputs that are helpful, harmless, and honest) has thus become a cornerstone of responsible AI development [24]." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.703, + 0.493, + 0.734 + ], + "angle": 0, + "content": "The critical challenge of aligning LLMs with human values stems from the inherent complexity of encoding abstract" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.747, + 0.492, + 0.943 + ], + "angle": 0, + "content": "This research is supported by the RIE2025 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) (Award I2301E0026), administered by A\\*STAR, as well as supported by Alibaba Group and NTU Singapore through Alibaba-NTU Global e-Sustainability CorpLab (ANGEL). (Corresponding author: Dacheng Tao.) Shunyu Liu, Junjie Zhang, Rongcheng Tu and Dacheng Tao are with Nanyang Technological University, Singapore (e-mail: shunyu.liu@ntu.edu.sg; junjie.zhang@ntu.edu.sg; turongcheng@gmail.com; dacheng.tao@ntu.edu.sg). Wenkai Fang, Yang Zhou, Kongcheng Zhang, and Mingli Song are with the College of Computer Science and Technology, Zhejiang University, China (e-mail: wenkfang@zju.edu.cn; imzhouyang@zju.edu.cn; zhangkc@zju.edu.cn; brooksong@zju.edu.cn). Zetian Hu is with the School of Aerospace Engineering, Tsinghua University, China (e-mail: huzt22@mails.tsinghua.edu.cn). 
Ting-En Lin, Fei Huang, and Yongbin Li are with the Tongyi Lab, Alibaba Group, China (e-mail: ting-en.lte@alibaba-inc.com; f.huang@alibaba-inc.com; shuide.lyb@alibaba-inc.com)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.432, + 0.925, + 0.607 + ], + "angle": 0, + "content": "ethical principles into concrete model behaviors [25, 26, 27]. Traditional approaches, such as rule-based filtering or supervised learning on curated datasets, often prove inadequate due to their inability to generalize across diverse contexts and adapt to evolving societal norms [28]. The emergence of preference-based alignment paradigms addresses these limitations by framing the problem as optimizing for human feedback rather than inflexible heuristics [29, 30, 31, 32]. This shift recognizes that LLM decision-making often involves nuanced trade-offs between competing values, requiring flexible frameworks capable of incorporating subjective human preferences [33]." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.608, + 0.924, + 0.928 + ], + "angle": 0, + "content": "Building upon these insights, Reinforcement Learning from Human Feedback (RLHF) [34, 35] has emerged as the predominant alignment paradigm, leveraging human preferences to guide model optimization. In the RLHF pipeline, human annotators first rank the outputs generated by the language model, and these comparisons are used to train a reward model that quantifies human preferences. The language model is then fine-tuned using RL guided by this reward model, enabling the language model to align with human values by maximizing the predicted rewards. The success of RLHF in aligning models like ChatGPT [36, 37] and Claude [38, 39] underscores its practical utility. By translating subjective human preferences into an objective reward signal, RLHF facilitates the optimization of model behavior for value alignment. However, this RLHF paradigm suffers from critical limitations of computational complexity and training instability. 
Training a separate reward model demands substantial computational resources and high-quality human preference data, which scales poorly across different domains. Moreover, the RL phase often struggles with optimization challenges, such as reward hacking [40] and mode collapse [41]." + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.928, + 0.924, + 0.943 + ], + "angle": 0, + "content": "These limitations have spurred interest in alternative" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "2" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.055, + 0.92, + 0.598 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.072, + 0.606, + 0.924, + 0.631 + ], + "angle": 0, + "content": "Fig. 1: A taxonomy of DPO. We categorize existing DPO works into four branches: data strategy, learning framework, constraint mechanism, and model property. Different colored boxes indicate different categories and their corresponding representative references." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.642, + 0.496, + 0.934 + ], + "angle": 0, + "content": "alignment methods that bypass reward modeling while preserving the benefits of preference-based learning. Direct Preference Optimization (DPO) [74, 210] represents a groundbreaking shift in this direction. Unlike RLHF, DPO reframes alignment as a supervised learning problem, directly optimizing the LLM policy using preference data without explicit reward modeling. By leveraging a closed-form mapping between reward functions and optimal policies, DPO eliminates the need for iterative RL training, reducing computational overhead and improving stability. Due to its inherent advantages, DPO has rapidly gained increasing attention from research communities. Existing studies vary widely in data strategies (e.g., point-wise v.s. pair-wise feedback) [67, 211], learning frameworks (e.g., offline v.s. 
online learning) [121, 122, 126], constraint mechanisms (e.g., different divergence constraints) [169, 171], and model properties (e.g., length bias) [191, 195]. Recent advancements in DPO variants have demonstrated remarkable efficacy in enhancing model alignment with human preferences, achieving unprecedented success across diverse domains [32]." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.642, + 0.925, + 0.701 + ], + "angle": 0, + "content": "These developments position DPO-based approaches as a compelling alternative to conventional RLHF paradigms for preference alignment tasks. However, despite its promise, the DPO research landscape remains fragmented." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.709, + 0.924, + 0.943 + ], + "angle": 0, + "content": "Several surveys related to DPO have been published in recent years, yet they exhibit notable limitations in their scope and analysis of DPO. (1) Scope limitations. While an early survey of [212] presents a comprehensive overview of preference-based RL methods, it predates the advent of DPO and does not address its applications to modern LLMs. Recent surveys on alignment [24, 26, 213, 214] provide broad overviews of LLM alignment techniques but only offer cursory summaries of DPO-related approaches without in-depth analysis. Similarly, surveys on learning from human feedback [30, 215, 216, 217] also only briefly mention DPO as a potential alternative. (2) Taxonomy deficiencies. Gao et al. [29] and Winata et al. [32] introduce a simplified taxonomy for preference learning, while overlooking technical distinctions within its broad categorization. In contrast, Wang et al. 
[31] attempt to classify preference learning across dimensions" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.054, + 0.493, + 0.199 + ], + "angle": 0, + "content": "such as reinforcement learning, reward modeling, feedback, and optimization. However, this taxonomy suffers from significant conceptual overlaps (e.g. reinforcement learning inherently involves optimization). A recent work by Xiao et al. [210] categorizes DPO studies through isolated research questions, which, while useful for problem identification, fragments the methodological connections. Our survey addresses these gaps by presenting the first comprehensive analysis specifically focused on DPO. The main contributions of this survey are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.203, + 0.493, + 0.319 + ], + "angle": 0, + "content": "- In this survey, we introduce a novel taxonomy that categorizes existing DPO works into four key dimensions based on different components of the DPO loss: data strategy, learning framework, constraint mechanism, and model property, as shown in Fig. 1. This taxonomy provides a systematic framework for understanding the methodological evolution of DPO and highlights the key distinctions between different variations." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.32, + 0.493, + 0.393 + ], + "angle": 0, + "content": "- We conduct a rigorous empirical analysis of DPO variants across standardized benchmarks, revealing critical insights into their performance in diverse scenarios. This analysis offers a comprehensive evaluation of DPO variants and provides practical guidance for practitioners." 
+ }, + { + "type": "text", + "bbox": [ + 0.084, + 0.393, + 0.493, + 0.481 + ], + "angle": 0, + "content": "- We discuss real-world applications of DPO and highlight its potential to democratize alignment research by enabling efficient and scalable preference learning across diverse domains. We also outline open challenges and future directions for DPO research, emphasizing the need for robust and generalizable alignment paradigms." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.203, + 0.493, + 0.481 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.484, + 0.493, + 0.616 + ], + "angle": 0, + "content": "The remainder of this survey is organized as follows. Section 2 introduces the background and formulation of DPO. Section 3 presents a taxonomy of DPO, categorizing existing works based on key dimensions. Section 4 describes standardized benchmarks for evaluating DPO methods and presents empirical results. Section 5 discusses real-world applications of DPO and highlights its potential. Section 6 outlines open challenges and future directions for DPO research. Finally, Section 7 concludes the survey." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.642, + 0.385, + 0.656 + ], + "angle": 0, + "content": "2 BACKGROUND AND FORMULATION" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.664, + 0.495, + 0.945 + ], + "angle": 0, + "content": "Preference learning aims to train language model policies to generate responses that better align with human preferences. Specifically, we denote the language model policy as \\(\\pi(y|x)\\), where \\(x\\) represents the input prompt and \\(y\\) is a candidate response (completion). A language model can be viewed as an autoregressive function that sequentially predicts tokens based on prior context. Mathematically, this is expressed as: \\(\\pi(y|x) = \\prod_{t=1}^{T} \\pi(y_t | y_{ 0\\) is a hyperparameter that controls the strength of the Kullback-Leibler (KL) divergence penalty. 
Here, the term \\(\\log \\pi_{\\theta}(\\cdot |x) / \\pi_{\\mathrm{ref}}(\\cdot |x)\\) represents the KL divergence between the current policy \\(\\pi_{\\theta}\\) and a reference policy \\(\\pi_{\\mathrm{ref}}\\). In practice, the reference policy \\(\\pi_{\\mathrm{ref}}\\) is set to the SFT model \\(\\pi_{\\mathrm{sft}}\\), ensuring that the updated policy remains close to the initial model." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.87, + 0.925, + 0.945 + ], + "angle": 0, + "content": "To optimize the above objective, Proximal Policy Optimization (PPO) [219] has emerged as a promising RL algorithm for LLMs. PPO stabilizes training by constraining policy updates within a trust region via a clipped objective, which prevents significant deviations from the previous" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "4" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.054, + 0.493, + 0.171 + ], + "angle": 0, + "content": "policy. However, PPO requires an additional critic model to estimate value functions for advantage calculation, thereby introducing extra computational and memory overhead. To address this, recent methods, such as RLOO [220], ReMax [221], GRPO [222], and Reinforce++ [223], introduce critic-free advantage estimation to reduce resource demands while maintaining stable optimization, making them more scalable for large-scale LLM training." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.186, + 0.343, + 0.201 + ], + "angle": 0, + "content": "2.2 Direct Preference Optimization" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.204, + 0.491, + 0.307 + ], + "angle": 0, + "content": "DPO offers an alternative that streamlines the training process by directly optimizing the policy with preference data [74, 224, 225, 226, 227, 228, 229], thereby eliminating the need for explicit reward modeling in RLHF. The key idea of DPO is a closed-form solution of Eq. 
3 that connects reward with the optimal policies. Specifically, the optimal policy corresponding to a given \\( r \\) is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.135, + 0.312, + 0.493, + 0.345 + ], + "angle": 0, + "content": "\\[\n\\pi^ {*} (y | x) = \\frac {1}{Z (x)} \\pi_ {\\mathrm {r e f}} (y | x) \\exp \\left(\\frac {1}{\\beta} r (x, y)\\right), \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.349, + 0.408, + 0.364 + ], + "angle": 0, + "content": "where the partition function \\( Z(x) \\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.15, + 0.369, + 0.493, + 0.405 + ], + "angle": 0, + "content": "\\[\nZ (x) = \\sum_ {y} \\pi_ {\\mathrm {r e f}} (y | x) \\exp \\left(\\frac {1}{\\beta} r (x, y)\\right). \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.41, + 0.492, + 0.44 + ], + "angle": 0, + "content": "By rearranging the above equation, the reward \\( r \\) can be recovered from the optimal policy \\( \\pi^{*} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.149, + 0.445, + 0.493, + 0.477 + ], + "angle": 0, + "content": "\\[\nr (x, y) = \\beta \\log \\frac {\\pi^ {*} (y | x)}{\\pi_ {\\operatorname {r e f}} (y | x)} + \\beta \\log Z (x). \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.481, + 0.492, + 0.541 + ], + "angle": 0, + "content": "Notice that the partition function \\( Z(x) \\) depends only on the prompt \\( x \\). By substituting this expression into the preference model of Eq. 
1, the preference probability model that \\( y_{w} \\) is preferred over \\( y_{l} \\) becomes:" + }, + { + "type": "equation", + "bbox": [ + 0.085, + 0.545, + 0.493, + 0.589 + ], + "angle": 0, + "content": "\\[\nP \\left(y _ {w} \\succ y _ {l} | x\\right) = \\sigma \\left(\\beta \\log \\frac {\\pi^ {*} \\left(y _ {w} \\mid x\\right)}{\\pi_ {\\text {r e f}} \\left(y _ {w} \\mid x\\right)} - \\beta \\log \\frac {\\pi^ {*} \\left(y _ {l} \\mid x\\right)}{\\pi_ {\\text {r e f}} \\left(y _ {l} \\mid x\\right)}\\right). \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.589, + 0.493, + 0.634 + ], + "angle": 0, + "content": "Based on the above preference probability model, DPO directly optimizes the language mode policy \\(\\pi_{\\theta}\\) by minimizing the following negative log-likelihood loss function:" + }, + { + "type": "equation", + "bbox": [ + 0.078, + 0.645, + 0.493, + 0.713 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {D P O}} (\\theta) = \\\\ - \\mathbb {E} _ {(x, y _ {w}, y _ {l}) \\sim \\mathcal {D}} \\left[ \\log \\sigma \\left(\\beta \\log \\frac {\\pi_ {\\theta} (y _ {w} | x)}{\\pi_ {\\text {r e f}} (y _ {w} | x)} - \\beta \\log \\frac {\\pi_ {\\theta} (y _ {l} | x)}{\\pi_ {\\text {r e f}} (y _ {l} | x)}\\right) \\right], \\tag {8} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.718, + 0.493, + 0.822 + ], + "angle": 0, + "content": "where the KL constraint is implicitly integrated through the use of the reference model \\(\\pi_{\\mathrm{ref}}\\). By minimizing this DPO loss, we directly train the policy to satisfy human preferences without resorting to a separate reward modeling stage or using reinforcement learning optimization as in RLHF, significantly reducing implementation complexity while improving training stability." 
+ }, + { + "type": "title", + "bbox": [ + 0.073, + 0.837, + 0.341, + 0.852 + ], + "angle": 0, + "content": "2.3 Other Preference Optimization" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.855, + 0.493, + 0.945 + ], + "angle": 0, + "content": "In addition to DPO, several concurrent preference optimization methods [190, 230, 231] have been proposed that offer alternative approaches to RLHF. These methods explore different strategies for optimizing LLMs to align with human preference without RL. Below, we provide a brief introduction to these approaches." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.054, + 0.785, + 0.068 + ], + "angle": 0, + "content": "2.3.1 Sequence Likelihood Calibration" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.07, + 0.923, + 0.128 + ], + "angle": 0, + "content": "Zhao et al. [230] propose Sequence Likelihood Calibration with Human Feedback (SLiC-HF) to directly align LLMs with human preferences. Specifically, the loss function of SLiC-HF is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.521, + 0.138, + 0.923, + 0.173 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {S L i C - H F}} (\\theta) = \\max (0, \\delta - \\log \\pi_ {\\theta} (y _ {w} | x) + \\log \\pi_ {\\theta} (y _ {l} | x)) \\\\ - \\lambda \\log \\pi_ {\\theta} (y ^ {*} | x), \\tag {9} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.177, + 0.924, + 0.266 + ], + "angle": 0, + "content": "where the first term is the rank calibration loss with \\(\\delta\\) as a margin hyperparameter, and the second term is the cross-entropy regularization loss with \\(\\lambda\\) as a regularization weight. \\(y^{*}\\) is obtained from either high-quality supervised responses in the SFT dataset or the top-ranked candidate response generated by the SFT model." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.273, + 0.863, + 0.287 + ], + "angle": 0, + "content": "2.3.2 Rank Responses to Align Human Feedback" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.289, + 0.924, + 0.407 + ], + "angle": 0, + "content": "Yuan et al. [190] introduce Rank Responses to align Human Feedback (RRHF) for LLMs. RRHF extends pair-wise ranking by considering the list-wise ranking order of multiple responses, thus better utilizing the preference information. For an input prompt \\( x \\) and \\( N \\) candidate responses \\( \\{y_i\\}_{i=1}^N \\), it optimizes the model to assign higher probabilities to higher-ranked responses via a ranking loss and directly supervises the best response using cross-entropy as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.521, + 0.414, + 0.923, + 0.469 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {R R H F}} (\\theta) = \\sum_ {r _ {i} < r _ {j}} \\max \\left(0, \\frac {\\log \\pi_ {\\theta} (y _ {i} | x)}{| | y _ {i} | |} - \\frac {\\log \\pi_ {\\theta} (y _ {j} | x)}{| | y _ {j} | |}\\right) \\\\ - \\lambda \\log \\pi_ {\\theta} \\left(y ^ {*} \\mid x\\right), \\tag {10} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.474, + 0.923, + 0.548 + ], + "angle": 0, + "content": "where \\( r_i = r_\\phi(x, y_i) \\) represents the reward of the response \\( y_i \\) and \\( y^* = \\arg \\max_{y_i} r_i \\) is the response with the highest reward. Although RRHF avoids the need for reinforcement learning in RLHF, it still utilizes a reward model \\( r_\\phi \\) to rank candidate responses based on human preferences." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.555, + 0.791, + 0.57 + ], + "angle": 0, + "content": "2.3.3 Preference Ranking Optimization" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.572, + 0.924, + 0.644 + ], + "angle": 0, + "content": "Similarly, Song et al. 
[231] propose Preference Ranking Optimization (PRO) to align LLMs with human preferences by leveraging multiple responses \\(\\{y_{i}\\}_{i = 1}^{N}\\) with the human-annotated order \\(y_{1} > y_{2} > \\dots >y_{N}\\). The loss function of PRO is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.517, + 0.647, + 0.924, + 0.707 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {P R O}} (\\theta) = - \\sum_ {i = 1} ^ {N - 1} \\log \\frac {\\exp \\left(\\frac {1}{\\| y _ {i} \\|} \\log \\pi_ {\\theta} \\left(y _ {i} | x\\right) / \\mathcal {T} _ {i} ^ {i}\\right)}{\\sum_ {j = i} ^ {N} \\exp \\left(\\frac {1}{\\| y _ {j} \\|} \\log \\pi_ {\\theta} \\left(y _ {j} | x\\right) / \\mathcal {T} _ {i} ^ {j}\\right)}, \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.707, + 0.923, + 0.796 + ], + "angle": 0, + "content": "where the dynamic penalty temperature is defined as \\(\\mathcal{T}_i^j = 1 / (r_\\phi (x,y^j) - r_\\phi (x,y^i))\\) and \\(\\mathcal{T}_i^i = \\min_{i < j}\\mathcal{T}_i^j\\). This temperature ensures that the probability gap between higher-ranked and lower-ranked responses is adaptively scaled according to their reward differences, thereby stabilizing the optimization process." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.806, + 0.722, + 0.821 + ], + "angle": 0, + "content": "3 A TAXONOMY OF DPO" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.826, + 0.924, + 0.942 + ], + "angle": 0, + "content": "In this section, we introduce a novel taxonomy that categorizes existing DPO works based on four key dimensions: data strategy, learning framework, constraint mechanism, and model property. As illustrated in Fig. 1, these four dimensions are derived from different components of the DPO loss, providing a systematic framework for understanding the methodological evolution of DPO and highlighting the key distinctions between different variations." 
+ } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "5" + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.053, + 0.274, + 0.069 + ], + "angle": 0, + "content": "3.1 Data Strategy of DPO" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.074, + 0.493, + 0.149 + ], + "angle": 0, + "content": "The data strategy constitutes the foundational pillar of DPO, focusing on how to leverage diverse types of preference data for training LLMs. As shown in Fig. 2, our taxonomy identifies three principal axes of data strategy: quality, feedback, and granularity." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.164, + 0.217, + 0.179 + ], + "angle": 0, + "content": "3.1.1 Data Quality" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.183, + 0.493, + 0.286 + ], + "angle": 0, + "content": "The quality of preference data is a critical factor in determining the effectiveness of DPO training. High-quality data ensures that LLMs effectively learn to align with human preferences, while low-quality data may introduce noise and bias, leading to suboptimal model performance. We categorize data quality considerations into three key aspects: heterogeneity, distinguishability, and noise." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.287, + 0.495, + 0.681 + ], + "angle": 0, + "content": "(a) Data Heterogeneity. Conventional DPO methods assume uniform human preferences when annotating data, thereby overlooking the diversity among annotators. This assumption often skews the model toward the preferences of the majority while neglecting minority viewpoints, potentially leading to biases and unfair treatment of underrepresented groups. To address this issue, Chidambaram et al. [42] propose EM-DPO, which learns the distribution of different preference types and their corresponding response strategies. 
Building on this, they introduce the MinMax-DPO algorithm, which selects a strategy by minimizing the maximum regret across subgroups, ensuring a more balanced representation of preferences among all groups. MallowsPO [43] decomposes the implicit rewards in DPO into prompt dispersion and response scaling rewards. It introduces a novel objective function to capture human preferences for diverse responses to the same prompt. GRPO [44] formulates an objective function that minimizes the loss for the worst-case group, thereby ensuring fairness by prioritizing the disadvantaged groups in the optimization process. GDPO [45] models the language generation process as a combination of belief distribution prediction and belief-based response generation. The corresponding GDPO loss function consists of belief calibration loss and belief-conditioned preference alignment loss. The former encourages the model to capture the diversity of beliefs across groups, while the latter ensures that generated responses align with the given belief." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.681, + 0.495, + 0.945 + ], + "angle": 0, + "content": "(b) Data Distinguishability. A key limitation of DPO is its inability to account for the distinguishability of preference between responses [46, 50, 51, 56, 57]. In some cases, the preferred response is only marginally better than the dispreferred one, while in others, the dispreferred response contains harmful or misleading content, making it significantly worse. Thus, optimization should focus more on cases with substantial preference differences while reducing the effort spent on minor differences. However, most existing methods treat all samples equally, ignoring this data distinguishability. To address this, ODPO [46] introduces a monotonically increasing offset function, requiring the reward of the preferred response to exceed that of the dispreferred one by a certain margin. This ensures stronger updates for larger preference gaps. 
Similarly, Ada-DPO [54] introduces an instance-specific nonlinear scaling parameter, assigning larger weights to strong preference pairs and smaller weights to ambiguous ones based on the reward differences, thereby capturing" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.056, + 0.92, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.59, + 0.385, + 0.838, + 0.399 + ], + "angle": 0, + "content": "Fig. 2: An overview of DPO data strategy." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.414, + 0.925, + 0.793 + ], + "angle": 0, + "content": "different levels of data distinguishability. DPO-rc [48] also incorporates the preference reward difference as a coefficient in the loss function. \\(\\alpha\\)-DPO [49] introduces an adaptive preference distribution to obtain dynamic reward margins based on the distribution difference between the policy and reference models. \\(\\beta\\)-DPO [51] analyzes the optimal \\(\\beta\\) parameter for datasets with different reward margins, which dynamically adjusts \\(\\beta\\) based on batch-level reward differences. They also introduce \\(\\beta\\)-guided data filtering to prioritize valuable training data. Curri-DPO [53] sorts preference pairs by reward differences and trains progressively from large to small differences, enabling curricular learning. Similarly, MPO [47] utilizes a reward model to score responses generated by the SFT model, constructing a preference dataset and partitioning it based on preference differences to learn from simple to complex tasks. sDPO [55] computes reward accuracy for different datasets based on an initial target model and partitions the dataset in descending order of accuracy, allowing the model to first optimize on simpler samples. Ma et al. [58] propose a preference dataset construction method that adjusts update weights based on response accuracy, assigning lower weights when the model demonstrates higher proficiency. 
Furthermore, fDPO [52] enhances DPO training by filtering out samples where the generated response of the model policy surpasses the preferred dataset response in reward score." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.797, + 0.924, + 0.943 + ], + "angle": 0, + "content": "(c) Data Noise. Human-generated preference annotations often contain inconsistencies, errors, or noise, negatively affecting the performance of DPO. Such noisy data can mislead models, impairing their ability to accurately capture true preferences and generalize effectively to unseen data. Im and Li [64] analyze how noisy feedback influences the generalization performance of preference optimization, showing that increased noise results in higher generalization risks. Specifically, standard DPO loss functions can yield biased estimates under noisy conditions. To address this" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "6" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.054, + 0.492, + 0.535 + ], + "angle": 0, + "content": "issue, rDPO [59] proposes to enhance DPO robustness against noisy annotations and improve overall training performance. Zhang et al. [63] introduce a noise-aware strategy leveraging annotator confidence and stability to identify and downweight noisy samples during training. They also propose an adaptive reward margin, emphasizing clean samples to improve learning effectiveness. Complementary to these approaches, PerpCorrect [60] employs a data-driven method to correct noisy annotations directly in the dataset. It trains a proxy language model on both clean and noisy samples, distinguishing noise through perplexity differences to improve dataset quality. To systematically explore noise effects, Gao et al. [65] artificially inject various noise types (e.g., Gaussian noise) into datasets, controlling noise intensity via hyperparameters. 
Their analysis highlights how noise impacts model alignment, guiding future research towards mitigating such negative effects. To address the vulnerability of DPO in noisy environments, ROPO [61] introduces a regularization term to enhance noise tolerance. Additionally, ROPO employs a robust-guided rejection sampling technique. This technique supplements the dataset with samples that contribute minimally to the loss, thereby improving the overall data quality. Kim et al. [62] propose the SPA framework, using model-generated responses and associated confidence scores to detect noise in annotations. SPA further incorporates smoothing techniques into the loss function to alleviate the noise problem. Finally, Wu et al. [66] categorize noise into two types: point noise (single annotation errors) and pairwise noise (errors between annotated pairs). While DPO naturally handles point noise well, it struggles with pairwise noise. Their proposed Dr. DPO introduces a novel loss function explicitly designed for robustness against both point and pairwise noise." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.547, + 0.276, + 0.56 + ], + "angle": 0, + "content": "3.1.2 Preference Feedback" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.564, + 0.491, + 0.68 + ], + "angle": 0, + "content": "Preference feedback refers to the label signals provided by annotators regarding their preferences for different responses. It can be categorized into point-wise, pair-wise, and list-wise feedback. Point-wise feedback evaluates each response independently, assigning a score or labeling it as positive or negative. Pair-wise feedback compares two responses to determine which one is preferred, while list-wise feedback ranks multiple responses." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.681, + 0.492, + 0.943 + ], + "angle": 0, + "content": "(a) Point-Wise Feedback. Point-wise feedback is the basic form of feedback. 
It refers to the type of feedback where individual outputs or samples are evaluated independently, rather than through comparisons with other outputs. This form of feedback is characterized by its simplicity and directness, focusing on the quality or relevance of a single response or item. The predominant methodology in RLHF [35] employs point-wise reward signals generated by reward models to optimize policy models. Similarly, KTO [67] directly maximizes the utility of model generations using loss functions based on prospect theory rather than the log-likelihood of preferences. It requires only a binary signal indicating whether an output is desirable or undesirable for a given input. Furthermore, BCO [68] builds upon the concepts introduced in KTO and explores a new approach to aligning with binary signals. While KTO focuses on optimizing human utility, BCO introduces a binary classifier framework incorporating reward shift and distribution matching that implicitly" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.054, + 0.921, + 0.344 + ], + "angle": 0, + "content": "minimizes the DPO loss. Chen et al. [72] and GPO [73] adopt explicit rewards using Noise Contrastive Alignment (NCA) and General Preference Model (GRM) respectively, and then directly optimize language model policies from point-wise preference data with rewards. However, some methods leverage implicit reward signals to refine model behaviors. To ensure that the learned implicit rewards are comparable to the ground-truth rewards, Cal-DPO [69] introduces a calibration term to the preference optimization objective, which prevents the likelihood of chosen responses from decreasing during training. ULMA [71] unifies human demonstration and point-wise preference data into a single framework and handles positive and negative samples with a hybrid objective function. 
Unlike them, DRO [211] adopts a simple mean-squared objective to optimize the model policy and value function jointly for a single trajectory. Additionally, AOT [70] casts the distributional preference constraint as an optimal transport problem with a convex cost function. The key idea is to minimize the violation of stochastic dominance using a smooth, convex cost function." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.346, + 0.921, + 0.943 + ], + "angle": 0, + "content": "(b) Pair-Wise Feedback. Pair-wise feedback focuses on comparing pairs of data or actions to determine their relative quality or preference. Building upon the theoretical framework of RLHF, DPO implements this paradigm through the utilization of pair-wise preference data, thereby fitting an implicit reward model. Azar et al. [75] introduces a general theoretical framework to unify existing RLHF and DPO methods. The proposed Identity-Preference Optimization (IPO) directly optimizes policies from preferences without relying on reward modeling or the Bradley-Terry assumption, thereby avoiding overfitting issues observed in DPO. Subsequently, DPO-RK and DPO-R [76] integrate the Rao-Kupper and Davidson models into the DPO training objective respectively, thereby extending the capabilities of DPO by explicitly modeling ties in pairwise comparisons. BMC [77] further addresses a key limitation of the weak correlation between winning and losing responses in pairwise data. Specifically, BMC uses \"Bridging\" to enhance the correlation between winning and losing responses by increasing the consistency and informativeness of pairwise preference signals. However, previous attempts for aligning LLMs primarily focus on optimizing the model's output preferences given an instruction, which struggles to effectively perceive the fine-grained constraints within complex instructions. 
Thus IOPO [78] extends traditional alignment methods by considering both input and output preferences to better understand the constraints within the instructions. As current methods rely heavily on paired preference data (i.e., explicitly labeled preferred vs. dispreferred examples), they can be limiting in scenarios where such paired data is unavailable or insufficient. SAPO [80] addresses this issue based on the concept of self-play, which enhances data exploration and exploitation by automatically generating negative samples and integrating off-policy learning. Furthermore, PMPO [79] extends the EM algorithm to incorporate both preferred and dispreferred outcomes. By introducing the probability distribution of dis-preferred outcomes, PMPO can optimize using both types of samples, even when only negative feedback is available. Similarly, D2O [81] avoids harmful information by maximizing the discrepancy between the generated responses and the negative samples. NPO [82]" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.054, + 0.49, + 0.126 + ], + "angle": 0, + "content": "and SimNPO [83] achieve the goal of forgetting the negative impact by regulating the model's prediction probabilities on negative datasets to be as minimal as possible, where SimNPO further eliminates the reference model bias issue inherent in NPO." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.126, + 0.491, + 0.784 + ], + "angle": 0, + "content": "(c) List-Wise Feedback. List-wise feedback refers to the type of feedback where multiple outputs or responses generated by the model for a given input are evaluated collectively as a list. This approach considers the relative ranking or ordering among the outputs, rather than focusing on individual outputs in isolation. 
Panacea [84] reframes alignment as a Multi-Dimensional Preference Optimization (MDPO) problem and introduces a method that aims to learn the entire Pareto front to accommodate diverse user preferences. In short, Panacea is designed to adapt a single model to list-wise preferences in a Pareto-optimal manner. LiPO [85] and LIRE [86] also treat LM alignment as a list-wise ranking problem, drawing on the rich literature of Learning-To-Rank (LTR). Specifically, LiPO introduces a specific method LiPO-λ, which leverages a list-wise ranking objective that weights each preference pair based on the difference in ranking metrics; while LIRE optimizes the response probability by calculating the exponential probability distribution and uses the reward model to directly guide the optimization process. To better capture the relative proximity within ordinal multiple responses, OPO [87] utilizes the Normalized Discounted Cumulative Gain (NDCG), a widely used ranking metric, to optimize the model's generation probability to match the permutation of responses based on these labels. Similarly, DRPO [88] leverages NDCG as a key metric to optimize the ranking of model outputs. However, DRPO incorporates novel elements like diffNDCG and Adaptive Rank Policy Score to dynamically adjust the score margins between preferred and non-preferred responses based on their ranking positions. mDPO [232] extends preference optimization to multi-sample comparisons and introduces a framework that evaluates and optimizes the collective properties of sample groups. It not only addresses the limitations of single pair-wise methods but also provides a more robust optimization framework, especially for characteristics like diversity and bias. Furthermore, RPO [90] introduces a contrastive weighting mechanism that constructs a contrast matrix within each mini-batch to compare preferred and less-preferred responses across prompts. 
The weights of these comparisons are dynamically adjusted based on the semantic similarity between prompts. Additionally, TODO [91] integrates a tie ranking system into list-wise preference modeling, significantly improving the capture of nuances of human preferences, especially in the presence of noisy or inconsistent labels and frequent ties." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.795, + 0.285, + 0.809 + ], + "angle": 0, + "content": "3.1.3 Preference Granularity" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.812, + 0.49, + 0.898 + ], + "angle": 0, + "content": "Preference granularity refers to the granularity of preference labels, which determines the level at which preferences are assigned to data. It can be categorized into token-level, step-level, sentence-level, and turn-level granularity, ranging from fine-grained focus on individual tokens to broader preferences over entire interaction turns." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.9, + 0.49, + 0.943 + ], + "angle": 0, + "content": "(a) Token-Level Granularity. Token-level alignment operates at the character/subword unit of text generation, providing the finest-grained control over model outputs" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.054, + 0.921, + 0.519 + ], + "angle": 0, + "content": "Theoretically, Rafailov et al. [92] demonstrate that DPO can represent any dense reward function by reparameterizing it as an optimal advantage function, which allows DPO to optimize policies in the token-level MDP effectively TDPO [93] refines the alignment process from the sentence level to the token level and introduces forward KL divergence constraints. TDPO utilizes the Bradley-Terry model to convert sentence-level preference comparisons into a token-level reward system, which allows the model to dynamically adjust its strategy at each token generation step. 
Furthermore, TIS-DPO[94] estimates the importance weights of tokens based on the differences in prediction probabilities from contrastive LLMs, performing token-level importance sampling on existing data to approximate optimal distribution by assigning weights to each token based on its reward. Moreover, \\(\\mathrm{D}^2\\mathrm{PO}\\) [99] proposes a temporal decay mechanism that dynamically adjusts the contribution of each token-level reward based on its position in the sequences. Unlike these, SparsePO [95] directly learns sparse masks during the training process and controls which tokens are more important for preferences through the sparsity of the masks, thereby achieving dynamic optimization. RTO [96] and SePO [97] first learn a token-level reward function from preference data using DPO, and then RTO optimizes PPO based on this reward signal, while SePO selects key tokens through the estimated reward function. To tackle the need for large-scale annotated data in training, EPO [98] proposes a hierarchical framework that decomposes complex tasks into manageable subgoals using separate LLMs for subgoal prediction and low-level action generation, leveraging environment feedback to automatically generate reward signals and preference data for aligning LLMs." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.521, + 0.922, + 0.636 + ], + "angle": 0, + "content": "To conclude, token-level granularity optimizes models at individual token positions to maximize expected objectives, preserving semantic precision and capturing local syntactic dependencies. However, it increases computational complexity, as processing numerous tokens extends training time, and its sensitivity to noise means errors in a single token can affect the entire sequence. Thus, careful loss function design and regularization are essential for stability." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.637, + 0.922, + 0.942 + ], + "angle": 0, + "content": "(b) Step-level Granularity. 
Step-level granularity focuses on the intermediate steps or stages in a process, particularly effective for complex problem-solving tasks requiring multiple intermediate steps. Step-DPO [100] and SCDPO [101] treat individual reasoning steps as the basic units for preference optimization, where preference pairs of correct and incorrect steps are generated using LLMs. Furthermore, CPO [102] and MCTS-DPO [103] first utilize more powerful inference structures to generate multiple candidate thoughts at each reasoning step following the Tree-of-Thought (ToT) and Monte Carlo Tree Search (MCTS) respectively, and construct preference pairs based on the selected and unselected intermediate steps. Then they finetune LLMs to generate reasoning steps preferred by ToT during inference using DPO. TPO [104] proposes a preference learning algorithm specifically designed for preference trees that have multiple branches and multi-step responses, and introduces the adaptive step reward mechanism to address the issue of small reward margins caused by shared subtrajectories. It adjusts the reward values for each step based on semantic similarity, helping the model better distinguish" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "8" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.054, + 0.493, + 0.214 + ], + "angle": 0, + "content": "between preference pairs. RDPO [105] extends traditional preference datasets to incorporate a rationale field, which explains why a particular response is preferred. RDPO introduces rationale information into the DPO loss function by maximizing the likelihood of both the preference and the rationale, which allows the model to better understand the logic behind preferences during training. 
To address the challenges of sparse rewards and training instability, DAPO [106] uses a critic function to generate dense signals for policy optimization and trains the actor and critic independently to avoid instability." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.215, + 0.493, + 0.418 + ], + "angle": 0, + "content": "To conclude, step-level alignment demonstrates unique advantages in multi-step reasoning tasks by decomposing holistic preferences into intermediate decision points. The primary strength of step-level granularity lies in its capacity to decompose complex objectives into verifiable subgoals, enhancing both interpretability and robustness. For instance, in mathematical reasoning, LLMs can receive feedback on equation derivation steps before final answers, reducing error propagation. However, this granularity still have two key challenges: first, the need for precise step segmentation, which may require domain-specific heuristics or auxiliary models to delineate reasoning boundaries; second, the risk of local optima, where over-optimization of individual steps degrades global coherence." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.418, + 0.493, + 0.695 + ], + "angle": 0, + "content": "(c) Sentence-level Granularity. Sentence-level granularity aligns preferences at the complete utterance level, balancing fine-grained control and computational efficiency. This granularity, represented by the original DPO framework, operates on full response sequences as atomic units for preference comparison. MAPO [107] uses a well-trained translation model to calculate alignment scores between answers in nondominant and dominant languages and then employs preference optimization methods to enhance reasoning consistency. EURUS [108] structures each instruction as a preference tree, containing pairs of correct and incorrect actions to facilitate preference learning. 
Similarly, IRPO [109] focuses on improving the reasoning capabilities of LLMs through an iterative preference optimization on constructed preference pairs such that the winning response has a higher reward than the losing response. FACTALIGN [110] proposes a fine-grained, sentence-level alignment algorithm called fKTO, which extends the KTO method to leverage fine-grained factuality assessments at the sentence level." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.695, + 0.493, + 0.812 + ], + "angle": 0, + "content": "To conclude, the key strength of sentence-level granularity lies in its capacity to preserve holistic semantics while maintaining tractable optimization complexity. Nevertheless, we must carefully consider task requirements. While suitable for short-form generation and classification tasks, sentence-level methods may insufficiently capture fine-grained stylistic nuances or long-range dependencies critical in generation and reasoning domains." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.812, + 0.493, + 0.943 + ], + "angle": 0, + "content": "(d) Turn-level Granularity. Turn-level granularity focuses on the optimization of model behavior at the level of conversational turns, which is particularly relevant for dialogue systems and interactive agents. This granularity level treats each turn of a conversation as a unit for preference alignment, allowing the LLMs to receive feedback on their responses within the context of a single turn. M-DPO [111] introduces a multi-turn direct preference learning framework to enhance the mathematical reasoning capabilities of LLMs when" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.925, + 0.491 + ], + "angle": 0, + "content": "integrated with external tools. It leverages feedback from code interpreters and optimizes trajectory-level preferences using signals generated by the Bradley-Terry model to improve model performance in multi-turn reasoning tasks. 
ETO [112] presents a novel trial-and-error learning method that optimizes LLM agents' policies by contrasting successful and failed trajectories that contain multi-turn interaction. To address the challenges of coarse granularity and training noise in previous methods, SDPO [113] optimizes specific key segments within interactions to improve multi-turn dialogues while minimizing training noise. Specifically, it extracts key segments from the positive sessions that contribute to higher goal and relationship scores and pairs them with corresponding segments from the negative sessions to calculate an adapted DPO loss. Similarly, AgentQ [114] combines MCTS with self-critique mechanisms to provide process-level supervision by ranking actions, and then iterative fine-tuning using DPO. This approach enables LLMs to effectively learn from both successful and unsuccessful trajectories, enhancing their generalization and decision-making capabilities in complex, multi-turn reasoning tasks within interactive environments. DMPO [115] enhances the existing DPO method by replacing the policy constraint with a State-Action Occupancy Measure (SAOM) constraint and incorporating length normalization into the Bradley-Terry model, effectively addressing challenges in multi-turn scenarios. Compared to traditional policy constraints, SAOM constraints better guide the agent to select actions that align with expert trajectories, especially in unexplored states, thereby reducing compounding errors." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.491, + 0.927, + 0.624 + ], + "angle": 0, + "content": "To conclude, turn-level alignment offers critical advantages for interactive systems by optimizing contextually grounded responses while preserving conversational flow. However, in multi-turn dialogue tasks, the turn-level granularity may introduce additional training noise. 
For example, some correct turns in negative samples may be mistakenly treated as incorrect turns in the loss calculation. Additionally, since each turn needs to be processed independently, this can lead to reduced training efficiency." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.643, + 0.757, + 0.658 + ], + "angle": 0, + "content": "3.2 Learning Framework of DPO" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.662, + 0.924, + 0.737 + ], + "angle": 0, + "content": "The learning framework of DPO focuses on how the language model policy learns from preference data. In this section, we present an overview of the learning framework in DPO, as shown in Fig. 3, which encompasses the learning paradigm and the learning objectives." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.75, + 0.693, + 0.765 + ], + "angle": 0, + "content": "3.2.1 Learning Paradigm" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.767, + 0.924, + 0.87 + ], + "angle": 0, + "content": "The learning paradigm in DPO determines how preference data is acquired during model training and falls into three distinct categories: offline learning, where the model learns from pre-collected preference datasets; online Learning, where the model updates based on newly generated data; and active Learning, where the model selectively queries annotators obtain preference data." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.87, + 0.925, + 0.945 + ], + "angle": 0, + "content": "(a) Offline Learning. The original DPO framework [74] itself is an offline learning paradigm, where the model learns from a static, pre-collected dataset of preference pairs. 
Recent research has explored different approaches to merging preference optimization and supervised fine-tuning" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "9" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.056, + 0.484, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.137, + 0.36, + 0.427, + 0.373 + ], + "angle": 0, + "content": "Fig. 3: An overview of DPO learning framework." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.385, + 0.493, + 0.649 + ], + "angle": 0, + "content": "into a single training phase [190]. CPO [116] incorporates a behavior cloning regularizer through KL divergence minimization between the model and preferred data distribution, which effectively combines into adding a negative log-likelihood term on preferred data alongside the contrastive preference loss. Taking a more direct approach, ORPO [117] proposes a monolithic framework that directly augments the standard negative log-likelihood loss with an odds ratio term comparing chosen and rejected responses, eliminating the need for a separate reference policy while preserving SFT's domain adaptation capabilities. ULMA [71] proposes a hybrid method that applies standard SFT loss on positive samples while using a ranking-based DPO loss on negative samples. PAFT [118] introduces a parallel training paradigm where SFT and preference alignment are performed concurrently on the same pre-trained model and then merged using parameter fusion techniques, avoiding the sequential pipeline that can lead to catastrophic forgetting." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.65, + 0.493, + 0.797 + ], + "angle": 0, + "content": "Several advances explore curriculum learning strategies to enhance DPO performance and training efficiency. 
CurriDPO [53] introduces curriculum learning by ordering multiple preference pairs from easy to hard based on the rating difference between chosen and rejected responses, where pairs with larger rating gaps are presented first, followed by progressively more challenging pairs with smaller rating differences. sDPO [55] implements curriculum learning by partitioning preference datasets into sequential chunks measured by reward accuracy and applying them incrementally." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.797, + 0.493, + 0.945 + ], + "angle": 0, + "content": "To avoid substantial computational and data annotation costs for preference alignment, fine-tuning-free alignment methods have gained popularity. Linear Alignment [119] works by directly estimating the optimal policy through a one-step update to the output distribution during inference without requiring parameter tuning or feedback data. ICDPO [120] reinterprets DPO's reward-policy relationship to create a fine-tuning-free alignment method that harnesses in-context learning, treating models before and after demonstration exposure as amateur and expert policies, respectively," + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.923, + 0.083 + ], + "angle": 0, + "content": "then computing their log probability ratio to score and rank candidate responses." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.083, + 0.924, + 0.199 + ], + "angle": 0, + "content": "(b) Online Learning. DPO faces significant limitations when relying solely on static, pre-collected preference datasets. These datasets, generated by different models, cause a distribution shift that leads to ineffective off-policy learning as the model evolves [145, 152]. By contrast, online DPO employs an iterative framework that continuously updates the policy with real-time feedback, ensuring on-policy learning and reducing misalignment [143, 144, 233]." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.199, + 0.924, + 0.71 + ], + "angle": 0, + "content": "As online DPO progresses, researchers have introduced more flexible frameworks to tackle key challenges. For instance, Yuan et al. [123] proposed a self-rewarding language model: the model generates prompts and responses, then serves as its own judge via LLM-as-a-Judge prompting, scoring on a 5-point scale. OAIF [121] uses an LLM as an online annotator for real-time feedback, and OFSDPO [122] addresses catastrophic forgetting by using two Low-Rank Adaptive (LoRA) modules with different optimization speeds. BPO [124] constructs a dynamic trust region around the behavior LLM, adjusting it as preference data is collected, unlike methods that rely on fixed reference models. Furthermore, researchers have refined sampling strategies for online DPO. RSO [126] and RS-DPO [125] employ rejection sampling based on reward gaps. ROPO [61] recovers useful information from discarded queries via robustness-guided rejection sampling. Shi et al. [127] introduced DPO-Mix-R and DPO-Mix-P, demonstrating faster convergence by mixing online samplers with uniform samplers. OPTUNE [128] selectively regenerates responses with low reward scores while reusing high-reward responses. Iterative RPO [109] and DPO-ST [129] enhance CoT reasoning by selecting correct and incorrect answers to form preference pairs at each iteration. Xie et al. [103] used MCTS to collect preference data during training. Researchers have also explored advanced optimization techniques. APO [130] incorporates momentum-based acceleration, using an extrapolation step between the current and previous policies to update the policy. Xiong et al. [131] proposed a two-agent, non-symmetric online DPO framework with a main agent for optimal policy learning and an enhancer agent for exploration. 
COMAL [132] formulates alignment as a two-player zero-sum game, updating its policy toward a regularized Nash equilibrium in each iteration. PCO [133] iteratively trains the model on preference data with pairwise cringe Loss." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.71, + 0.924, + 0.945 + ], + "angle": 0, + "content": "Recent efforts push for greater autonomy by letting models generate their own feedback [62]. SeRA [134] introduces a self-reviewed preference bootstrapping method, using an implicit reward margin to select informative pairs, and employs an ensemble reward approach across iterations. CREAM [135] mitigates self-improving biases by applying a consistency regularization on the preference rankings of consecutive iterations. D2PO [136] combines human-labeled gold data with concurrently updated, discriminator-labeled data. DLMA [137] uses contrastive prompts to compute self-reward scores via log ratio differences, then integrates these scores directly into the DPO objective. Addressing exploration and uncertainty in online DPO has also been a focus [234]. XPO [138] encourages exploration by adding a bonus for responses outside the initial policy's support, and SELM [139] uses an optimism term in reward fitting to" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.924, + 0.044 + ], + "angle": 0, + "content": "10" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.169 + ], + "angle": 0, + "content": "actively seek high-reward responses. ETO [112] alternates exploration and training phases to collect failure trajectories, while VPO [140] applies optimism by regularizing the reward model to favor higher-value responses. Xiong et al. [111] extended DPO from single-turn to multi-turn tasks, balancing KL-regularized and non-regularized objectives, and COPO [141] incorporates a count-based bonus to encourage novel responses with low visitation counts." 
+ }, + { + "type": "text", + "bbox": [ + 0.073, + 0.17, + 0.493, + 0.272 + ], + "angle": 0, + "content": "Finally, a growing body of work aims to merge online and offline techniques. HyPO [142] uses offline preference data for DPO training while regularizing via online data. MPO [47] combines the strengths of DPO and PPO in a two-stage process: it first trains DPO on an easier dataset, then uses this model as a reference for PPO training on more challenging samples." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.273, + 0.493, + 0.388 + ], + "angle": 0, + "content": "(c) Active Learning. Active learning in DPO is a strategic approach that aims to reduce the annotation cost and improve sample efficiency by selectively querying annotators for the most informative preference examples. Unlike offline learning that uses a fixed dataset or online learning that generates new data continuously, active learning intelligently selects which data points should be labeled based on model uncertainty or other informativeness criteria." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.389, + 0.495, + 0.681 + ], + "angle": 0, + "content": "Muldrew et al. [146] introduced APL, an iterative data acquisition and fine-tuning loop in which batches of prompt/completion pairs are strategically selected using acquisition functions: a predictive entropy-based approach to measure model uncertainty for prompts and a preference certainty measure based on the implicit Bradley-Terry model for completion pairs in DPO. Unlike two-step selection processes in APL that separately select uncertain input prompts and corresponding completions, divAPO [147] integrates both stages into a single selection phase. divAPO maximizes the preference model certainty by simultaneously evaluating the informativeness of input prompts and completion pairs, while also considering the data distribution of the input prompts. Ji et al. 
[148] proposed ADPO, which selectively queries human preferences only for responses where the model exhibits high uncertainty while using pseudo-labels for confident cases. Das et al. [149] also employed active learning on RLHF, which actively selects the context-action pairs that maximize exploration and minimize uncertainty in the reward model." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.692, + 0.262, + 0.707 + ], + "angle": 0, + "content": "3.2.2 Learning Objective" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.709, + 0.493, + 0.797 + ], + "angle": 0, + "content": "In what follows, we present the learning objective in DPO, which determines how the model policy is optimized based on preference data. We first discuss multi-objective learning in DPO, which aims to optimize multiple objectives simultaneously. Then, we introduce self-play learning, which leverages self-generated data for preference alignment." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.798, + 0.493, + 0.913 + ], + "angle": 0, + "content": "(a) Multi-Objective Learning. Multi-objective learning in DPO addresses the challenge of simultaneously optimizing the language model for multiple, potentially competing preference dimensions, such as helpfulness, harmlessness, and truthfulness. This approach aims to find a balanced policy that satisfies multiple human values rather than optimizing for a single objective, which more closely mirrors the complexity of real-world human preferences." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.913, + 0.493, + 0.944 + ], + "angle": 0, + "content": "MODPO [150] achieves the sequential optimization of multiple preference objectives by incorporating language" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.925, + 0.373 + ], + "angle": 0, + "content": "modeling directly into reward modeling, using a margin-based loss to maintain performance on previously optimized dimensions. 
SPO [151] takes a similar iterative constrained optimization approach, optimizing each preference dimension while preventing the degradation of prior alignments through regularization terms. MOSLIM [152] takes a different approach by introducing a multi-head classification reward model that assigns different preference dimensions to separate classification heads, enabling simultaneous optimization of multiple preferences without requiring multiple reward or policy models. HPO [153] incorporates auxiliary objectives through offline RL, where the model uses a weighted maximum likelihood objective that combines a preference alignment term with an advantage-weighted term for maximizing arbitrary auxiliary rewards like readability and safety. CPO [154] introduces explicit preference tokens during training that specify desired scores for different objectives, transforming the multi-objective optimization into a conditional optimization problem. DRDO [155] simultaneously models rewards and preferences through a combination of reward distillation and a contrastive log-unlikelihood term in its loss function." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.374, + 0.927, + 0.491 + ], + "angle": 0, + "content": "(b) Self-Play Learning. Self-play learning in DPO represents an approach where the language model interacts with itself or its previous iterations to generate its own preference data for training, reducing or eliminating the need for human annotations [139, 164]. This method enables continuous self-improvement by leveraging the model's own judgment capabilities to identify and learn from better responses, creating a form of autonomous preference learning." 
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.491, + 0.925, + 0.738 + ], + "angle": 0, + "content": "SPIN [156] involves a self-play mechanism where the LLM generates synthetic data from its prior iterations, then fine-tunes itself to distinguish these self-generated responses from those of human-annotated data. The method resembles a two-player game, where the model's current iteration tries to improve its responses to better match the target distribution, while the previous iteration attempts to generate responses as close to human data as possible. SPPO [157] treats LLM alignment as a constant-sum two-player game and iteratively refines itself by competing against its previous iteration. Instead of maintaining two competing policies or a reward model, SPO [158] uses a single policy to sample multiple trajectories and uses the proportion of wins in pairwise comparisons as the reward signal. BoNBoN [159] Alignment likewise relies on sampling responses from a base model, but it selects the best ones among n candidates and fine-tunes itself to approximate that best-of-n distribution." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.739, + 0.925, + 0.943 + ], + "angle": 0, + "content": "Some works approach the alignment problem by leveraging Nash equilibrium [132]. Nash-MD [160] learns a preference model from pairwise human feedback and then computes a Nash equilibrium policy that consistently produces preferred responses. Its self-play approach updates the policy by having it compete against itself (or a slight variant of itself) under the learned preference model, which measures how often one response is preferred to another. DNO [161] extends this concept by implementing a batched on-policy algorithm where the current policy generates multiple outputs that are compared both against each other and against a teacher model's outputs. 
IPO-MD [162] combines the strengths of IPO and Nash-MD, where the model generates data using a mixture policy between the online and reference" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.921, + 0.043 + ], + "angle": 0, + "content": "11" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.142 + ], + "angle": 0, + "content": "policies, and uses a preference model to annotate pairs of generations, making the optimization equivalent to finding a Nash equilibrium through self-play. SRPO [163] modifies Nash-MD by introducing a self-improvement policy that refines model outputs through iterative revisions, enabling offline optimization without a learned reward function." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.162, + 0.342, + 0.176 + ], + "angle": 0, + "content": "3.3 Constraint Mechanism of DPO" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.181, + 0.492, + 0.284 + ], + "angle": 0, + "content": "The constraint mechanism of DPO derives from its reformulation of RLHF, which includes a KL divergence constraint between the current policy and a reference policy. As shown in Fig. 4, we re-examine the constraint mechanism of DPO from the perspective of the reference model and different divergence constraints. We also explore various DPO variants with different safety constraints." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.298, + 0.25, + 0.312 + ], + "angle": 0, + "content": "3.3.1 Reference Model" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.315, + 0.493, + 0.476 + ], + "angle": 0, + "content": "The reference model in DPO functions as an anchor to ensure policy updates remain within a controlled range, preventing excessive deviation from initial behaviors. Typically, the reference model is initialized using the SFT model that serves as the starting point for preference optimization. The choice of reference model significantly impacts optimization dynamics. 
A static reference model ensures stable training but may limit adaptability. In the following subsections, we introduce two advanced approaches: reference-free DPO eliminates reliance on the reference model, while dynamic-reference DPO employs an evolving reference model." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.477, + 0.493, + 0.826 + ], + "angle": 0, + "content": "(a) Reference-Free DPO. To reduce the computational and memory costs associated with a reference model, many algorithms have explored training modes that do not require loading the reference model. Xu et al. [116] replaces the reference model with a uniform prior distribution, adding an SFT loss term on preferred data to maintain consistency with the desired behavior. ORPO [117] integrates an odds ratio-based penalty with traditional SFT loss, increasing the probability of preferred responses while decreasing undesirable ones, thereby enabling single-stage training without a separate reference model. SimPO [166] directly uses the average log probability as implicit rewards. This removes the requirement for a separate reference model, significantly improving computational and memory efficiency. SimPER [167] also directly optimizes reverse perplexity for preferred versus rejected responses, creating a preference optimization approach that does not require a separate reference model, thus simplifying training. Despite these advancements, [168] argue that a reference model remains crucial. They compared two reference-free variants using posterior probabilities and likelihood functions as rewards, respectively, and found the original DPO consistently outperformed both. Their results indicate that a strong, well-aligned reference policy can significantly enhance DPO performance." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.826, + 0.493, + 0.945 + ], + "angle": 0, + "content": "(b) Dynamic-Reference DPO. 
Offline DPO methods often suffer from reward over-optimization, meaning that as the trained model deviates from the reference model, the quality of generated samples tends to degrade. To address this issue, Gorbatovski et al. [165] proposed dynamically updating the reference model using the current model parameters during training, preventing excessive divergence and maintaining high-quality outputs. Curri-DPO [53] and sDPO [55] adopt" + }, + { + "type": "list", + "bbox": [ + 0.073, + 0.477, + 0.493, + 0.945 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.054, + 0.918, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.563, + 0.338, + 0.866, + 0.351 + ], + "angle": 0, + "content": "Fig. 4: An overview of DPO constraint mechanism." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.358, + 0.925, + 0.52 + ], + "angle": 0, + "content": "curriculum learning by sorting data samples from simpler to more complex based on predefined metrics. At each training iteration, the model from the previous step serves as the updated reference model to provide constraints, facilitating progressive learning. Similarly, MPO [47] partitions datasets according to task difficulty, employing a two-stage training procedure. The model trained in the initial stage serves as the reference for the subsequent stage. Additionally, M-DPO [89] compares the performance of a fixed reference model versus a dynamic reference model, finding that the latter yields superior results." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.532, + 0.719, + 0.546 + ], + "angle": 0, + "content": "3.3.2 Divergence Constraint" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.549, + 0.925, + 0.636 + ], + "angle": 0, + "content": "Divergence constraints in DPO play a crucial role in constraining model optimization, balancing alignment performance and model stability. 
In the following subsections, we introduce two modifications to the divergence constraint: one for enhancing diversity and the other for improving generalization." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.637, + 0.925, + 0.827 + ], + "angle": 0, + "content": "(a) Diversity. Standard DPO typically uses reverse KL divergence equivalent to RLHF. However, the mode-seeking nature of reverse KL divergence reduces the diversity of the generated outputs. To overcome this limitation, f-DPO [169] explores various divergences, including forward KL divergence, reverse KL divergence, Jensen-Shannon divergence, and \\(\\alpha\\)-divergence, to achieve a better trade-off between alignment performance and diversity. Slocum et al. [170] further proposes splitting the KL divergence term into entropy and cross-entropy terms. This decoupling allows independent control of generation diversity and closeness to the reference model, preserving output diversity without degrading overall model quality." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.827, + 0.927, + 0.945 + ], + "angle": 0, + "content": "(b) Generalization. Over-optimization in DPO can negatively impact generalization, causing reduced performance on inputs outside the training distribution. To mitigate this, Huang et al. [178] introduce \\(\\chi^2\\)-divergence as a more aggressive form of regularization compared to KL divergence, alleviating the over-optimization problem. DPO-Kernels [171] employs data-driven methods to select optimal kernel-divergence pairs dynamically, improving task adaptability" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.637, + 0.927, + 0.945 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "12" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.054, + 0.493, + 0.434 + ], + "angle": 0, + "content": "and robustness. 
FlipGuard [172] introduces a customized reward characterization to monitor model performance. If performance drops relative to earlier versions, FlipGuard constrains the model's updates to ensure alignment with previous stable behavior. FPO [173] leverages the feature-level constraints using Sparse Autoencoders (SAEs) to improve computational efficiency and training stability. SPO [176] integrates a natural preference loss with a KL divergence-based regularization term computed over the entire model output distribution. By adjusting this divergence term, SPO prevents unwanted shifts beyond the preference dataset, ensuring stable alignment. EXO [175] argues that minimizing the forward KL divergence in DPO introduces bias when approximating the optimal policy. They establish a generalized alignment objective and reveal the equivalence between maximizing KL regularization rewards and minimizing the reverse KL divergence relative to the optimal policy. QDPO [177] utilizes divergence between the quantized model and the full-precision model for preference optimization, effectively addressing the token-flipping issue. Token-flipping refers to the phenomenon where quantization errors skew token distributions, leading to incorrect token selection. GPO [174] constructs a framework that unifies different DPO-related algorithms through theoretical derivations, enabling a deeper understanding of the regularization mechanisms in the DPO family of algorithms." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.447, + 0.25, + 0.461 + ], + "angle": 0, + "content": "3.3.3 Safety Constraint" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.466, + 0.493, + 0.86 + ], + "angle": 0, + "content": "Safety constraints in DPO aim to prevent LLMs from generating harmful, biased, or unethical outputs. However, traditional alignment algorithms often fail to address safety concerns. To enhance the safety alignment, recent studies have introduced several specialized mechanisms based on DPO. 
SafeDPO [179] introduces a streamlined approach for safety alignment by implicitly optimizing safety objectives within a single stage of policy learning. SACPO [180] addresses safety constraints by explicitly formulating language model alignment as a constrained optimization problem, using DPO to optimize the model under safety constraints. Zhang et al. [184] propose creating a backtracking preference dataset that identifies and reverses unsafe outputs, enhancing the safety and robustness of the model. C-DPO [181] integrates dual gradient descent into DPO to balance safety and utility efficiently. This approach achieves a robust trade-off between helpfulness and harmlessness, offering explicit safety guarantees. ADPO [182] introduces adversarial techniques into DPO. It specifically trains models to reduce the probability of unsafe outputs by deliberately generating harmful responses using controlled toxic tokens. Finally, Lee et al. [183] explore the internal mechanisms through which DPO reduces harmful outputs. Their findings suggest that DPO does not remove harmful behaviors learned during pretraining but instead teaches models to bypass or suppress these behaviors. This insight helps explain certain safety vulnerabilities like jailbreaks." + }, + { + "type": "title", + "bbox": [ + 0.073, + 0.879, + 0.287, + 0.894 + ], + "angle": 0, + "content": "3.4 Model Property of DPO" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.899, + 0.492, + 0.945 + ], + "angle": 0, + "content": "DPO has shown great promise in aligning LLMs with human preferences by directly optimizing model outputs based on preference data. During this process, the underlying models" + }, + { + "type": "image", + "bbox": [ + 0.515, + 0.056, + 0.917, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.581, + 0.337, + 0.846, + 0.351 + ], + "angle": 0, + "content": "Fig. 5: An overview of DPO model property." 
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.357, + 0.924, + 0.447 + ], + "angle": 0, + "content": "exhibit certain properties that are crucial for understanding their behavior and effectiveness. These properties can be broadly categorized into two aspects: the generation property and the optimization property, as shown in Fig. 5. In the following sections, we explore these two properties in more detail, analyzing their implications for model alignment." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.459, + 0.704, + 0.474 + ], + "angle": 0, + "content": "3.4.1 Generation Property" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.476, + 0.924, + 0.579 + ], + "angle": 0, + "content": "The generation property of DPO primarily concerns issues related to distribution shifts and length biases. DPO is sensitive to distribution shifts between the base model outputs and the preference data, which may reduce diversity and generalization. Additionally, DPO has a tendency to favor longer responses, a phenomenon known as morbidity, which can negatively impact performance and user experience." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.579, + 0.924, + 0.854 + ], + "angle": 0, + "content": "(a) Distribution Shift. In RLHF, the reward model is trained on a static set of preference data collected offline. During fine-tuning, the generated responses often differ from this original training data, resulting in a distribution shift. This shift can cause inaccurate reward predictions and lead to over-optimization. The implicit reward model in DPO also suffers from this distribution shift issue. Moreover, Lin et al. [188] have shown that the implicit reward model in DPO performs poorly on Out-Of-Distribution (OOD) data compared to explicit reward models. 
Experimental results indicate that DPO can transfer probability mass to the highreward response regions covered by the preference data, but it may also cause the distribution of responses generated by the model to deviate significantly from that of the reference policy, resulting in responses that do not meet expectations [189]. To address these problems, many researchers are now exploring online DPO approaches [109, 121, 122, 125], aiming to mitigate OOD by continuously updating preference data during training." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.855, + 0.925, + 0.945 + ], + "angle": 0, + "content": "Existing DPO methods also face significant limitations due to their dependence on specific training tasks. Their optimal solutions lack robustness when applied to OOD tasks. Thus, SRPO [163] reframes alignment as a self-improvement process, which optimizes a self-improvement policy and a generative policy using a min-max objective, ensuring" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.924, + 0.044 + ], + "angle": 0, + "content": "13" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.17 + ], + "angle": 0, + "content": "robustness by making the solution independent of training tasks. Zhang et al. [139] also identify notable issues in DPO when handling OOD tasks. First, DPO tends to overly favor novel content it has not seen during training. Second, it easily gets stuck in local optima, limiting exploration. To address these problems, they propose Self-Exploring Language Models (SELM), incorporating an optimism term to encourage broader exploration of new responses." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.17, + 0.493, + 0.273 + ], + "angle": 0, + "content": "Another significant challenge of DPO is preference drift, where human preferences evolve, changing data distributions over time. Traditional DPO algorithms typically overlook such temporal shifts, mistakenly interpreting them as noise. 
To address this, NS-DPO [185] propose to assign higher weights to recent data, allowing models to better adjust to evolving preferences." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.273, + 0.493, + 0.405 + ], + "angle": 0, + "content": "(b) Length Bias. Length bias in DPO refers to the tendency of model-generated outputs to become excessively long during training. This issue is similar to the length bias observed in RLHF [197] and is particularly pronounced in DPO. Length bias affects response quality and overall model performance. To mitigate this issue, researchers have developed several solutions, which can be categorized into three main approaches: length regularization, length normalization, and length sampling." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.405, + 0.493, + 0.637 + ], + "angle": 0, + "content": "Length regularization is a common approach to controlling length bias in DPO. By introducing regularization terms into the objective function, the model can constrain response length and reduce morbidity, thereby alleviating the length bias problem. R-DPO [191] introduces a length-based penalty term to the DPO objective function, explicitly discouraging morbidity. \\(\\mathrm{D}^2\\mathrm{PO}\\) [99] introduces a dynamic weighting mechanism by incorporating a temporal decay factor. Unlike previous methods that apply uniform reward contributions across sequences, \\(\\mathrm{D}^2\\mathrm{PO}\\) adjusts the influence of each reward based on its position in the response. Higher weights are assigned to rewards associated with earlier tokens, as they are more critical for model alignment, while later rewards gradually receive lower weights. This adaptive approach prevents overfitting to less relevant tokens, thereby addressing length bias in DPO." 
+ }, + { + "type": "text", + "bbox": [ + 0.073, + 0.637, + 0.495, + 0.945 + ], + "angle": 0, + "content": "Length normalization aims to eliminate the loss bias caused by response length differences, allowing the model to evaluate texts of varying lengths more fairly. This approach prevents the model from developing an unreasonable preference for either long or short responses [198]. RRHF [190] and SimPO [166] first propose to apply length normalization to responses, ensuring that the loss remains unaffected by response length. LN-DPO [194] further integrates SimPO-like length normalization into DPO, demonstrating that this approach enhances response quality while mitigating morbidity. LD-DPO [195] achieves length desensitization by reparameterizing the likelihood in DPO. Specifically, it decomposes the likelihood of the longer response in a preference pair into the product of the likelihood of the public-length portion and the likelihood of the excessive portion. It then introduces a hyperparameter to mitigate the morbidity preference. This adjustment smooths the relationship between likelihood and response length, reducing its impact on optimization. For multi-turn dialogue tasks, DMPO [115] introduces length normalization for the number of turns in multi-turn preference optimization." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.925, + 0.346 + ], + "angle": 0, + "content": "An alternative approach to controlling length bias in DPO is through sampling-based methods. SamPO [192] introduces a down-sampling method to compute regularized KL divergences. By balancing token-level probability distributions between preferred and rejected responses, SamPO reduces length bias in DPO training. Yuan et al. [193] propose Length-Instruction Fine-Tuning (LIFT), a method to improve instruction-following models' ability to adhere to length constraints by augmenting existing training data with explicit length instructions and using DPO for training. 
This enables the model to generalize across prompts requiring different response lengths. For long-context tasks, LongPO [196] enables short-context LLMs to self-evolve for long-context tasks by learning from self-generated short-to-long preference data, which includes paired responses for long-context inputs and their compressed short-context counterparts. LongPO incorporates a short-to-long KL constraint to prevent degradation of short-context performance during long-context alignment, achieving strong performance on both short- and long-context tasks." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.357, + 0.715, + 0.373 + ], + "angle": 0, + "content": "3.4.2 Optimization Property" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.374, + 0.925, + 0.476 + ], + "angle": 0, + "content": "The optimization property of DPO involves likelihood collapse and alignment tax. While DPO aims to increase the likelihood of preferred responses and decrease dispreferred ones, the actual optimization process does not explicitly enforce this balance. Moreover, alignment improvements often come at the cost of the original capabilities of LLMs, known as alignment tax." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.477, + 0.925, + 0.768 + ], + "angle": 0, + "content": "(a) Likelihood Collapse. Likelihood collapse refers to the unintended reduction in the likelihood of both preferred and dispreferred responses during DPO training [92]. This phenomenon can lead to unintentional unalignment, where the model's outputs deviate from human preferences, potentially producing undesirable or harmful responses. This phenomenon is also referred to as likelihood displacement in prior studies [204]. Additionally, the gradients associated with increasing the likelihood of preferred responses and decreasing that of dispreferred responses can become entangled, hindering effective learning. 
This entanglement complicates the optimization process, making it challenging to achieve the desired alignment [203]. Theoretical analyses have further elucidated the underlying causes of likelihood collapse. In particular, Feng et al. [202] developed an analytical framework grounded in field theory. Their analysis of the gradient vector field of the DPO loss function revealed that the loss function decreases the probability of generating human-disliked data at a faster rate than it increases the probability of generating human-like data." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.768, + 0.925, + 0.943 + ], + "angle": 0, + "content": "Several strategies have been proposed to address likelihood collapse. Pal et al. [200] introduce DPO-Positive (DPOP), which adds a penalty term to maintain a high log-likelihood for preferred examples. Similarly, LLaMA [235] augments DPO training with a negative log-likelihood term to stabilize training and preserve the log-likelihood of chosen responses [109]. Flex-DPO [201] adaptively adjusts parameters to slow the decline in the likelihood of dispreferred responses and balance gradients for both chosen and rejected outputs. D'Oosterlinck et al. [199] propose Anchored Preference Optimization (APO), which provides fine-grained control over probability updates: APO-zero increases the" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "14" + }, + { + "type": "table_caption", + "bbox": [ + 0.245, + 0.048, + 0.751, + 0.062 + ], + "angle": 0, + "content": "TABLE 1: An overview of datasets (upper row) and benchmarks (lower row) for DPO." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.063, + 0.921, + 0.378 + ], + "angle": 0, + "content": "
DatasetTask DescriptionData Size (Training & Test)Data SourceData StructureEvaluation Metric
UltraFeedback [237]Instruction-Following, Helpful64K&-AIList-
SafeRLHF [238]Harmless, Helpful73.9K&8.21KHuman&AIPair-
HelpSteer [239]Helpful35.3K&1.8KHumanPoint-
PRM800K [240]Mathematical Reasoning800K&-HumanPoint-
SHP-2 [241]Q&A From Reddit3600K&241KHumanPair-
Nectar [242]Conversations183K&-AIList-
OpenOrca [243]Conversations2940K&-AISample-
Capybara [244]Multi-Turn Conversations16K&-Human&AISample-
Step-DPO [100]Mathematical Reasoning10.8K&-Human&AIPair-
BeaverTails [245]Harmless, Helpful330K&36KHuman&AIPoint-
IMDb [246]Movie Reviews25K&25KHumanSampleAccuracy
Reddit TL;DR [247]Summarization1330K&-HumanSampleWin Rate
Anthropic-HH [248]Harmless, Helpful161K&8.55KAIPairWin Rate
GSM8K [249]Mathematical Reasoning7.47K&1.32KHumanSampleAccuracy
AlpacaEval2 [250]Automatic Evaluation-&0.8KAISampleWin Rate
MT-Bench [251]Multi-Turn Question-&3.3KHumanPairWin Rate
AdvBench [252]Harmful Behaviors-&0.5KHumanSampleAttack Success
Arena-Hard [253]Updating Evaluation-&0.5KAISampleWin Rate
TruthfulQA [254]Truthful-&0.8KHumanPairAccuracy
IFEval [255]Instruction-Following-&0.5KHumanSampleAccuracy
BBH [256]Multistep Reasoning-&23 TasksHumanSampleAccuracy
MATH [257]Mathematical Reasoning7.5K&5KHumanSampleAccuracy
GPQA [258]Biology, Physics, and Chemistry-&0.45KHumanSampleAccuracy
MUSR [259]Multistep Reasoning-&0.76KAISampleAccuracy
MMLU-Pro [260]Language Understanding-&12KHuman&AISampleAccuracy
" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.39, + 0.492, + 0.435 + ], + "angle": 0, + "content": "probability of winning outputs and decreases that of losing outputs, whereas APO-down decreases both, but with a stronger decline for losing outputs." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.44, + 0.493, + 0.734 + ], + "angle": 0, + "content": "Another notable challenge related to likelihood collapse is likelihood over-optimization, where the performance of a model on a proxy metric (such as its own likelihood estimates) improves, while its true performance does not. Zhang and Ranganath [236] show that reductions in the likelihood loss of DPO do not necessarily translate into higher win rates. Similarly, Shi et al. [205] further investigates the problem of likelihood over-optimization in DPO, demonstrating that higher completion likelihoods do not necessarily correlate with better model performance and may even degrade it. This study identifies key indicators of over-optimization and highlights the need to balance likelihood optimization with output diversity. e-DPO [187] also shows that DPO can lead to degenerate policies due to overfitting, and proposes a solution using reward model distillation to regularize the implicit reward of the language model. The method trains the language model to match the probability distribution induced by a reward model and introduces a pessimistic extension to handle uncertainty in the reward model, thereby improving the robustness of DPO." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.738, + 0.496, + 0.945 + ], + "angle": 0, + "content": "(b) Alignment Tax. Alignment tax refers to the unintended consequence where improving a model's preference alignment degrades its general capabilities acquired during pretraining [206]. Thakkar et al. [207] demonstrate the sensitivity of DPO to training data composition, showing significantly worse performance degradation than SFT when using mixed-preference datasets. 
Furthermore, Chen et al. [209] identify that DPO struggles with optimizing ranking tasks. While DPO improves ranking accuracy, it disproportionately harms generative capabilities. Pentyala et al. [118] also observes capability forgetting during sequential training, where DPO objectives conflict with previously learned SFT patterns. To address this, researchers propose model merging strategies that balance alignment and performance." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.39, + 0.926, + 0.524 + ], + "angle": 0, + "content": "PAFT [118] separately trains SFT and DPO objectives on a pretrained model using distinct datasets, then merges the parameters through weighted averaging. Additionally, Lu et al. [208] proposes online merging optimizers, which integrate model merging into each optimization step of DPO to balance human preferences and basic capabilities. By merging gradients with parameter differences between SFT and pretrained models, these optimizers effectively enhance alignment while mitigating alignment tax." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.545, + 0.78, + 0.56 + ], + "angle": 0, + "content": "4 BENCHMARKS AND ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.567, + 0.925, + 0.656 + ], + "angle": 0, + "content": "In this section, we provide a comprehensive overview of existing benchmarks and evaluation for DPO methods. We first introduce the key datasets and benchmarks used to train or evaluate DPO models. We then present a comparative analysis of the performance of different DPO methods on these benchmarks, highlighting their strengths and limitations." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.675, + 0.742, + 0.69 + ], + "angle": 0, + "content": "4.1 Datasets and Benchmarks" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.695, + 0.925, + 0.945 + ], + "angle": 0, + "content": "A diverse range of datasets and benchmarks has been specifically curated to facilitate research in DPO. 
Table 1 summarizes these datasets and benchmarks, highlighting their task descriptions, dataset sizes, data sources, data structures, and evaluation metrics. These datasets and benchmarks span a broad range of tasks, such as harmlessness and helpfulness evaluation and mathematical reasoning. They also exhibit significant diversity in scale, ranging from smaller, specialized datasets to large-scale collections such as SHP-2, which contains over 3.6 million samples. Additionally, datasets differ in their sources: some rely purely on human annotations, others on AI-generated content, and many adopt a hybrid approach combining human and AI-generated data. The data structures employed across these datasets include single-sample without preference label, point-wise annotations, pair-wise comparisons, and list-wise comparisons. Common evaluation metrics include accuracy" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "15" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.048, + 0.884, + 0.062 + ], + "angle": 0, + "content": "TABLE 2: Experimental results of different DPO variants on Open LLM Leaderboard. The underline indicates the best performance." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.063, + 0.921, + 0.325 + ], + "angle": 0, + "content": "
ModelMistral-7B-BaseLLaMA-3-8B-Base
IFEvalBBHMATHGPQAMUSRMMLU-ProAVERAGEIFEvalBBHMATHGPQAMUSRMMLU-ProAVERAGE
SFT3.441.19.228.842.027.725.429.046.315.328.641.331.031.9
RRHF [190]10.040.61.726.446.326.125.231.046.813.931.436.830.531.7
SLiC-HF [230]11.044.09.929.242.628.127.541.749.517.530.439.731.735.1
DPO [74]11.143.77.128.543.826.726.834.348.217.231.940.131.533.9
IPO [75]9.442.89.729.739.727.826.535.349.015.932.841.431.934.4
CPO [116]8.042.79.628.942.127.326.432.446.916.830.639.131.832.9
KTO [67]12.943.712.028.946.128.328.640.248.318.031.040.131.134.8
ORPO [117]28.446.413.530.241.429.531.640.049.116.830.738.432.034.5
R-DPO [191]10.043.07.628.739.327.226.036.448.817.231.640.631.534.4
SimPO [166]11.143.18.428.939.527.226.440.848.615.831.040.531.834.7
ModelMistral-7B-InstructLLaMA-3-8B-Instruct
IFEvalBBHMATHGPQAMUSRMMLU-ProAVERAGEIFEvalBBHMATHGPQAMUSRMMLU-ProAVERAGE
SFT48.446.210.929.147.627.134.950.749.326.931.037.935.738.6
RRHF [190]45.245.310.128.544.226.233.351.349.327.229.639.535.338.7
SLiC-HF [230]39.446.211.428.749.026.833.641.650.926.331.339.235.337.4
DPO [74]49.045.611.026.946.126.834.248.950.125.829.438.736.038.2
IPO [75]42.645.311.827.849.327.234.050.449.526.329.637.935.738.2
CPO [116]38.846.010.128.548.426.933.150.649.126.831.338.135.838.6
KTO [67]46.245.710.927.846.027.334.043.150.126.331.238.135.037.3
ORPO [117]37.645.111.228.246.926.532.643.050.626.929.339.135.137.3
R-DPO [191]46.845.99.928.746.227.634.250.950.325.329.839.035.738.5
SimPO [166]45.445.910.428.345.027.133.748.849.225.029.339.235.137.8
" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.338, + 0.493, + 0.399 + ], + "angle": 0, + "content": "(for tasks like mathematical reasoning found in GSM8K and MATH), win rates derived from pairwise comparisons (such as MT-Bench and Anthropic-HH), and attack success rates used for assessing adversarial robustness (AdvBench)." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.414, + 0.173, + 0.428 + ], + "angle": 0, + "content": "4.2 Results" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.432, + 0.493, + 0.65 + ], + "angle": 0, + "content": "To demonstrate the effectiveness of different DPO variants, we conduct experiments on the Open LLM Leaderboard. We compare different DPO variants using Mistral-7B-Base, Mistral-7B-Instruct [261], LLaMA-3-8B-Base, and LLaMA-3-8B-Instruct [235] as starting points. The overall experimental setup follows Meng et al. [166], ensuring a reproducible evaluation of different DPO variants. For Mistral-7B-Base and LLaMA-3-8B-Base, the SFT models are trained based on the UltraChat-200k dataset [262], and subsequently applied different DPO variants on the SFT models using the UltraFeedback dataset [237]. For Mistral-7B-Instruct and LLaMA-3-8B-Instruct, which have already undergone instruction-tuning, the preference dataset is regenerated by collecting responses from the SFT models using prompts from the UltraFeedback dataset [237]." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.651, + 0.495, + 0.943 + ], + "angle": 0, + "content": "The experimental results, as summarized in Table 2, highlight the performance of different DPO variants across various benchmarks. For the Mistral-7B-Base and LLaMA-3-8B-Base models, ORPO consistently achieves the highest average scores, indicating its effectiveness in aligning models with human preferences. Notably, ORPO outperforms other methods on IFEval, BBH, and MATH, demonstrating its superiority in instruction-following and mathematical reasoning tasks. 
Meanwhile, SLiC-HF and KTO also achieve competitive results, particularly in BBH and GPQA, suggesting that these methods effectively leverage preference data for enhanced performance. For the Mistral-7B-Instruct and LLaMA-3-8B-Instruct models, the improvements across different DPO variants are more nuanced. While DPO and R-DPO show strong performance in IFEval and MMLU-Pro, IPO and CPO demonstrate robustness in handling complex reasoning tasks like MATH and GPQA. Overall, the results indicate that different DPO variants exhibit varying strengths across benchmarks, with some methods excelling in base models while others are more effective for instruct models." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.337, + 0.66, + 0.352 + ], + "angle": 0, + "content": "5 APPLICATIONS" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.356, + 0.927, + 0.446 + ], + "angle": 0, + "content": "In this section, we discuss the applications of DPO in various domains, including different LLM-based applications, diffusion models, and multi-modal LLMs. We provide an overview of the key challenges and opportunities in each domain and highlight the potential impact of DPO on real-world applications." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.463, + 0.729, + 0.479 + ], + "angle": 0, + "content": "5.1 LLM-based Applications" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.481, + 0.925, + 0.716 + ], + "angle": 0, + "content": "DPO has emerged as a powerful paradigm for aligning LLMs with human preferences across diverse applications [116, 235, 263, 264]. In code generation, DPO enhances control over code quality by optimizing based on preferences from automated tests [265, 266, 267]. In mathematical reasoning, DPO reduces errors in complex problem-solving by emphasizing step-level preference optimization [100, 101, 129, 268]. 
Multilingual applications leverage DPO to synchronize cross-lingual preferences, thereby improving translation accuracy and cultural relevance [107, 269]. Recommendation systems utilize DPO to refine personalization by incorporating user preference data to optimize item rankings, thereby enhancing the model ability to distinguish preferred items from less preferred ones [270, 271]. These examples highlight the adaptability of DPO in achieving human-aligned outputs across diverse tasks." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.734, + 0.673, + 0.748 + ], + "angle": 0, + "content": "5.2 Diffusion Models" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.752, + 0.925, + 0.945 + ], + "angle": 0, + "content": "In the realm of diffusion models, DPO has been adapted to better align generated content with user expectations [272, 273, 274, 275]. By optimizing preferences over image-text pairs, DPO enhances the semantic accuracy of generated images and mitigates the production of undesirable or biased content. Studies have demonstrated that diffusion models fine-tuned with DPO respond more accurately to complex prompts compared to those trained with traditional techniques. Moreover, the efficiency of DPO allows for the fine-tuning of large-scale models using limited preference data, addressing significant computational challenges in training diffusion models [276, 277, 278]. While scaling DPO for high-resolution and dynamic content generation remains" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.924, + 0.044 + ], + "angle": 0, + "content": "16" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.085 + ], + "angle": 0, + "content": "challenging, its ability to simplify reward modeling makes it a promising method for controlled content creation [279]." 
+ }, + { + "type": "title", + "bbox": [ + 0.074, + 0.103, + 0.248, + 0.117 + ], + "angle": 0, + "content": "5.3 Multi-Modal LLMs" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.122, + 0.493, + 0.284 + ], + "angle": 0, + "content": "For multi-modal LLMs, DPO plays a crucial role in aligning preferences across different data types, thereby improving coherence in tasks such as visual question answering and image captioning [89, 280, 281, 282, 283]. By optimizing alignment between textual responses and visual inputs, DPO reduces hallucinations in multi-modal interactions, ensuring outputs remain faithful to the given context. Although reconciling different types of feedback can be challenging, DPO offers a practical framework for lightweight adaptation, making it well-suited to preference-intensive multi-modal applications [280, 284, 285]." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.305, + 0.434, + 0.32 + ], + "angle": 0, + "content": "6 CHALLENGES AND FUTURE DIRECTIONS" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.325, + 0.492, + 0.414 + ], + "angle": 0, + "content": "In this section, we discuss the key challenges and future directions in DPO research. We identify several critical issues that need to be addressed to further advance the field. Moreover, we propose several promising research directions that can help overcome these challenges and accelerate the adoption of DPO in the future." 
+ }, + { + "type": "title", + "bbox": [ + 0.074, + 0.433, + 0.362, + 0.448 + ], + "angle": 0, + "content": "6.1 Efficient Preference Optimization" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.451, + 0.495, + 0.716 + ], + "angle": 0, + "content": "Efficient preference optimization remains a pivotal challenge, as current DPO methods hinge on the availability of high-quality preference data, yet the manual collection of human annotations is both time-consuming and labor-intensive while automatically model-generated datasets often suffer from issues such as limited diversity, inherent biases, and insufficient fidelity to human judgment [121, 122, 128, 129]. Moreover, even though DPO circumvents the intricacies of reward model engineering common in RL, it does not fully leverage the exploratory strengths that RL methods offer, as evidenced by recent advances in reasoning approaches where RL-based training has achieved notable successes [18, 19]. This opens up an avenue for future research to not only enhance data efficiency through advanced learning techniques but also to integrate novel exploration mechanisms [138, 141], potentially through hybrid models that amalgamate the direct preference optimization benefits of DPO with the robust exploratory capabilities characteristic of RL." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.734, + 0.387, + 0.749 + ], + "angle": 0, + "content": "6.2 Multi-Modal Preference Optimization" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.753, + 0.496, + 0.945 + ], + "angle": 0, + "content": "Multi-Modal Preference Optimization presents another frontier, given that existing DPO frameworks have primarily targeted text-based modalities while many real-world applications demand the alignment of diverse human preferences across text, images, audio, and even video [280, 284, 285, 286, 287]. 
In scenarios where cross-modal cues might conflict, such as the need for concise text paired with richly detailed imagery, the challenge lies in constructing a unified preference representation space that can intelligently and automatically recalibrate the priority of different modalities based on the contextual demands of the task at hand [89, 282, 283]. Future directions in this area could involve the development of innovative multi-modal preference encoding architectures," + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.925, + 0.114 + ], + "angle": 0, + "content": "which are capable of disentangling compound preferences into modality-specific and cross-modal components that align conflicting preferences while also adapting dynamically to changing inputs." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.137, + 0.817, + 0.152 + ], + "angle": 0, + "content": "6.3 Continuous Preference Optimization" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.159, + 0.925, + 0.393 + ], + "angle": 0, + "content": "Continuous preference optimization addresses the dynamic nature of human preferences that evolve over time or vary with different phases of a task, a factor that static DPO models often fail to capture [123, 135, 137, 185]. As social norms and individual preferences shift, there is an increasing need for systems that can continuously recalibrate their alignment strategies in real time while simultaneously mitigating the risk of catastrophic forgetting. Future research in this domain may focus on meta-learning approaches that enable models to learn not only from the current state of preferences but also how to efficiently adapt when these preferences change. By integrating online learning frameworks with mechanisms for detecting temporal shifts and contextual variability in user behavior, researchers can pave the way toward systems that remain consistently relevant and effective in the face of evolving societal and individual expectations." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.417, + 0.825, + 0.433 + ], + "angle": 0, + "content": "6.4 Interpretable Preference Optimization" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.438, + 0.927, + 0.688 + ], + "angle": 0, + "content": "Interpretable preference optimization is critical for building trust in models that implicitly align human values, as the opaque nature of current DPO complicates the ability to audit and control the alignment process. In practice, human preferences are multi-dimensional [150, 151, 154], encompassing aspects such as factual accuracy, fairness, creativity, and beyond, and there is a pressing need to decompose these complex preferences into interpretable components that can be individually examined and fine-tuned. Future research could leverage advances in explainable techniques to develop models that not only achieve fine-grained alignment across diverse values but also provide transparent insights into how different preference dimensions interact to shape final decisions. This level of interpretability would allow stakeholders to balance competing values more effectively, ensuring that the alignment process remains both accountable and adaptable as societal norms continue to evolve." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.714, + 0.649, + 0.729 + ], + "angle": 0, + "content": "7 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.738, + 0.925, + 0.945 + ], + "angle": 0, + "content": "In recent years, DPO has emerged as a promising paradigm for aligning LLMs with human preferences by directly optimizing model policies using preference data. Despite its potential, the DPO research landscape remains fragmented, with a lack of systematic organization and comparative analysis. 
In this survey, we present a comprehensive overview of DPO and introduce a novel taxonomy that categorizes existing works into four key dimensions: data strategy, learning framework, constraint mechanism, and model property. We have also discussed the key benchmarks, evaluation results, and applications of DPO, highlighting the challenges and future directions in this field. By providing a systematic analysis of the existing DPO methods, we aim to facilitate further research and development in this area." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "17" + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.053, + 0.188, + 0.067 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.077, + 0.492, + 0.099 + ], + "angle": 0, + "content": "[1] Wayne Xin Zhao et al. A survey of large language models. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.101, + 0.492, + 0.123 + ], + "angle": 0, + "content": "[2] Humza Naveed et al. A comprehensive overview of large language models. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.124, + 0.492, + 0.146 + ], + "angle": 0, + "content": "[3] Yupeng Chang et al. A survey on evaluation of large language models. TIIS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.146, + 0.492, + 0.168 + ], + "angle": 0, + "content": "[4] Shervin Minaee et al. Large language models: A survey. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.169, + 0.492, + 0.191 + ], + "angle": 0, + "content": "[5] Shukang Yin et al. A survey on multimodal large language models. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.192, + 0.492, + 0.214 + ], + "angle": 0, + "content": "[6] Duzhen Zhang et al. Mm-llms: Recent advances in multimodal large language models. ACL, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.215, + 0.492, + 0.237 + ], + "angle": 0, + "content": "[7] Jingyi Zhang et al. Vision-language models for vision tasks: A survey. TPAMI, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.237, + 0.492, + 0.271 + ], + "angle": 0, + "content": "[8] Zhehui Wang et al. Enabling energy-efficient deployment of large language models on memristor crossbar: A synergy of large and small. TPAMI, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.272, + 0.492, + 0.294 + ], + "angle": 0, + "content": "[9] Hongru Wang et al. A survey of the evolution of language model-based dialogue systems. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.295, + 0.492, + 0.316 + ], + "angle": 0, + "content": "[10] Zihao Yi et al. A survey on recent advances in llm-based multi-turn dialogue systems. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.317, + 0.492, + 0.35 + ], + "angle": 0, + "content": "[11] Jiawei Liu et al. Is your code generated by chatgpt really correct? rigorous evaluation of large language models for code generation. NeurIPS, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.351, + 0.492, + 0.373 + ], + "angle": 0, + "content": "[12] Daya Guo et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.374, + 0.492, + 0.395 + ], + "angle": 0, + "content": "[13] Xue Jiang et al. Self-planning code generation with large language models. TOSEM, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.396, + 0.492, + 0.43 + ], + "angle": 0, + "content": "[14] Dave Van Veen et al. Adapted large language models can outperform medical experts in clinical text summarization. Nature Medicine, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.431, + 0.492, + 0.464 + ], + "angle": 0, + "content": "[15] Jesutofunmi A Omiye et al. Large language models in medicine: the potentials and pitfalls: a narrative review. Annals of Internal Medicine, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.465, + 0.492, + 0.487 + ], + "angle": 0, + "content": "[16] Karan Singhal et al. Toward expert-level medical question answering with large language models. Nature Medicine, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.488, + 0.492, + 0.51 + ], + "angle": 0, + "content": "[17] Fenglin Liu et al. Aligning, autoencoding and prompting large language models for novel disease reporting. TPAMI, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.511, + 0.43, + 0.521 + ], + "angle": 0, + "content": "[18] Aaron Jaech et al. Openai o1 system card. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.522, + 0.492, + 0.544 + ], + "angle": 0, + "content": "[19] Daya Guo et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.545, + 0.492, + 0.567 + ], + "angle": 0, + "content": "[20] Julia Hirschberg and Christopher D Manning. Advances in natural language processing. Science, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.568, + 0.492, + 0.6 + ], + "angle": 0, + "content": "[21] Xiaowei Huang et al. A survey of safety and trustworthiness of large language models through the lens of verification and validation. Artificial Intelligence Review, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.601, + 0.492, + 0.624 + ], + "angle": 0, + "content": "[22] Yue Zhang et al. Siren's song in the ai ocean: a survey on hallucination in large language models. arXiv, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.625, + 0.492, + 0.646 + ], + "angle": 0, + "content": "[23] Isabel O Gallegos et al. Bias and fairness in large language models: A survey. Computational Linguistics, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.647, + 0.492, + 0.668 + ], + "angle": 0, + "content": "[24] Yufei Wang et al. Aligning large language models with human: A survey. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.669, + 0.492, + 0.692 + ], + "angle": 0, + "content": "[25] Yang Liu et al. Trustworthy llms: A survey and guideline for evaluating large language models' alignment. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.692, + 0.492, + 0.714 + ], + "angle": 0, + "content": "[26] Tianhao Shen et al. Large language model alignment: A survey. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.715, + 0.492, + 0.748 + ], + "angle": 0, + "content": "[27] Hannah Rose Kirk et al. The benefits, risks and bounds of personalizing the alignment of large language models to individuals. Nature Machine Intelligence, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.749, + 0.492, + 0.771 + ], + "angle": 0, + "content": "[28] Usman Anwar et al. Foundational challenges in assuring alignment and safety of large language models. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.772, + 0.492, + 0.794 + ], + "angle": 0, + "content": "[29] Bofei Gao et al. Towards a unified view of preference learning for large language models: A survey. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.795, + 0.492, + 0.816 + ], + "angle": 0, + "content": "[30] Ruili Jiang et al. A survey on human preference learning for large language models. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.817, + 0.492, + 0.84 + ], + "angle": 0, + "content": "[31] Zhichao Wang et al. 
A comprehensive survey of llm alignment techniques: Rlhf, rlaif, ppo, dpo and more. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.841, + 0.492, + 0.862 + ], + "angle": 0, + "content": "[32] Genta Indra Winata et al. Preference tuning with human feedback on language, speech, and vision tasks: A survey. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.863, + 0.492, + 0.884 + ], + "angle": 0, + "content": "[33] Yue Huang et al. Position: TrustLLM: Trustworthiness in large language models. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.885, + 0.492, + 0.907 + ], + "angle": 0, + "content": "[34] Paul F Christiano et al. Deep reinforcement learning from human preferences. NeurIPS, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.908, + 0.492, + 0.93 + ], + "angle": 0, + "content": "[35] Long Ouyang et al. Training language models to follow instructions with human feedback. NeurIPS, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.931, + 0.492, + 0.942 + ], + "angle": 0, + "content": "[36] Nisan Stiennon et al. Learning to summarize with human" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.077, + 0.492, + 0.942 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.544, + 0.056, + 0.69, + 0.067 + ], + "angle": 0, + "content": "feedback. NeurIPS, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.068, + 0.858, + 0.079 + ], + "angle": 0, + "content": "[37] Josh Achiam et al. Gpt-4 technical report. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.08, + 0.922, + 0.101 + ], + "angle": 0, + "content": "[38] Yuntao Bai et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.102, + 0.922, + 0.113 + ], + "angle": 0, + "content": "[39] Anthropic. The claude 3 model family: Opus, sonnet, haiku, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.114, + 0.922, + 0.136 + ], + "angle": 0, + "content": "[40] Yuchun Miao et al. Inform: Mitigating reward hacking in rlhf via information-theoretic reward modeling. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.137, + 0.922, + 0.158 + ], + "angle": 0, + "content": "[41] Stephen Casper et al. Open problems and fundamental limitations of reinforcement learning from human feedback. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.159, + 0.922, + 0.181 + ], + "angle": 0, + "content": "[42] Keertana Chidambaram et al. Direct preference optimization with unobserved preference heterogeneity. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.182, + 0.922, + 0.204 + ], + "angle": 0, + "content": "[43] Haoxian Chen et al. Mallowspo: Fine-tune your llm with preference dispersions. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.205, + 0.922, + 0.226 + ], + "angle": 0, + "content": "[44] Shyam Sundhar Ramesh et al. Group robust preference optimization in reward-free rlhf. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.227, + 0.922, + 0.249 + ], + "angle": 0, + "content": "[45] Binwei Yao et al. No preference left behind: Group distributional preference optimization. *ICLR*, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.25, + 0.922, + 0.272 + ], + "angle": 0, + "content": "[46] Afra Amini et al. Direct preference optimization with an offset. ACL Findings, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.273, + 0.922, + 0.305 + ], + "angle": 0, + "content": "[47] Qi Gou and Cam-Tu Nguyen. Mixed preference optimization: Reinforcement learning with data selection and better reference model. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.306, + 0.922, + 0.328 + ], + "angle": 0, + "content": "[48] Shiqi Wang et al. 
Reward difference optimization for sample reweighting in offline RLHF. EMNLP Findings, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.33, + 0.922, + 0.351 + ], + "angle": 0, + "content": "[49] Junkang Wu et al. \\(\\alpha\\)-dpo: Adaptive reward margin is what direct preference optimization needs. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.352, + 0.922, + 0.374 + ], + "angle": 0, + "content": "[50] Hiroki Furuta et al. Geometric-averaged preference optimization for soft preference labels. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.375, + 0.922, + 0.396 + ], + "angle": 0, + "content": "[51] Junkang Wu et al. Beta-dpo: Direct preference optimization with dynamic beta. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.397, + 0.922, + 0.419 + ], + "angle": 0, + "content": "[52] Tetsuro Morimura et al. Filtered direct preference optimization, EMNLP, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.42, + 0.922, + 0.442 + ], + "angle": 0, + "content": "[53] Pulkit Pattnaik et al. Enhancing alignment using curriculum learning & ranked preferences. EMNLP, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.443, + 0.922, + 0.465 + ], + "angle": 0, + "content": "[54] Ilgee Hong et al. Adaptive preference scaling for reinforcement learning with human feedback. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.466, + 0.922, + 0.487 + ], + "angle": 0, + "content": "[55] Dahiyun Kim et al. Sdpo: Don't use your data all at once. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.488, + 0.922, + 0.51 + ], + "angle": 0, + "content": "[56] Runsheng Yu et al. Direct alignment of language models via quality-aware self-refinement. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.511, + 0.922, + 0.533 + ], + "angle": 0, + "content": "[57] Lou Jieming et al. 
Gap-aware preference optimization: Enhancing model alignment with perception margin. OpenReview, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.534, + 0.922, + 0.555 + ], + "angle": 0, + "content": "[58] Jingyuan Ma et al. Plug-and-play training framework for preference optimization. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.556, + 0.922, + 0.579 + ], + "angle": 0, + "content": "[59] Sayak Ray Chowdhury et al. Provably robust DPO: Aligning language models with noisy feedback. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.58, + 0.922, + 0.601 + ], + "angle": 0, + "content": "[60] Keyi Kong et al. Perplexity-aware correction for robust alignment with noisy preferences. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.602, + 0.922, + 0.624 + ], + "angle": 0, + "content": "[61] Xize Liang et al. Ropo: Robust preference optimization for large language models. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.625, + 0.922, + 0.647 + ], + "angle": 0, + "content": "[62] Dongyoung Kim et al. Spread preference annotation: Direct preference judgment for efficient LLM alignment. ICLR, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.648, + 0.922, + 0.669 + ], + "angle": 0, + "content": "[63] Lingfan Zhang et al. Combating inherent noise for direct preference optimization. OpenReview, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.67, + 0.922, + 0.692 + ], + "angle": 0, + "content": "[64] Shawn Im and Yixuan Li. Understanding generalization of preference optimization under noisy feedback. OpenReview, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.693, + 0.922, + 0.715 + ], + "angle": 0, + "content": "[65] Yang Gao et al. Impact of preference noise on the alignment performance of generative language models. COLM, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.716, + 0.922, + 0.748 + ], + "angle": 0, + "content": "[66] Junkang Wu et al. Towards robust alignment of language models: Distributionally robustifying direct preference optimization. ICLR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.749, + 0.922, + 0.771 + ], + "angle": 0, + "content": "[67] Kawin Ethayarajh et al. Model alignment as prospect theoretic optimization. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.772, + 0.922, + 0.794 + ], + "angle": 0, + "content": "[68] Seungjae Jung et al. Binary classifier optimization for large language model alignment. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.795, + 0.922, + 0.817 + ], + "angle": 0, + "content": "[69] Teng Xiao et al. Cal-dpo: Calibrated direct preference optimization for language model alignment. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.818, + 0.922, + 0.84 + ], + "angle": 0, + "content": "[70] Igor Melnyk et al. Distributional preference alignment of llms via optimal transport. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.841, + 0.922, + 0.863 + ], + "angle": 0, + "content": "[71] Tianchi Cai et al. Ulma: Unified language model alignment with human demonstration and point-wise preference. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.864, + 0.922, + 0.885 + ], + "angle": 0, + "content": "[72] Huayu Chen et al. Noise contrastive alignment of language models with explicit rewards. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.886, + 0.922, + 0.908 + ], + "angle": 0, + "content": "[73] Yifan Zhang et al. General preference modeling with preference representations for aligning language models. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.909, + 0.922, + 0.931 + ], + "angle": 0, + "content": "[74] Rafael Rafailov et al. 
Direct preference optimization: Your language model is secretly a reward model. NeurIPS, 2023." + }, + { + "type": "list", + "bbox": [ + 0.509, + 0.056, + 0.922, + 0.931 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "18" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.055, + 0.492, + 0.08 + ], + "angle": 0, + "content": "[75] Mohammad Gheshlaghi Azar et al. A general theoretical paradigm to understand learning from human preferences. AISTATS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.079, + 0.491, + 0.102 + ], + "angle": 0, + "content": "[76] Jinghong Chen et al. On extending direct preference optimization to accommodate ties. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.101, + 0.492, + 0.124 + ], + "angle": 0, + "content": "[77] Yuxin Jiang et al. Bridging and modeling correlations in pairwise data for direct preference optimization. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.124, + 0.492, + 0.158 + ], + "angle": 0, + "content": "[78] Xinghua Zhang et al. Iopo: Empowering llms with complex instruction following via input-output preference optimization. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.158, + 0.492, + 0.181 + ], + "angle": 0, + "content": "[79] Abbas Abdelmaleki et al. Preference optimization as probabilistic inference. ICLR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.181, + 0.492, + 0.204 + ], + "angle": 0, + "content": "[80] Yueqin Yin et al. Self-augmented preference optimization: Off-policy paradigms for language model alignment. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.204, + 0.492, + 0.238 + ], + "angle": 0, + "content": "[81] Shitong Duan et al. Negating negatives: Alignment with human negative samples via distributional dispreference optimization. arXiv, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.238, + 0.492, + 0.261 + ], + "angle": 0, + "content": "[82] Ruiqi Zhang et al. Negative preference optimization: From catastrophic collapse to effective unlearning. COLM, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.261, + 0.492, + 0.284 + ], + "angle": 0, + "content": "[83] Chongyu Fan et al. Simplicity prevails: Rethinking negative preference optimization for lmm unlearning. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.284, + 0.492, + 0.306 + ], + "angle": 0, + "content": "[84] Yifan Zhong et al. Panacea: Pareto alignment via preference adaptation for llms. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.306, + 0.492, + 0.329 + ], + "angle": 0, + "content": "[85] Tianqi Liu et al. Lipo: Listwise preference optimization through learning-to-rank, 2024. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.329, + 0.492, + 0.352 + ], + "angle": 0, + "content": "[86] Mingye Zhu et al. LIRE: listwise reward enhancement for preference alignment. ACL, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.352, + 0.492, + 0.375 + ], + "angle": 0, + "content": "[87] Yang Zhao et al. Ordinal preference optimization: Aligning human preferences via ndcg. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.374, + 0.492, + 0.398 + ], + "angle": 0, + "content": "[88] Jiacong Zhou et al. Optimizing preference alignment with differentiable ndcg ranking. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.398, + 0.492, + 0.42 + ], + "angle": 0, + "content": "[89] Fei Wang et al. mDPO: Conditional preference optimization for multimodal large language models. EMNLP, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.42, + 0.492, + 0.454 + ], + "angle": 0, + "content": "[90] Yueqin Yin et al. 
Relative preference optimization: Enhancing llm alignment through contrasting responses across identical and diverse prompts. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.454, + 0.492, + 0.477 + ], + "angle": 0, + "content": "[91] Yuxiang Guo et al. Todo: Enhancing llm alignment with ternary preferences. ICLR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.477, + 0.492, + 0.499 + ], + "angle": 0, + "content": "[92] Rafael Rafailov et al. From r to \\(q^*\\): Your language model is secretly a q-function. COLM, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.499, + 0.492, + 0.522 + ], + "angle": 0, + "content": "[93] Yongcheng Zeng et al. Token-level direct preference optimization. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.522, + 0.492, + 0.545 + ], + "angle": 0, + "content": "[94] Aiwei Liu et al. Tis-dpo: Token-level importance sampling for direct preference optimization with estimated weights. ICLR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.545, + 0.492, + 0.568 + ], + "angle": 0, + "content": "[95] Fenia Christopoulou et al. Sparsepo: Controlling preference alignment of llms via sparse token masks. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.568, + 0.492, + 0.59 + ], + "angle": 0, + "content": "[96] Han Zhong et al. Dpo meets ppo: Reinforced token optimization for rlhf. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.59, + 0.492, + 0.613 + ], + "angle": 0, + "content": "[97] Kailai Yang et al. Selective preference optimization via token-level reward function estimation. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.613, + 0.492, + 0.636 + ], + "angle": 0, + "content": "[98] Qi Zhao et al. EPO: hierarchical LLM agents with environment preference optimization. EMNLP, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.636, + 0.492, + 0.669 + ], + "angle": 0, + "content": "[99] Ruichen Shao et al. Earlier tokens contribute more: Learning direct preference optimization from temporal decay perspective. *ICLR*, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.669, + 0.492, + 0.693 + ], + "angle": 0, + "content": "[100] Xin Lai et al. Step-dpo: Step-wise preference optimization for long-chain reasoning of llms. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.693, + 0.492, + 0.716 + ], + "angle": 0, + "content": "[101] Zimu Lu et al. Step-controlled dpo: Leveraging stepwise error for enhanced mathematical reasoning. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.716, + 0.492, + 0.739 + ], + "angle": 0, + "content": "[102] Xuan Zhang et al. Chain of preference optimization: Improving chain-of-thought reasoning in llms. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.739, + 0.492, + 0.761 + ], + "angle": 0, + "content": "[103] Yuxi Xie et al. Monte carlo tree search boosts reasoning via iterative preference learning. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.761, + 0.492, + 0.784 + ], + "angle": 0, + "content": "[104] Weibin Liao et al. Tpo: Aligning large language models with multi-branch & multi-step preference trees. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.784, + 0.492, + 0.806 + ], + "angle": 0, + "content": "[105] Hoang Anh Just et al. Data-centric human preference optimization with rationales. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.806, + 0.492, + 0.84 + ], + "angle": 0, + "content": "[106] Jiacai Liu et al. Improving multi-step reasoning abilities of large language models with direct advantage policy optimization. arXiv, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.84, + 0.492, + 0.874 + ], + "angle": 0, + "content": "[107] Shuaijie She et al. MAPO: advancing multilingual reasoning through multilingual-alignment-as-preference optimization. ACL, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.874, + 0.492, + 0.898 + ], + "angle": 0, + "content": "[108] Lifan Yuan et al. Advancing llm reasoning generalists with preference trees. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.897, + 0.492, + 0.92 + ], + "angle": 0, + "content": "[109] Richard Yuanzhe Pang et al. Iterative reasoning preference optimization. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.92, + 0.492, + 0.933 + ], + "angle": 0, + "content": "[110] Chao-Wei Huang and Yun-Nung Chen. Factalign: Long-form" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.055, + 0.492, + 0.933 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.543, + 0.055, + 0.888, + 0.068 + ], + "angle": 0, + "content": "factuality alignment of large language models. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.068, + 0.923, + 0.091 + ], + "angle": 0, + "content": "[111] Wei Xiong et al. Building math agents with multi-turn iterative preference learning. *ICLR*, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.091, + 0.923, + 0.113 + ], + "angle": 0, + "content": "[112] Yifan Song et al. Trial and error: Exploration-based trajectory optimization for lIm agents. ACL, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.113, + 0.923, + 0.136 + ], + "angle": 0, + "content": "[113] Aobo Kong et al. Sdpo: Segment-level direct preference optimization for social agents. arXiv, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.136, + 0.923, + 0.158 + ], + "angle": 0, + "content": "[114] Pranav Putta et al. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.158, + 0.923, + 0.181 + ], + "angle": 0, + "content": "[115] Wentao Shi et al. Direct multi-turn preference optimization for language agents. EMNLP, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.181, + 0.923, + 0.214 + ], + "angle": 0, + "content": "[116] Haoran Xu et al. Contrastive preference optimization: Pushing the boundaries of LLM performance in machine translation. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.214, + 0.923, + 0.238 + ], + "angle": 0, + "content": "[117] Jiwoo Hong et al. ORPO: Monolithic preference optimization without reference model. EMNLP, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.238, + 0.923, + 0.261 + ], + "angle": 0, + "content": "[118] Shiva Kumar Pentyala et al. Paft: A parallel training paradigm for effective llm fine-tuning. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.261, + 0.923, + 0.295 + ], + "angle": 0, + "content": "[119] Songyang Gao et al. Linear alignment: A closed-form solution for aligning human preferences without tuning and feedback. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.295, + 0.923, + 0.328 + ], + "angle": 0, + "content": "[120] Feifan Song et al. Icdpo: Effectively borrowing alignment capability of others via in-context direct preference optimization. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.328, + 0.923, + 0.352 + ], + "angle": 0, + "content": "[121] Shangmin Guo et al. Direct language model alignment from online ai feedback. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.352, + 0.923, + 0.375 + ], + "angle": 0, + "content": "[122] Biqing Qi et al. Online dpo: Online direct preference optimization with fast-slow chasing. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.374, + 0.923, + 0.387 + ], + "angle": 0, + "content": "[123] Weizhe Yuan et al. 
Self-rewarding language models. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.386, + 0.923, + 0.409 + ], + "angle": 0, + "content": "[124] Wenda Xu et al. BPO: Staying close to the behavior LLM creates better online LLM alignment. EMNLP, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.409, + 0.923, + 0.442 + ], + "angle": 0, + "content": "[125] Saeed Khaki et al. RS-DPO: A hybrid rejection sampling and direct preference optimization method for alignment of large language models. NAACL, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.442, + 0.923, + 0.465 + ], + "angle": 0, + "content": "[126] Tianqi Liu et al. Statistical rejection sampling improves preference optimization. ICLR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.465, + 0.923, + 0.488 + ], + "angle": 0, + "content": "[127] Ruizhe Shi et al. The crucial role of samplers in online direct preference optimization. *ICLR*, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.488, + 0.923, + 0.511 + ], + "angle": 0, + "content": "[128] Lichang Chen et al. Optune: Efficient online preference tuning. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.511, + 0.923, + 0.534 + ], + "angle": 0, + "content": "[129] Tianduo Wang et al. Self-training with direct preference optimization improves chain-of-thought reasoning. ACL, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.534, + 0.923, + 0.556 + ], + "angle": 0, + "content": "[130] Jiafan He et al. Accelerated preference optimization for large language model alignment. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.556, + 0.923, + 0.59 + ], + "angle": 0, + "content": "[131] Wei Xiong et al. Iterative preference learning from human feedback: Bridging theory and practice for RLHF under KL-constraint. ICML, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.59, + 0.923, + 0.613 + ], + "angle": 0, + "content": "[132] Yixin Liu et al. Comal: A convergent meta-algorithm for aligning llms with general preferences. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.613, + 0.923, + 0.636 + ], + "angle": 0, + "content": "[133] Jing Xu et al. Some things are more cringe than others: Iterative preference optimization with the pairwise cringe loss. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.636, + 0.923, + 0.659 + ], + "angle": 0, + "content": "[134] Jongwoo Ko et al. Sera: Self-reviewing and alignment of large language models using implicit reward margins. *ICLR*, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.659, + 0.923, + 0.682 + ], + "angle": 0, + "content": "[135] Zhaoyang Wang et al. Cream: Consistency regularized self-rewarding language models. *ICLR*, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.682, + 0.923, + 0.704 + ], + "angle": 0, + "content": "[136] Prasann Singhal et al. D2PO: Discriminator-guided DPO with response evaluation models. COLM, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.704, + 0.923, + 0.727 + ], + "angle": 0, + "content": "[137] Aiwei Liu et al. Direct large language model alignment through self-rewarding contrastive prompt distillation. ACL, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.727, + 0.923, + 0.761 + ], + "angle": 0, + "content": "[138] Tengyang Xie et al. Exploratory preference optimization: Provably sample-efficient exploration in rlhf with general function approximation. *ICLR*, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.761, + 0.923, + 0.784 + ], + "angle": 0, + "content": "[139] Shenao Zhang et al. Self-exploring language models: Active preference elicitation for online alignment. arXiv, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.784, + 0.923, + 0.807 + ], + "angle": 0, + "content": "[140] Shicong Cen et al. Value-incentivized preference optimization: A unified approach to online and offline rlhf. *ICLR*, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.807, + 0.923, + 0.829 + ], + "angle": 0, + "content": "[141] Chenjia Bai et al. Online preference alignment for language models via count-based exploration. *ICLR*, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.829, + 0.923, + 0.852 + ], + "angle": 0, + "content": "[142] Yuda Song et al. The importance of online data: Understanding preference fine-tuning via coverage. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.852, + 0.923, + 0.874 + ], + "angle": 0, + "content": "[143] Yaojie Shen et al. Aipo: Improving training objective for iterative preference optimization. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.874, + 0.923, + 0.898 + ], + "angle": 0, + "content": "[144] Yunhao Tang et al. Understanding the performance gap between online and offline alignment algorithms. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.897, + 0.923, + 0.92 + ], + "angle": 0, + "content": "[145] Shusheng Xu et al. Is DPO superior to PPO for LLM alignment? A comprehensive study. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.92, + 0.923, + 0.933 + ], + "angle": 0, + "content": "[146] William Muldrew et al. Active preference learning for large" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.055, + 0.923, + 0.933 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "19" + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.056, + 0.292, + 0.068 + ], + "angle": 0, + "content": "language models. ICML, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.068, + 0.492, + 0.09 + ], + "angle": 0, + "content": "[147] Seola Choi et al. Active preference optimization via maximizing learning capacity. OpenReview, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.091, + 0.492, + 0.113 + ], + "angle": 0, + "content": "[148] Kaixuan Ji et al. Reinforcement learning from human feedback with active queries. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.113, + 0.492, + 0.135 + ], + "angle": 0, + "content": "[149] Nirjhar Das et al. Active preference optimization for sample efficient rlhf. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.136, + 0.492, + 0.159 + ], + "angle": 0, + "content": "[150] Zhanhui Zhou et al. Beyond one-preference-fits-all alignment: Multi-objective direct preference optimization. ACL Findings, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.159, + 0.492, + 0.181 + ], + "angle": 0, + "content": "[151] Xingzhou Lou et al. Spo: Multi-dimensional preference sequential alignment with implicit reward modeling. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.182, + 0.492, + 0.204 + ], + "angle": 0, + "content": "[152] Yu Zhang et al. MOSLIM: Align with diverse preferences in prompts through reward classification. OpenReview, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.205, + 0.492, + 0.237 + ], + "angle": 0, + "content": "[153] Anirudhan Badrinath et al. Hybrid preference optimization: Aug-mentation direct preference optimization with auxiliary objectives. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.238, + 0.492, + 0.261 + ], + "angle": 0, + "content": "[154] Yiju Guo et al. Controllable preference optimization: Toward controllable multi-objective alignment. EMNLP, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.262, + 0.492, + 0.295 + ], + "angle": 0, + "content": "[155] Abhijnan Nath et al. 
Simultaneous reward distillation and preference learning: Get you a language model who can do both. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.295, + 0.492, + 0.318 + ], + "angle": 0, + "content": "[156] Zixiang Chen et al. Self-play fine-tuning converts weak language models to strong language models. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.318, + 0.492, + 0.34 + ], + "angle": 0, + "content": "[157] Yue Wu et al. Self-play preference optimization for language model alignment. ICLR, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.341, + 0.492, + 0.363 + ], + "angle": 0, + "content": "[158]Gokul Swamy et al. A minimaximalist approach to reinforcement learning from human feedback. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.364, + 0.492, + 0.386 + ], + "angle": 0, + "content": "[159] Lin Gui et al. Bonbon alignment for large language models and the sweetness of best-of-n sampling. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.386, + 0.492, + 0.408 + ], + "angle": 0, + "content": "[160] Remi Munos et al. Nash learning from human feedback. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.409, + 0.492, + 0.432 + ], + "angle": 0, + "content": "[161] Corby Rosset et al. Direct nash optimization: Teaching language models to self-improve with general preferences. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.432, + 0.492, + 0.454 + ], + "angle": 0, + "content": "[162] Daniele Calandriello et al. Human alignment of large language models through online preference optimisation. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.455, + 0.492, + 0.476 + ], + "angle": 0, + "content": "[163] Eugene Choi et al. Self-improving robust preference optimization. *ICLR*, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.477, + 0.492, + 0.499 + ], + "angle": 0, + "content": "[164] Haoyan Yang et al. Dynamic noise preference optimization for llm self-improvement via synthetic data. arXiv, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.5, + 0.492, + 0.522 + ], + "angle": 0, + "content": "[165] Alexey Gorbatovski et al. Learn your reference model for real good alignment. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.523, + 0.492, + 0.544 + ], + "angle": 0, + "content": "[166] Yu Meng et al. Simpo: Simple preference optimization with a reference-free reward. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.545, + 0.492, + 0.568 + ], + "angle": 0, + "content": "[167] Teng Xiao et al. SimPER: A minimalist approach to preference alignment without hyperparameters. *ICLR*, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.568, + 0.492, + 0.591 + ], + "angle": 0, + "content": "[168] Yixin Liu et al. Understanding reference policies in direct preference optimization. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.591, + 0.492, + 0.623 + ], + "angle": 0, + "content": "[169] Chaoqi Wang et al. Beyond reverse kl: Generalizing direct preference optimization with diverse divergence constraints. *ICLR*, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.625, + 0.492, + 0.647 + ], + "angle": 0, + "content": "[170] Stewart Slocum et al. Diverse preference learning for capabilities and alignment. ICLR, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.648, + 0.492, + 0.68 + ], + "angle": 0, + "content": "[171] Amitava Das et al. Dpo kernels: A semantically-aware, kernel-enhanced, and divergence-rich paradigm for direct preference optimization. arXiv, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.681, + 0.492, + 0.714 + ], + "angle": 0, + "content": "[172] Mingye Zhu et al. 
FlipGuard: Defending preference alignment against update regression with constrained optimization. EMNLP, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.715, + 0.492, + 0.738 + ], + "angle": 0, + "content": "[173] Qingyu Yin et al. Direct preference optimization using sparse feature-level constraints. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.739, + 0.492, + 0.761 + ], + "angle": 0, + "content": "[174] Yunhao Tang et al. Generalized preference optimization: A unified approach to offline alignment. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.761, + 0.492, + 0.783 + ], + "angle": 0, + "content": "[175] Haozhe Ji et al. Towards efficient exact optimization of language model alignment. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.784, + 0.492, + 0.806 + ], + "angle": 0, + "content": "[176] Arsalan Sharifnassab et al. Soft preference optimization: Aligning language models to expert distributions. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.807, + 0.492, + 0.839 + ], + "angle": 0, + "content": "[177] Janghwan Lee et al. Improving conversational abilities of quantized large language models via direct preference alignment. ACL, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.84, + 0.492, + 0.874 + ], + "angle": 0, + "content": "[178] Audrey Huang et al. Correcting the mythos of kl-regularization: Direct alignment without overoptimization via chi-squared preference optimization. arXiv, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.875, + 0.492, + 0.898 + ], + "angle": 0, + "content": "[179] Geon-Hyeong Kim et al. SafeDPO: A simple approach to direct preference optimization with enhanced safety. OpenReview, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.898, + 0.492, + 0.92 + ], + "angle": 0, + "content": "[180] Akifumi Wachi et al. Stepwise alignment for constrained language model policy optimization. 
NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.92, + 0.492, + 0.932 + ], + "angle": 0, + "content": "[181] Zixuan Liu et al. Enhancing llm safety via constrained direct" + }, + { + "type": "list", + "bbox": [ + 0.076, + 0.056, + 0.492, + 0.932 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.543, + 0.056, + 0.76, + 0.068 + ], + "angle": 0, + "content": "preference optimization. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.068, + 0.923, + 0.102 + ], + "angle": 0, + "content": "[182] San Kim and Gary Geunbae Lee. Adversarial dpo: Harnessing harmful data for reducing toxicity with minimal impact on coherence and evasiveness in dialogue agents. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.102, + 0.923, + 0.124 + ], + "angle": 0, + "content": "[183] Andrew Lee et al. A mechanistic understanding of alignment algorithms: a case study on dpo and toxicity. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.125, + 0.923, + 0.146 + ], + "angle": 0, + "content": "[184] Yiming Zhang et al. Backtracking improves generation safety. ICLR, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.147, + 0.923, + 0.17 + ], + "angle": 0, + "content": "[185] Seongho Son et al. Right now, wrong then: Non-stationary direct preference optimization under preference drift. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.17, + 0.923, + 0.192 + ], + "angle": 0, + "content": "[186] Eugene Choi et al. Self-improving robust preference optimization. *ICLR*, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.192, + 0.923, + 0.215 + ], + "angle": 0, + "content": "[187] Adam Fisch et al. Robust preference optimization through reward model distillation. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.215, + 0.923, + 0.249 + ], + "angle": 0, + "content": "[188] Yong Lin et al. 
On the limited generalization capability of the implicit reward model induced by direct preference optimization. EMNLP Findings, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.25, + 0.923, + 0.272 + ], + "angle": 0, + "content": "[189] Fahim Tajwar et al. Preference fine-tuning of llms should leverage suboptimal, on-policy data. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.272, + 0.923, + 0.295 + ], + "angle": 0, + "content": "[190] Hongyi Yuan et al. Rrrh: Rank responses to align language models with human feedback. NeurIPS, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.295, + 0.923, + 0.318 + ], + "angle": 0, + "content": "[191] Ryan Park et al. Disentangling length from quality in direct preference optimization. ACL Findings, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.318, + 0.923, + 0.351 + ], + "angle": 0, + "content": "[192] Junru Lu et al. Eliminating biased length reliance of direct preference optimization via down-sampled KL divergence. EMNLP, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.352, + 0.923, + 0.374 + ], + "angle": 0, + "content": "[193] Weizhe Yuan et al. Following length constraints in instructions. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.374, + 0.923, + 0.397 + ], + "angle": 0, + "content": "[194] Kian Ahrabian et al. The hitchhiker's guide to human alignment with* po. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.397, + 0.923, + 0.42 + ], + "angle": 0, + "content": "[195] Wei Liu et al. Length desensitization in directed preference optimization. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.42, + 0.923, + 0.453 + ], + "angle": 0, + "content": "[196] Guanzheng Chen et al. LongPO: Long context self-evolution of large language models through short-to-long preference optimization. ICLR, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.454, + 0.923, + 0.476 + ], + "angle": 0, + "content": "[197] Prasann Singhal et al. A long way to go: Investigating length correlations in RLHF. COLM, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.476, + 0.923, + 0.499 + ], + "angle": 0, + "content": "[198] Kyle Richardson et al. Understanding the logic of direct preference alignment through logic. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.499, + 0.923, + 0.533 + ], + "angle": 0, + "content": "[199] Karel D'Oosterlinck et al. Anchored preference optimization and contrastive revisions: Addressing underspecification in alignment. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.534, + 0.923, + 0.556 + ], + "angle": 0, + "content": "[200] Arka Pal et al. Smaug: Fixing failure modes of preference optimisation with dpo-positive. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.556, + 0.923, + 0.579 + ], + "angle": 0, + "content": "[201] Yuzi Yan et al. 3d-properties: Identifying challenges in DPO and charting a path forward. ICLR, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.579, + 0.923, + 0.602 + ], + "angle": 0, + "content": "[202] Duanyu Feng et al. Towards analyzing and understanding the limitations of dpo: A theoretical perspective. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.602, + 0.923, + 0.624 + ], + "angle": 0, + "content": "[203] Hui Yuan et al. A common pitfall of margin-based language model alignment: Gradient entanglement. *ICLR*, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.625, + 0.923, + 0.647 + ], + "angle": 0, + "content": "[204] Noam Razin et al. Unintentional unalignment: Likelihood displacement in direct preference optimization. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.647, + 0.923, + 0.67 + ], + "angle": 0, + "content": "[205] Zhengyan Shi et al. 
Understanding likelihood over-optimisation in direct alignment algorithms. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.67, + 0.923, + 0.692 + ], + "angle": 0, + "content": "[206] Yong Lin et al. Mitigating the alignment tax of RLHF. EMNLP, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.692, + 0.923, + 0.715 + ], + "angle": 0, + "content": "[207] Megh Thakkar et al. A deep dive into the trade-offs of parameter-efficient preference alignment techniques. ACL, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.715, + 0.923, + 0.739 + ], + "angle": 0, + "content": "[208] Keming Lu et al. Online merging optimizers for boosting rewards and mitigating tax in alignment. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.739, + 0.923, + 0.761 + ], + "angle": 0, + "content": "[209] Angelica Chen et al. Preference learning algorithms do not learn preference rankings. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.761, + 0.923, + 0.794 + ], + "angle": 0, + "content": "[210] Wenyi Xiao et al. A comprehensive survey of direct preference optimization: Datasets, theories, variants, and applications. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.795, + 0.923, + 0.818 + ], + "angle": 0, + "content": "[211] Pierre Harvey Richemond et al. Offline regularised reinforcement learning for large language models alignment. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.818, + 0.923, + 0.84 + ], + "angle": 0, + "content": "[212] Christian Wirth et al. A survey of preference-based reinforcement learning methods. JMLR, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.84, + 0.923, + 0.862 + ], + "angle": 0, + "content": "[213] Jiaming Ji et al. Ai alignment: A comprehensive survey. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.863, + 0.923, + 0.886 + ], + "angle": 0, + "content": "[214] Xinpeng Wang et al. 
On the essence and prospect: An investigation of alignment approaches for big models. *IJCAI*, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.886, + 0.923, + 0.92 + ], + "angle": 0, + "content": "[215] Hannah Rose Kirk et al. The past, present and better future of feedback learning in large language models for subjective human preferences and values. EMNLP, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.92, + 0.923, + 0.932 + ], + "angle": 0, + "content": "[216] Patrick Fernandes et al. Bridging the gap: A survey on integrating" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.056, + 0.923, + 0.932 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "20" + }, + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.056, + 0.485, + 0.068 + ], + "angle": 0, + "content": "(human) feedback for natural language generation. TACL, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.068, + 0.492, + 0.09 + ], + "angle": 0, + "content": "[217] Timo Kaufmann et al. A survey of reinforcement learning from human feedback. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.091, + 0.492, + 0.124 + ], + "angle": 0, + "content": "[218] Ralph Allan Bradley and Milton E Terry. Rank analysis of incomplete block designs: I. the method of paired comparisons. Biometrika, 1952." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.125, + 0.492, + 0.147 + ], + "angle": 0, + "content": "[219] John Schulman et al. Proximal policy optimization algorithms. arXiv, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.147, + 0.492, + 0.18 + ], + "angle": 0, + "content": "[220] Arash Ahmadian et al. Back to basics: Revisiting reinforce style optimization for learning from human feedback in llms. ACL, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.181, + 0.492, + 0.214 + ], + "angle": 0, + "content": "[221] Ziniu Li et al. ReMax: A simple, effective, and efficient reinforcement learning method for aligning large language models. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.215, + 0.492, + 0.239 + ], + "angle": 0, + "content": "[222] Zhihong Shao et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.239, + 0.492, + 0.261 + ], + "angle": 0, + "content": "[223] Jian Hu. Reinforce++: A simple and efficient approach for aligning large language models. arXiv, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.262, + 0.492, + 0.283 + ], + "angle": 0, + "content": "[224] Chris Lu et al. Discovering preference optimization algorithms with and for large language models. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.284, + 0.492, + 0.307 + ], + "angle": 0, + "content": "[225] Hanyang Zhao et al. RainbowPO: A unified framework for combining improvements in preference optimization. ICLR, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.307, + 0.492, + 0.329 + ], + "angle": 0, + "content": "[226] Hamish Ivison et al. Unpacking dpo and ppo: Disentangling best practices for learning from preference feedback. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.33, + 0.492, + 0.352 + ], + "angle": 0, + "content": "[227] Amir Saeidi et al. Insights into alignment: Evaluating dpo and its variants across multiple tasks. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.352, + 0.492, + 0.385 + ], + "angle": 0, + "content": "[228] Andi Nika et al. Reward model learning vs. direct policy optimization: a comparative analysis of learning from human preferences. ICML, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.386, + 0.492, + 0.409 + ], + "angle": 0, + "content": "[229] Ziniu Li et al. When is rl better than dpo in rlhf? a representation and optimization perspective. *ICLR Tiny Papers*, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.409, + 0.492, + 0.431 + ], + "angle": 0, + "content": "[230] Yao Zhao et al. Slic-hf: Sequence likelihood calibration with human feedback. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.432, + 0.492, + 0.453 + ], + "angle": 0, + "content": "[231] Feifan Song et al. Preference ranking optimization for human alignment. AAAI, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.454, + 0.492, + 0.476 + ], + "angle": 0, + "content": "[232] Chaoqi Wang et al. Preference optimization with multi-sample comparisons. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.477, + 0.492, + 0.499 + ], + "angle": 0, + "content": "[233] Ziniu Li et al. Policy optimization in rlhf: The impact of out-of-preference data. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.5, + 0.492, + 0.533 + ], + "angle": 0, + "content": "[234] Lei Li et al. Improving reasoning ability of large language models via iterative uncertainty-based preference optimization. OpenReview, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.534, + 0.492, + 0.545 + ], + "angle": 0, + "content": "[235] Abhimanyu Dubey et al. The llama 3 herd of models. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.546, + 0.492, + 0.568 + ], + "angle": 0, + "content": "[236] Lily H Zhang and Rajesh Ranganath. Win rate is all that can matter from preference data alone. OpenReview, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.568, + 0.492, + 0.591 + ], + "angle": 0, + "content": "[237] Ganqu Cui et al. Ultrafeedback: Boosting language models with high-quality feedback. ICML, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.591, + 0.492, + 0.613 + ], + "angle": 0, + "content": "[238] Jiaming Ji et al. Pku-saferlhf: Towards multi-level safety alignment for llms with human preference. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.614, + 0.492, + 0.635 + ], + "angle": 0, + "content": "[239] Zhilin Wang et al. Helpsteer: Multi-attribute helpfulness dataset for steerlm. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.636, + 0.467, + 0.648 + ], + "angle": 0, + "content": "[240] Hunter Lightman et al. Let's verify step by step. ICLR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.648, + 0.492, + 0.669 + ], + "angle": 0, + "content": "[241] Kawin Ethayarajh et al. Understanding dataset difficulty with v-usable information. ICML, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.67, + 0.492, + 0.692 + ], + "angle": 0, + "content": "[242] Banghua Zhu et al. Starling-7b: Improving llm helpfulness & harmlessness with rlaif, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.693, + 0.492, + 0.715 + ], + "angle": 0, + "content": "[243] Wing Lian et al. Openorca: An open dataset of gpt augmented flan reasoning traces, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.716, + 0.492, + 0.749 + ], + "angle": 0, + "content": "[244] Luigi Daniele and Suphavadeeprasit. Amplify-instruct: Synthetically generated diverse multi-turn conversations for efficient llm training., 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.749, + 0.492, + 0.772 + ], + "angle": 0, + "content": "[245] Jiaming Ji et al. Beavertails: Towards improved safety alignment of llm via a human-preference dataset. NeurIPS, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.773, + 0.492, + 0.794 + ], + "angle": 0, + "content": "[246] Andrew Maas et al. Learning word vectors for sentiment analysis. ACL, 2011." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.795, + 0.492, + 0.817 + ], + "angle": 0, + "content": "[247] Michael Volske et al. Tl; dr: Mining reddit to learn automatic summarization. EMNLP Workshop, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.818, + 0.492, + 0.851 + ], + "angle": 0, + "content": "[248] Deep Ganguli et al. Red teaming language models to reduce harms: Methods, scaling behaviors, and lessons learned. arXiv, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.852, + 0.492, + 0.874 + ], + "angle": 0, + "content": "[249] Karl Cobbe et al. Training verifiers to solve math word problems. arXiv, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.875, + 0.492, + 0.897 + ], + "angle": 0, + "content": "[250] Yann Dubois et al. Length-controlled alpacaeval: A simple way to debias automatic evaluators. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.898, + 0.492, + 0.919 + ], + "angle": 0, + "content": "[251] Lianmin Zheng et al. Judging llm-as-a-judge with mt-bench and chatbot arena. NeurIPS, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.92, + 0.492, + 0.932 + ], + "angle": 0, + "content": "[252] Andy Zou et al. Universal and transferable adversarial attacks on" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.056, + 0.492, + 0.932 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.544, + 0.056, + 0.767, + 0.068 + ], + "angle": 0, + "content": "aligned language models. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.068, + 0.922, + 0.09 + ], + "angle": 0, + "content": "[253] Tianle Li et al. From live data to high-quality benchmarks: The arena-hard pipeline. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.091, + 0.922, + 0.112 + ], + "angle": 0, + "content": "[254] Stephanie Lin et al. Truthfulqa: Measuring how models mimic human falsehoods. arXiv, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.113, + 0.922, + 0.135 + ], + "angle": 0, + "content": "[255] Jeffrey Zhou et al. Instruction-following evaluation for large language models. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.136, + 0.922, + 0.158 + ], + "angle": 0, + "content": "[256] Mirac Suzgun et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.159, + 0.922, + 0.181 + ], + "angle": 0, + "content": "[257] Dan Hendrycks et al. Measuring mathematical problem solving with the math dataset. arXiv, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.182, + 0.922, + 0.203 + ], + "angle": 0, + "content": "[258] David Rein et al. Gpqa: A graduate-level google-proof q&a benchmark. COLM, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.204, + 0.922, + 0.226 + ], + "angle": 0, + "content": "[259] Zayne Sprague et al. Musr: Testing the limits of chain-of-thought with multistep soft reasoning. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.227, + 0.922, + 0.249 + ], + "angle": 0, + "content": "[260] Yubo Wang et al. Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.25, + 0.922, + 0.272 + ], + "angle": 0, + "content": "[261] Fengqing Jiang et al. Identifying and mitigating vulnerabilities in llm-integrated applications. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.273, + 0.922, + 0.295 + ], + "angle": 0, + "content": "[262] Ning Ding et al. Enhancing chat language models by scaling high-quality instructional conversations. arXiv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.296, + 0.922, + 0.317 + ], + "angle": 0, + "content": "[263] Qiyu Wu et al. Word alignment as preference for machine translation. EMNLP, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.318, + 0.922, + 0.34 + ], + "angle": 0, + "content": "[264] Yinghao Hu et al. Fine-tuning large language models for improving factuality in legal question answering. COLING, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.341, + 0.922, + 0.363 + ], + "angle": 0, + "content": "[265] Leonidas Gee et al. Code-optimise: Self-generated preference data for correctness and efficiency. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.364, + 0.922, + 0.385 + ], + "angle": 0, + "content": "[266] Yibo Miao et al. Aligning codellms with direct preference optimization. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.386, + 0.922, + 0.409 + ], + "angle": 0, + "content": "[267] Kechi Zhang et al. Codedpo: Aligning code models with self generated and verified source code. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.41, + 0.922, + 0.431 + ], + "angle": 0, + "content": "[268] Guoxin Chen et al. Step-level value preference optimization for mathematical reasoning. EMNLP, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.432, + 0.922, + 0.464 + ], + "angle": 0, + "content": "[269] Wen Lai et al. LLMs beyond English: Scaling the multilingual capability of LLMs with cross-lingual feedback. ACL Findings, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.466, + 0.922, + 0.488 + ], + "angle": 0, + "content": "[270] Yuxin Chen et al. On softmax direct preference optimization for recommendation. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.489, + 0.922, + 0.511 + ], + "angle": 0, + "content": "[271] Zhuoxi Bai et al. Finetuning large language model for personalized ranking. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.511, + 0.922, + 0.533 + ], + "angle": 0, + "content": "[272] Yi Gu et al. Diffusion-rpo: Aligning diffusion models through relative preference optimization. 
arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.534, + 0.922, + 0.556 + ], + "angle": 0, + "content": "[273] Shivanshu Shekhar et al. See-dpo: Self entropy enhanced direct preference optimization. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.557, + 0.922, + 0.579 + ], + "angle": 0, + "content": "[274] Shufan Li et al. Aligning diffusion models by optimizing human utility. NeurIPS, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.58, + 0.922, + 0.612 + ], + "angle": 0, + "content": "[275] Navonil Majumder et al. Tango 2: Aligning diffusion-based text-to-audio generations through direct preference optimization. ACM MM, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.613, + 0.922, + 0.636 + ], + "angle": 0, + "content": "[276] Bram Wallace et al. Diffusion model alignment using direct preference optimization. CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.636, + 0.922, + 0.658 + ], + "angle": 0, + "content": "[277] Shentao Yang et al. A dense reward view on aligning text-to-image diffusion with preference. ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.659, + 0.922, + 0.681 + ], + "angle": 0, + "content": "[278] Kai Yang et al. Using human feedback to fine-tune diffusion models without any reward model. CVPR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.682, + 0.922, + 0.704 + ], + "angle": 0, + "content": "[279] Buhua Liu et al. Alignment of diffusion models: Fundamentals, challenges, and future. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.705, + 0.922, + 0.737 + ], + "angle": 0, + "content": "[280] Shengzhi Li et al. Multi-modal preference alignment remedies degradation of visual instruction tuning on language models. ACL, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.738, + 0.922, + 0.76 + ], + "angle": 0, + "content": "[281] Ziqi Liang et al. 
AlignCap: Aligning speech emotion captioning to human preferences. EMNLP, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.761, + 0.922, + 0.783 + ], + "angle": 0, + "content": "[282] Elmira Amirloo et al. Understanding alignment in multimodal llms: A comprehensive study. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.784, + 0.922, + 0.806 + ], + "angle": 0, + "content": "[283] Jinlan Fu et al. Chip: Cross-modal hierarchical direct preference optimization for multimodal llms. arXiv, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.807, + 0.922, + 0.839 + ], + "angle": 0, + "content": "[284] Ruohong Zhang et al. Direct preference optimization of video large multimodal models from language model reward. arXiv, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.84, + 0.922, + 0.874 + ], + "angle": 0, + "content": "[285] Yuxi Xie et al. V-DPO: Mitigating hallucination in large vision language models via vision-guided direct preference optimization. EMNLP Findings, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.875, + 0.922, + 0.897 + ], + "angle": 0, + "content": "[286] Peng Xu et al. Lvlm-ehub: A comprehensive evaluation benchmark for large vision-language models. TPAMI, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.898, + 0.922, + 0.93 + ], + "angle": 0, + "content": "[287] Zhongzhan Huang et al. A causality-aware paradigm for evaluating creativity of multimodal large language models. TPAMI, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.508, + 0.056, + 0.922, + 0.93 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_11xxx/2503.11701/f561bad2-8e9b-4fb7-9083-b32d2bfd8f1f_origin.pdf b/data/2025/2503_11xxx/2503.11701/f561bad2-8e9b-4fb7-9083-b32d2bfd8f1f_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..bc35b2196f2cc55d5afcb7fe0b91739d68f939f5 --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/f561bad2-8e9b-4fb7-9083-b32d2bfd8f1f_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33d9968dce449e0efc3a3bb1450fe7ea34dcec36ed6d534a445ab4dd321f41f8 +size 1136222 diff --git a/data/2025/2503_11xxx/2503.11701/full.md b/data/2025/2503_11xxx/2503.11701/full.md new file mode 100644 index 0000000000000000000000000000000000000000..d760d6a043a7a4100634ab7679c1e67ed77f281e --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/full.md @@ -0,0 +1,709 @@ +# A Survey of Direct Preference Optimization + +Shunyu Liu, Wenkai Fang, Zetian Hu, Junjie Zhang, Yang Zhou, Kongcheng Zhang, Rongcheng Tu, Ting-En Lin, Fei Huang, Mingli Song, Yongbin Li, and Dacheng Tao, Fellow, IEEE + +Abstract—Large Language Models (LLMs) have demonstrated unprecedented generative capabilities, yet their alignment with human values remains critical for ensuring helpful and harmless deployments. While Reinforcement Learning from Human Feedback (RLHF) has emerged as a powerful paradigm for aligning LLMs with human preferences, its reliance on complex reward modeling introduces inherent trade-offs in computational efficiency and training stability. In this context, Direct Preference Optimization (DPO) has recently gained prominence as a streamlined alternative that directly optimizes LLMs using human preferences, thereby circumventing the need for explicit reward modeling. 
Owing to its theoretical elegance and computational efficiency, DPO has rapidly attracted substantial research efforts exploring its various implementations and applications. However, this field currently lacks systematic organization and comparative analysis. In this survey, we conduct a comprehensive overview of DPO and introduce a novel taxonomy, categorizing previous works into four key dimensions: data strategy, learning framework, constraint mechanism, and model property. We further present a rigorous empirical analysis of DPO variants across standardized benchmarks. Additionally, we discuss real-world applications, open challenges, and future directions for DPO. This work delivers both a conceptual framework for understanding DPO and practical guidance for practitioners, aiming to advance robust and generalizable alignment paradigms. All collected resources are available and will be continuously updated at https://github.com/liushunyu/awesome-direct-preference-optimization. + +Index Terms—Alignment, Direct Preference Optimization, Large Language Models, Reinforcement Learning from Human Feedback. + +# 1 INTRODUCTION + +The rapid advancement of Large Language Models (LLMs) has revolutionized artificial intelligence [1, 2, 3, 4, 5, 6, 7, 8], enabling unprecedented generative capabilities across diverse applications, such as dialogue systems [9, 10], code generation [11, 12, 13], and medical diagnosis [14, 15, 16, 17]. Models like OpenAI-o1 [18] and DeepSeekR1 [19] have demonstrated remarkable proficiency in understanding and generating human-like text, outperforming traditional language processing techniques [20]. However, their immense power also introduces significant risks: LLMs may inadvertently produce harmful content (e.g., jailbreak suggestion) [21], exhibit hallucination behaviors (e.g., misinformation) [22], or propagate sociocultural stereotypes (e.g., biased recommendations) [23]. 
Ensuring that these models align with human values (producing outputs that are helpful, harmless, and honest) has thus become a cornerstone of responsible AI development [24]. + +The critical challenge of aligning LLMs with human values stems from the inherent complexity of encoding abstract + +This research is supported by the RIE2025 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) (Award I2301E0026), administered by A\*STAR, as well as supported by Alibaba Group and NTU Singapore through Alibaba-NTU Global e-Sustainability CorpLab (ANGEL). (Corresponding author: Dacheng Tao.) Shunyu Liu, Junjie Zhang, Rongcheng Tu and Dacheng Tao are with Nanyang Technological University, Singapore (e-mail: shunyu.liu@ntu.edu.sg; junjie.zhang@ntu.edu.sg; turongcheng@gmail.com; dacheng.tao@ntu.edu.sg). Wenkai Fang, Yang Zhou, Kongcheng Zhang, and Mingli Song are with the College of Computer Science and Technology, Zhejiang University, China (e-mail: wenkfang@zju.edu.cn; imzhouyang@zju.edu.cn; zhangkc@zju.edu.cn; brooksong@zju.edu.cn). Zetian Hu is with the School of Aerospace Engineering, Tsinghua University, China (e-mail: huzt22@mails.tsinghua.edu.cn). Ting-En Lin, Fei Huang, and Yongbin Li are with the Tongyi Lab, Alibaba Group, China (e-mail: ting-en.lte@alibaba-inc.com; f.huang@alibaba-inc.com; shuide.lyb@alibaba-inc.com). + +ethical principles into concrete model behaviors [25, 26, 27]. Traditional approaches, such as rule-based filtering or supervised learning on curated datasets, often prove inadequate due to their inability to generalize across diverse contexts and adapt to evolving societal norms [28]. The emergence of preference-based alignment paradigms addresses these limitations by framing the problem as optimizing for human feedback rather than inflexible heuristics [29, 30, 31, 32]. 
This shift recognizes that LLM decision-making often involves nuanced trade-offs between competing values, requiring flexible frameworks capable of incorporating subjective human preferences [33]. + +Building upon these insights, Reinforcement Learning from Human Feedback (RLHF) [34, 35] has emerged as the predominant alignment paradigm, leveraging human preferences to guide model optimization. In the RLHF pipeline, human annotators first rank the outputs generated by the language model, and these comparisons are used to train a reward model that quantifies human preferences. The language model is then fine-tuned using RL guided by this reward model, enabling the language model to align with human values by maximizing the predicted rewards. The success of RLHF in aligning models like ChatGPT [36, 37] and Claude [38, 39] underscores its practical utility. By translating subjective human preferences into an objective reward signal, RLHF facilitates the optimization of model behavior for value alignment. However, this RLHF paradigm suffers from critical limitations of computational complexity and training instability. Training a separate reward model demands substantial computational resources and high-quality human preference data, which scales poorly across different domains. Moreover, the RL phase often struggles with optimization challenges, such as reward hacking [40] and mode collapse [41]. + +These limitations have spurred interest in alternative + +![](images/eec766babe90c18e92b263abf2f3723da32c85a958aae6f3eb98f4722ac37fa7.jpg) +Fig. 1: A taxonomy of DPO. We categorize existing DPO works into four branches: data strategy, learning framework, constraint mechanism, and model property. Different colored boxes indicate different categories and their corresponding representative references. + +alignment methods that bypass reward modeling while preserving the benefits of preference-based learning. 
Direct Preference Optimization (DPO) [74, 210] represents a groundbreaking shift in this direction. Unlike RLHF, DPO reframes alignment as a supervised learning problem, directly optimizing the LLM policy using preference data without explicit reward modeling. By leveraging a closed-form mapping between reward functions and optimal policies, DPO eliminates the need for iterative RL training, reducing computational overhead and improving stability. Due to its inherent advantages, DPO has rapidly gained increasing attention from research communities. Existing studies vary widely in data strategies (e.g., point-wise v.s. pair-wise feedback) [67, 211], learning frameworks (e.g., offline v.s. online learning) [121, 122, 126], constraint mechanisms (e.g., different divergence constraints) [169, 171], and model properties (e.g., length bias) [191, 195]. Recent advancements in DPO variants have demonstrated remarkable efficacy in enhancing model alignment with human preferences, achieving unprecedented success across diverse domains [32]. + +These developments position DPO-based approaches as a compelling alternative to conventional RLHF paradigms for preference alignment tasks. However, despite its promise, the DPO research landscape remains fragmented. + +Several surveys related to DPO have been published in recent years, yet they exhibit notable limitations in their scope and analysis of DPO. (1) Scope limitations. While an early survey of [212] presents a comprehensive overview of preference-based RL methods, it predates the advent of DPO and does not address its applications to modern LLMs. Recent surveys on alignment [24, 26, 213, 214] provide broad overviews of LLM alignment techniques but only offer cursory summaries of DPO-related approaches without in-depth analysis. Similarly, surveys on learning from human feedback [30, 215, 216, 217] also only briefly mention DPO as a potential alternative. (2) Taxonomy deficiencies. Gao et al. [29] and Winata et al. 
[32] introduce a simplified taxonomy for preference learning, while overlooking technical distinctions within its broad categorization. In contrast, Wang et al. [31] attempt to classify preference learning across dimensions + +such as reinforcement learning, reward modeling, feedback, and optimization. However, this taxonomy suffers from significant conceptual overlaps (e.g. reinforcement learning inherently involves optimization). A recent work by Xiao et al. [210] categorizes DPO studies through isolated research questions, which, while useful for problem identification, fragments the methodological connections. Our survey addresses these gaps by presenting the first comprehensive analysis specifically focused on DPO. The main contributions of this survey are summarized as follows: + +- In this survey, we introduce a novel taxonomy that categorizes existing DPO works into four key dimensions based on different components of the DPO loss: data strategy, learning framework, constraint mechanism, and model property, as shown in Fig. 1. This taxonomy provides a systematic framework for understanding the methodological evolution of DPO and highlights the key distinctions between different variations. +- We conduct a rigorous empirical analysis of DPO variants across standardized benchmarks, revealing critical insights into their performance in diverse scenarios. This analysis offers a comprehensive evaluation of DPO variants and provides practical guidance for practitioners. +- We discuss real-world applications of DPO and highlight its potential to democratize alignment research by enabling efficient and scalable preference learning across diverse domains. We also outline open challenges and future directions for DPO research, emphasizing the need for robust and generalizable alignment paradigms. + +The remainder of this survey is organized as follows. Section 2 introduces the background and formulation of DPO. 
Section 3 presents a taxonomy of DPO, categorizing existing works based on key dimensions. Section 4 describes standardized benchmarks for evaluating DPO methods and presents empirical results. Section 5 discusses real-world applications of DPO and highlights its potential. Section 6 outlines open challenges and future directions for DPO research. Finally, Section 7 concludes the survey. + +# 2 BACKGROUND AND FORMULATION + +Preference learning aims to train language model policies to generate responses that better align with human preferences. Specifically, we denote the language model policy as $\pi(y|x)$ , where $x$ represents the input prompt and $y$ is a candidate response (completion). A language model can be viewed as an autoregressive function that sequentially predicts tokens based on prior context. Mathematically, this is expressed as: $\pi(y|x) = \prod_{t=1}^{T} \pi(y_t | y_{1:t-1}, x)$, where $y_{1:t-1}$ denotes the previously generated tokens. [Intervening text covering the Bradley-Terry preference model (Eq. 1) and the KL-constrained RLHF objective (Eq. 3) was lost during extraction.] $\beta > 0$ is a hyperparameter that controls the strength of the Kullback-Leibler (KL) divergence penalty. Here, the term $\log \pi_{\theta}(\cdot |x) / \pi_{\mathrm{ref}}(\cdot |x)$ represents the KL divergence between the current policy $\pi_{\theta}$ and a reference policy $\pi_{\mathrm{ref}}$ . In practice, the reference policy $\pi_{\mathrm{ref}}$ is set to the SFT model $\pi_{\mathrm{sft}}$ , ensuring that the updated policy remains close to the initial model. + +To optimize the above objective, Proximal Policy Optimization (PPO) [219] has emerged as a promising RL algorithm for LLMs. PPO stabilizes training by constraining policy updates within a trust region via a clipped objective, which prevents significant deviations from the previous
To address this, recent methods, such as RLOO [220], ReMax [221], GRPO [222], and Reinforce++ [223], introduce critic-free advantage estimation to reduce resource demands while maintaining stable optimization, making them more scalable for large-scale LLM training. + +# 2.2 Direct Preference Optimization + +DPO offers an alternative that streamlines the training process by directly optimizing the policy with preference data [74, 224, 225, 226, 227, 228, 229], thereby eliminating the need for explicit reward modeling in RLHF. The key idea of DPO is a closed-form solution of Eq. 3 that connects reward with the optimal policies. Specifically, the optimal policy corresponding to a given $r$ is defined as follows: + +$$ +\pi^ {*} (y | x) = \frac {1}{Z (x)} \pi_ {\mathrm {r e f}} (y | x) \exp \left(\frac {1}{\beta} r (x, y)\right), \tag {4} +$$ + +where the partition function $Z(x)$ is defined as: + +$$ +Z (x) = \sum_ {y} \pi_ {\mathrm {r e f}} (y | x) \exp \left(\frac {1}{\beta} r (x, y)\right). \tag {5} +$$ + +By rearranging the above equation, the reward $r$ can be recovered from the optimal policy $\pi^{*}$ : + +$$ +r (x, y) = \beta \log \frac {\pi^ {*} (y | x)}{\pi_ {\operatorname {r e f}} (y | x)} + \beta \log Z (x). \tag {6} +$$ + +Notice that the partition function $Z(x)$ depends only on the prompt $x$ . By substituting this expression into the preference model of Eq. 1, the preference probability model that $y_{w}$ is preferred over $y_{l}$ becomes: + +$$ +P \left(y _ {w} \succ y _ {l} | x\right) = \sigma \left(\beta \log \frac {\pi^ {*} \left(y _ {w} \mid x\right)}{\pi_ {\text {r e f}} \left(y _ {w} \mid x\right)} - \beta \log \frac {\pi^ {*} \left(y _ {l} \mid x\right)}{\pi_ {\text {r e f}} \left(y _ {l} \mid x\right)}\right). 
\tag {7} +$$ + +Based on the above preference probability model, DPO directly optimizes the language mode policy $\pi_{\theta}$ by minimizing the following negative log-likelihood loss function: + +$$ +\begin{array}{l} \mathcal {L} _ {\mathrm {D P O}} (\theta) = \\ - \mathbb {E} _ {(x, y _ {w}, y _ {l}) \sim \mathcal {D}} \left[ \log \sigma \left(\beta \log \frac {\pi_ {\theta} (y _ {w} | x)}{\pi_ {\text {r e f}} (y _ {w} | x)} - \beta \log \frac {\pi_ {\theta} (y _ {l} | x)}{\pi_ {\text {r e f}} (y _ {l} | x)}\right) \right], \tag {8} \\ \end{array} +$$ + +where the KL constraint is implicitly integrated through the use of the reference model $\pi_{\mathrm{ref}}$ . By minimizing this DPO loss, we directly train the policy to satisfy human preferences without resorting to a separate reward modeling stage or using reinforcement learning optimization as in RLHF, significantly reducing implementation complexity while improving training stability. + +# 2.3 Other Preference Optimization + +In addition to DPO, several concurrent preference optimization methods [190, 230, 231] have been proposed that offer alternative approaches to RLHF. These methods explore different strategies for optimizing LLMs to align with human preference without RL. Below, we provide a brief introduction to these approaches. + +# 2.3.1 Sequence Likelihood Calibration + +Zhao et al. [230] propose Sequence Likelihood Calibration with Human Feedback (SLiC-HF) to directly align LLMs with human preferences. Specifically, the loss function of SLiC-HF is defined as follows: + +$$ +\begin{array}{l} \mathcal {L} _ {\mathrm {S L i C - H F}} (\theta) = \max (0, \delta - \log \pi_ {\theta} (y _ {w} | x) + \log \pi_ {\theta} (y _ {l} | x)) \\ - \lambda \log \pi_ {\theta} (y ^ {*} | x), \tag {9} \\ \end{array} +$$ + +where the first term is the rank calibration loss with $\delta$ as a margin hyperparameter, and the second term is the cross-entropy regularization loss with $\lambda$ as a regularization weight. 
$y^{*}$ is obtained from either high-quality supervised responses in the SFT dataset or the top-ranked candidate response generated by the SFT model. + +# 2.3.2 Rank Responses to Align Human Feedback + +Yuan et al. [190] introduce Rank Responses to align Human Feedback (RRHF) for LLMs. RRHF extends pair-wise ranking by considering the list-wise ranking order of multiple responses, thus better utilizing the preference information. For an input prompt $x$ and $N$ candidate responses $\{y_i\}_{i=1}^N$ , it optimizes the model to assign higher probabilities to higher-ranked responses via a ranking loss and directly supervises the best response using cross-entropy as follows: + +$$ +\begin{array}{l} \mathcal {L} _ {\mathrm {R R H F}} (\theta) = \sum_ {r _ {i} < r _ {j}} \max \left(0, \frac {\log \pi_ {\theta} (y _ {i} | x)}{| | y _ {i} | |} - \frac {\log \pi_ {\theta} (y _ {j} | x)}{| | y _ {j} | |}\right) \\ - \lambda \log \pi_ {\theta} \left(y ^ {*} \mid x\right), \tag {10} \\ \end{array} +$$ + +where $r_i = r_\phi(x, y_i)$ represents the reward of the response $y_i$ and $y^* = \arg \max_{y_i} r_i$ is the response with the highest reward. Although RRHF avoids the need for reinforcement learning in RLHF, it still utilizes a reward model $r_\phi$ to rank candidate responses based on human preferences. + +# 2.3.3 Preference Ranking Optimization + +Similarly, Song et al. [231] propose Preference Ranking Optimization (PRO) to align LLMs with human preferences by leveraging multiple responses $\{y_{i}\}_{i = 1}^{N}$ with the human-annotated order $y_{1} > y_{2} > \dots >y_{N}$ . 
The loss function of PRO is defined as follows: + +$$ +\mathcal {L} _ {\mathrm {P R O}} (\theta) = - \sum_ {i = 1} ^ {N - 1} \log \frac {\exp \left(\frac {1}{\| y _ {i} \|} \log \pi_ {\theta} \left(y _ {i} | x\right) / \mathcal {T} _ {i} ^ {i}\right)}{\sum_ {j = i} ^ {N} \exp \left(\frac {1}{\| y _ {j} \|} \log \pi_ {\theta} \left(y _ {j} | x\right) / \mathcal {T} _ {i} ^ {j}\right)}, \tag {11} +$$ + +where the dynamic penalty temperature is defined as $\mathcal{T}_i^j = 1 / (r_\phi (x,y^j) - r_\phi (x,y^i))$ and $\mathcal{T}_i^i = \min_{i < j}\mathcal{T}_i^j$ . This temperature ensures that the probability gap between higher-ranked and lower-ranked responses is adaptively scaled according to their reward differences, thereby stabilizing the optimization process. + +# 3 A TAXONOMY OF DPO + +In this section, we introduce a novel taxonomy that categorizes existing DPO works based on four key dimensions: data strategy, learning framework, constraint mechanism, and model property. As illustrated in Fig. 1, these four dimensions are derived from different components of the DPO loss, providing a systematic framework for understanding the methodological evolution of DPO and highlighting the key distinctions between different variations. + +# 3.1 Data Strategy of DPO + +The data strategy constitutes the foundational pillar of DPO, focusing on how to leverage diverse types of preference data for training LLMs. As shown in Fig. 2, our taxonomy identifies three principal axes of data strategy: quality, feedback, and granularity. + +# 3.1.1 Data Quality + +The quality of preference data is a critical factor in determining the effectiveness of DPO training. High-quality data ensures that LLMs effectively learn to align with human preferences, while low-quality data may introduce noise and bias, leading to suboptimal model performance. We categorize data quality considerations into three key aspects: heterogeneity, distinguishability, and noise. + +(a) Data Heterogeneity. 
Conventional DPO methods assume uniform human preferences when annotating data, thereby overlooking the diversity among annotators. This assumption often skews the model toward the preferences of the majority while neglecting minority viewpoints, potentially leading to biases and unfair treatment of underrepresented groups. To address this issue, Chidambaram et al. [42] propose EM-DPO, which learns the distribution of different preference types and their corresponding response strategies. Building on this, they introduce the MinMax-DPO algorithm, which selects a strategy by minimizing the maximum regret across subgroups, ensuring a more balanced representation of preferences among all groups. MallowsPO [43] decomposes the implicit rewards in DPO into prompt dispersion and response scaling rewards. It introduces a novel objective function to capture human preferences for diverse responses to the same prompt. GRPO [44] formulates an objective function that minimizes the loss for the worst-case group, thereby ensuring fairness by prioritizing the disadvantaged groups in the optimization process. GDPO [45] models the language generation process as a combination of belief distribution prediction and belief-based response generation. The corresponding GDPO loss function consists of belief calibration loss and belief-conditioned preference alignment loss. The former encourages the model to capture the diversity of beliefs across groups, while the latter ensures that generated responses align with the given belief. + +(b) Data Distinguishability. A key limitation of DPO is its inability to account for the distinguishability of preference between responses [46, 50, 51, 56, 57]. In some cases, the preferred response is only marginally better than the dispreferred one, while in others, the dispreferred response contains harmful or misleading content, making it significantly worse. 
Thus, optimization should focus more on cases with substantial preference differences while reducing the effort spent on minor differences. However, most existing methods treat all samples equally, ignoring this data distinguishability. To address this, ODPO [46] introduces a monotonically increasing offset function, requiring the reward of the preferred response to exceed that of the dispreferred one by a certain margin. This ensures stronger updates for larger preference gaps. Similarly, Ada-DPO [54] introduces an instance-specific nonlinear scaling parameter, assigning larger weights to strong preference pairs and smaller weights to ambiguous ones based on the reward differences, thereby capturing + +![](images/7d0b0c1f1f41595c2153dd474b54b2117d70052e28f86633cea08504dba7746a.jpg) +Fig. 2: An overview of DPO data strategy. + +different levels of data distinguishability. DPO-rc [48] also incorporates the preference reward difference as a coefficient in the loss function. $\alpha$ -DPO [49] introduces an adaptive preference distribution to obtain dynamic reward margins based on the distribution difference between the policy and reference models. $\beta$ -DPO [51] analyzes the optimal $\beta$ parameter for datasets with different reward margins, which dynamically adjusts $\beta$ based on batch-level reward differences. They also introduce $\beta$ -guided data filtering to prioritize valuable training data. Curri-DPO [53] sorts preference pairs by reward differences and trains progressively from large to small differences, enabling curricular learning. Similarly, MPO [47] utilizes a reward model to score responses generated by the SFT model, constructing a preference dataset and partitioning it based on preference differences to learn from simple to complex tasks. 
sDPO [55] computes reward accuracy for different datasets based on an initial target model and partitions the dataset in descending order of accuracy, allowing the model to first optimize on simpler samples. Ma et al. [58] propose a preference dataset construction method that adjusts update weights based on response accuracy, assigning lower weights when the model demonstrates higher proficiency. Furthermore, fDPO [52] enhances DPO training by filtering out samples where the generated response of the model policy surpasses the preferred dataset response in reward score. + +(c) Data Noise. Human-generated preference annotations often contain inconsistencies, errors, or noise, negatively affecting the performance of DPO. Such noisy data can mislead models, impairing their ability to accurately capture true preferences and generalize effectively to unseen data. Im and Li [64] analyze how noisy feedback influences the generalization performance of preference optimization, showing that increased noise results in higher generalization risks. Specifically, standard DPO loss functions can yield biased estimates under noisy conditions. To address this + +issue, rDPO [59] proposes to enhance DPO robustness against noisy annotations and improve overall training performance. Zhang et al. [63] introduce a noise-aware strategy leveraging annotator confidence and stability to identify and downweight noisy samples during training. They also propose an adaptive reward margin, emphasizing clean samples to improve learning effectiveness. Complementary to these approaches, PerpCorrect [60] employs a data-driven method to correct noisy annotations directly in the dataset. It trains a proxy language model on both clean and noisy samples, distinguishing noise through perplexity differences to improve dataset quality. To systematically explore noise effects, Gao et al. 
[65] artificially inject various noise types (e.g., Gaussian noise) into datasets, controlling noise intensity via hyperparameters. Their analysis highlights how noise impacts model alignment, guiding future research towards mitigating such negative effects. To address the vulnerability of DPO in noisy environments, ROPO [61] introduces a regularization term to enhance noise tolerance. Additionally, ROPO employs a robust-guided rejection sampling technique. This technique supplements the dataset with samples that contribute minimally to the loss, thereby improving the overall data quality. Kim et al. [62] propose the SPA framework, using model-generated responses and associated confidence scores to detect noise in annotations. SPA further incorporates smoothing techniques into the loss function to alleviate the noise problem. Finally, Wu et al. [66] categorize noise into two types: point noise (single annotation errors) and pairwise noise (errors between annotated pairs). While DPO naturally handles point noise well, it struggles with pairwise noise. Their proposed Dr. DPO introduces a novel loss function explicitly designed for robustness against both point and pairwise noise. + +# 3.1.2 Preference Feedback + +Preference feedback refers to the label signals provided by annotators regarding their preferences for different responses. It can be categorized into point-wise, pair-wise, and list-wise feedback. Point-wise feedback evaluates each response independently, assigning a score or labeling it as positive or negative. Pair-wise feedback compares two responses to determine which one is preferred, while list-wise feedback ranks multiple responses. + +(a) Point-Wise Feedback. Point-wise feedback is the basic form of feedback. It refers to the type of feedback where individual outputs or samples are evaluated independently, rather than through comparisons with other outputs. 
This form of feedback is characterized by its simplicity and directness, focusing on the quality or relevance of a single response or item. The predominant methodology in RLHF [35] employs point-wise reward signals generated by reward models to optimize policy models. Similarly, KTO [67] directly maximizes the utility of model generations using loss functions based on prospect theory rather than the log-likelihood of preferences. It requires only a binary signal indicating whether an output is desirable or undesirable for a given input. Furthermore, BCO [68] builds upon the concepts introduced in KTO and explores a new approach to aligning with binary signals. While KTO focuses on optimizing human utility, BCO introduces a binary classifier framework incorporating reward shift and distribution matching that implicitly + +minimizes the DPO loss. Chen et al. [72] and GPO [73] adopt explicit rewards using Noise Contrastive Alignment (NCA) and General Preference Model (GRM) respectively, and then directly optimize language model policies from point-wise preference data with rewards. However, some methods leverage implicit reward signals to refine model behaviors. To ensure that the learned implicit rewards are comparable to the ground-truth rewards, Cal-DPO [69] introduces a calibration term to the preference optimization objective, which prevents the likelihood of chosen responses from decreasing during training. ULMA [71] unifies human demonstration and point-wise preference data into a single framework and handles positive and negative samples with a hybrid objective function. Unlike them, DRO [211] adopts a simple mean-squared objective to optimize the model policy and value function jointly for a single trajectory. Additionally, AOT [70] casts the distributional preference constraint as an optimal transport problem with a convex cost function. The key idea is to minimize the violation of stochastic dominance using a smooth, convex cost function. 
+ +(b) Pair-Wise Feedback. Pair-wise feedback focuses on comparing pairs of data or actions to determine their relative quality or preference. Building upon the theoretical framework of RLHF, DPO implements this paradigm through the utilization of pair-wise preference data, thereby fitting an implicit reward model. Azar et al. [75] introduces a general theoretical framework to unify existing RLHF and DPO methods. The proposed Identity-Preference Optimization (IPO) directly optimizes policies from preferences without relying on reward modeling or the Bradley-Terry assumption, thereby avoiding overfitting issues observed in DPO. Subsequently, DPO-RK and DPO-R [76] integrate the Rao-Kupper and Davidson models into the DPO training objective respectively, thereby extending the capabilities of DPO by explicitly modeling ties in pairwise comparisons. BMC [77] further addresses a key limitation of the weak correlation between winning and losing responses in pairwise data. Specifically, BMC uses "Bridging" to enhance the correlation between winning and losing responses by increasing the consistency and informativeness of pairwise preference signals. However, previous attempts for aligning LLMs primarily focus on optimizing the model's output preferences given an instruction, which struggles to effectively perceive the fine-grained constraints within complex instructions. Thus IOPO [78] extends traditional alignment methods by considering both input and output preferences to better understand the constraints within the instructions. As current methods rely heavily on paired preference data (i.e., explicitly labeled preferred vs. dispreferred examples), they can be limiting in scenarios where such paired data is unavailable or insufficient. SAPO [80] addresses this issue based on the concept of self-play, which enhances data exploration and exploitation by automatically generating negative samples and integrating off-policy learning. 
Furthermore, PMPO [79] extends the EM algorithm to incorporate both preferred and dispreferred outcomes. By introducing the probability distribution of dis-preferred outcomes, PMPO can optimize using both types of samples, even when only negative feedback is available. Similarly, D2O [81] avoids harmful information by maximizing the discrepancy between the generated responses and the negative samples. NPO [82] + +and SimNPO [83] achieve the goal of forgetting the negative impact by regulating the model's prediction probabilities on negative datasets to be as minimal as possible, where SimNPO further eliminates the reference model bias issue inherent in NPO. + +(c) List-Wise Feedback. List-wise feedback refers to the type of feedback where multiple outputs or responses generated by the model for a given input are evaluated collectively as a list. This approach considers the relative ranking or ordering among the outputs, rather than focusing on individual outputs in isolation. Panacea [84] reframes alignment as a Multi-Dimensional Preference Optimization (MDPO) problem and introduces a method that aims to learn the entire Pareto front to accommodate diverse user preferences. In short, Panacea is designed to adapt a single model to list-wise preferences in a Pareto-optimal manner. LiPO [85] and LIRE [86] also treat LM alignment as a list-wise ranking problem, drawing on the rich literature of Learning-To-Rank (LTR). Specifically, LiPO introduces a specific method LiPO-λ, which leverages a list-wise ranking objective that weights each preference pair based on the difference in ranking metrics; while LIRE optimizes the response probability by calculating the exponential probability distribution and uses the reward model to directly guide the optimization process. 
To better capture the relative proximity within ordinal multiple responses, OPO [87] utilizes the Normalized Discounted Cumulative Gain (NDCG), a widely used ranking metric, to optimize the model's generation probability to match the permutation of responses based on these labels. Similarly, DRPO [88] leverages NDCG as a key metric to optimize the ranking of model outputs. However, DRPO incorporates novel elements like diffNDCG and Adaptive Rank Policy Score to dynamically adjust the score margins between preferred and non-preferred responses based on their ranking positions. mDPO [232] extends preference optimization to multi-sample comparisons and introduces a framework that evaluates and optimizes the collective properties of sample groups. It not only addresses the limitations of single pair-wise methods but also provides a more robust optimization framework, especially for characteristics like diversity and bias. Furthermore, RPO [90] introduces a contrastive weighting mechanism that constructs a contrast matrix within each mini-batch to compare preferred and less-preferred responses across prompts. The weights of these comparisons are dynamically adjusted based on the semantic similarity between prompts. Additionally, TODO [91] integrates a tie ranking system into list-wise preference modeling, significantly improving the capture of nuances of human preferences, especially in the presence of noisy or inconsistent labels and frequent ties. + +# 3.1.3 Preference Granularity + +Preference granularity refers to the granularity of preference labels, which determines the level at which preferences are assigned to data. It can be categorized into token-level, step-level, sentence-level, and turn-level granularity, ranging from fine-grained focus on individual tokens to broader preferences over entire interaction turns. + +(a) Token-Level Granularity. 
Token-level alignment operates at the character/subword unit of text generation, providing the finest-grained control over model outputs. + +Theoretically, Rafailov et al. [92] demonstrate that DPO can represent any dense reward function by reparameterizing it as an optimal advantage function, which allows DPO to optimize policies in the token-level MDP effectively. TDPO [93] refines the alignment process from the sentence level to the token level and introduces forward KL divergence constraints. TDPO utilizes the Bradley-Terry model to convert sentence-level preference comparisons into a token-level reward system, which allows the model to dynamically adjust its strategy at each token generation step. Furthermore, TIS-DPO [94] estimates the importance weights of tokens based on the differences in prediction probabilities from contrastive LLMs, performing token-level importance sampling on existing data to approximate optimal distribution by assigning weights to each token based on its reward. Moreover, $\mathrm{D}^2\mathrm{PO}$ [99] proposes a temporal decay mechanism that dynamically adjusts the contribution of each token-level reward based on its position in the sequences. Unlike these, SparsePO [95] directly learns sparse masks during the training process and controls which tokens are more important for preferences through the sparsity of the masks, thereby achieving dynamic optimization. RTO [96] and SePO [97] first learn a token-level reward function from preference data using DPO, and then RTO optimizes PPO based on this reward signal, while SePO selects key tokens through the estimated reward function. To tackle the need for large-scale annotated data in training, EPO [98] proposes a hierarchical framework that decomposes complex tasks into manageable subgoals using separate LLMs for subgoal prediction and low-level action generation, leveraging environment feedback to automatically generate reward signals and preference data for aligning LLMs. 
+ +To conclude, token-level granularity optimizes models at individual token positions to maximize expected objectives, preserving semantic precision and capturing local syntactic dependencies. However, it increases computational complexity, as processing numerous tokens extends training time, and its sensitivity to noise means errors in a single token can affect the entire sequence. Thus, careful loss function design and regularization are essential for stability. + +(b) Step-level Granularity. Step-level granularity focuses on the intermediate steps or stages in a process, particularly effective for complex problem-solving tasks requiring multiple intermediate steps. Step-DPO [100] and SCDPO [101] treat individual reasoning steps as the basic units for preference optimization, where preference pairs of correct and incorrect steps are generated using LLMs. Furthermore, CPO [102] and MCTS-DPO [103] first utilize more powerful inference structures to generate multiple candidate thoughts at each reasoning step following the Tree-of-Thought (ToT) and Monte Carlo Tree Search (MCTS) respectively, and construct preference pairs based on the selected and unselected intermediate steps. Then they finetune LLMs to generate reasoning steps preferred by ToT during inference using DPO. TPO [104] proposes a preference learning algorithm specifically designed for preference trees that have multiple branches and multi-step responses, and introduces the adaptive step reward mechanism to address the issue of small reward margins caused by shared subtrajectories. It adjusts the reward values for each step based on semantic similarity, helping the model better distinguish + +between preference pairs. RDPO [105] extends traditional preference datasets to incorporate a rationale field, which explains why a particular response is preferred. 
RDPO introduces rationale information into the DPO loss function by maximizing the likelihood of both the preference and the rationale, which allows the model to better understand the logic behind preferences during training. To address the challenges of sparse rewards and training instability, DAPO [106] uses a critic function to generate dense signals for policy optimization and trains the actor and critic independently to avoid instability. + +To conclude, step-level alignment demonstrates unique advantages in multi-step reasoning tasks by decomposing holistic preferences into intermediate decision points. The primary strength of step-level granularity lies in its capacity to decompose complex objectives into verifiable subgoals, enhancing both interpretability and robustness. For instance, in mathematical reasoning, LLMs can receive feedback on equation derivation steps before final answers, reducing error propagation. However, this granularity still has two key challenges: first, the need for precise step segmentation, which may require domain-specific heuristics or auxiliary models to delineate reasoning boundaries; second, the risk of local optima, where over-optimization of individual steps degrades global coherence. + +(c) Sentence-level Granularity. Sentence-level granularity aligns preferences at the complete utterance level, balancing fine-grained control and computational efficiency. This granularity, represented by the original DPO framework, operates on full response sequences as atomic units for preference comparison. MAPO [107] uses a well-trained translation model to calculate alignment scores between answers in nondominant and dominant languages and then employs preference optimization methods to enhance reasoning consistency. EURUS [108] structures each instruction as a preference tree, containing pairs of correct and incorrect actions to facilitate preference learning. 
Similarly, IRPO [109] focuses on improving the reasoning capabilities of LLMs through an iterative preference optimization on constructed preference pairs such that the winning response has a higher reward than the losing response. FACTALIGN [110] proposes a fine-grained, sentence-level alignment algorithm called fKTO, which extends the KTO method to leverage fine-grained factuality assessments at the sentence level. + +To conclude, the key strength of sentence-level granularity lies in its capacity to preserve holistic semantics while maintaining tractable optimization complexity. Nevertheless, we must carefully consider task requirements. While suitable for short-form generation and classification tasks, sentence-level methods may insufficiently capture fine-grained stylistic nuances or long-range dependencies critical in generation and reasoning domains. + +(d) Turn-level Granularity. Turn-level granularity focuses on the optimization of model behavior at the level of conversational turns, which is particularly relevant for dialogue systems and interactive agents. This granularity level treats each turn of a conversation as a unit for preference alignment, allowing the LLMs to receive feedback on their responses within the context of a single turn. M-DPO [111] introduces a multi-turn direct preference learning framework to enhance the mathematical reasoning capabilities of LLMs when + +integrated with external tools. It leverages feedback from code interpreters and optimizes trajectory-level preferences using signals generated by the Bradley-Terry model to improve model performance in multi-turn reasoning tasks. ETO [112] presents a novel trial-and-error learning method that optimizes LLM agents' policies by contrasting successful and failed trajectories that contain multi-turn interaction. 
To address the challenges of coarse granularity and training noise in previous methods, SDPO [113] optimizes specific key segments within interactions to improve multi-turn dialogues while minimizing training noise. Specifically, it extracts key segments from the positive sessions that contribute to higher goal and relationship scores and pairs them with corresponding segments from the negative sessions to calculate an adapted DPO loss. Similarly, AgentQ [114] combines MCTS with self-critique mechanisms to provide process-level supervision by ranking actions, and then iterative fine-tuning using DPO. This approach enables LLMs to effectively learn from both successful and unsuccessful trajectories, enhancing their generalization and decision-making capabilities in complex, multi-turn reasoning tasks within interactive environments. DMPO [115] enhances the existing DPO method by replacing the policy constraint with a State-Action Occupancy Measure (SAOM) constraint and incorporating length normalization into the Bradley-Terry model, effectively addressing challenges in multi-turn scenarios. Compared to traditional policy constraints, SAOM constraints better guide the agent to select actions that align with expert trajectories, especially in unexplored states, thereby reducing compounding errors. + +To conclude, turn-level alignment offers critical advantages for interactive systems by optimizing contextually grounded responses while preserving conversational flow. However, in multi-turn dialogue tasks, the turn-level granularity may introduce additional training noise. For example, some correct turns in negative samples may be mistakenly treated as incorrect turns in the loss calculation. Additionally, since each turn needs to be processed independently, this can lead to reduced training efficiency. + +# 3.2 Learning Framework of DPO + +The learning framework of DPO focuses on how the language model policy learns from preference data. 
In this section, we present an overview of the learning framework in DPO, as shown in Fig. 3, which encompasses the learning paradigm and the learning objectives. + +# 3.2.1 Learning Paradigm + +The learning paradigm in DPO determines how preference data is acquired during model training and falls into three distinct categories: offline learning, where the model learns from pre-collected preference datasets; online learning, where the model updates based on newly generated data; and active learning, where the model selectively queries annotators to obtain preference data. + +(a) Offline Learning. The original DPO framework [74] itself is an offline learning paradigm, where the model learns from a static, pre-collected dataset of preference pairs. Recent research has explored different approaches to merging preference optimization and supervised fine-tuning + +![](images/ab1cb06bdd578e7bee29a0deff07d4285154eb2afd39c5194af16ae72fc055ce.jpg) +Fig. 3: An overview of DPO learning framework. + +into a single training phase [190]. CPO [116] incorporates a behavior cloning regularizer through KL divergence minimization between the model and preferred data distribution, which effectively combines into adding a negative log-likelihood term on preferred data alongside the contrastive preference loss. Taking a more direct approach, ORPO [117] proposes a monolithic framework that directly augments the standard negative log-likelihood loss with an odds ratio term comparing chosen and rejected responses, eliminating the need for a separate reference policy while preserving SFT's domain adaptation capabilities. ULMA [71] proposes a hybrid method that applies standard SFT loss on positive samples while using a ranking-based DPO loss on negative samples. 
PAFT [118] introduces a parallel training paradigm where SFT and preference alignment are performed concurrently on the same pre-trained model and then merged using parameter fusion techniques, avoiding the sequential pipeline that can lead to catastrophic forgetting. + +Several advances explore curriculum learning strategies to enhance DPO performance and training efficiency. CurriDPO [53] introduces curriculum learning by ordering multiple preference pairs from easy to hard based on the rating difference between chosen and rejected responses, where pairs with larger rating gaps are presented first, followed by progressively more challenging pairs with smaller rating differences. sDPO [55] implements curriculum learning by partitioning preference datasets into sequential chunks measured by reward accuracy and applying them incrementally. + +To avoid substantial computational and data annotation costs for preference alignment, fine-tuning-free alignment methods have gained popularity. Linear Alignment [119] works by directly estimating the optimal policy through a one-step update to the output distribution during inference without requiring parameter tuning or feedback data. ICDPO [120] reinterprets DPO's reward-policy relationship to create a fine-tuning-free alignment method that harnesses in-context learning, treating models before and after demonstration exposure as amateur and expert policies, respectively, + +then computing their log probability ratio to score and rank candidate responses. + +(b) Online Learning. DPO faces significant limitations when relying solely on static, pre-collected preference datasets. These datasets, generated by different models, cause a distribution shift that leads to ineffective off-policy learning as the model evolves [145, 152]. By contrast, online DPO employs an iterative framework that continuously updates the policy with real-time feedback, ensuring on-policy learning and reducing misalignment [143, 144, 233]. 
+ +As online DPO progresses, researchers have introduced more flexible frameworks to tackle key challenges. For instance, Yuan et al. [123] proposed a self-rewarding language model: the model generates prompts and responses, then serves as its own judge via LLM-as-a-Judge prompting, scoring on a 5-point scale. OAIF [121] uses an LLM as an online annotator for real-time feedback, and OFSDPO [122] addresses catastrophic forgetting by using two Low-Rank Adaptive (LoRA) modules with different optimization speeds. BPO [124] constructs a dynamic trust region around the behavior LLM, adjusting it as preference data is collected, unlike methods that rely on fixed reference models. Furthermore, researchers have refined sampling strategies for online DPO. RSO [126] and RS-DPO [125] employ rejection sampling based on reward gaps. ROPO [61] recovers useful information from discarded queries via robustness-guided rejection sampling. Shi et al. [127] introduced DPO-Mix-R and DPO-Mix-P, demonstrating faster convergence by mixing online samplers with uniform samplers. OPTUNE [128] selectively regenerates responses with low reward scores while reusing high-reward responses. Iterative RPO [109] and DPO-ST [129] enhance CoT reasoning by selecting correct and incorrect answers to form preference pairs at each iteration. Xie et al. [103] used MCTS to collect preference data during training. Researchers have also explored advanced optimization techniques. APO [130] incorporates momentum-based acceleration, using an extrapolation step between the current and previous policies to update the policy. Xiong et al. [131] proposed a two-agent, non-symmetric online DPO framework with a main agent for optimal policy learning and an enhancer agent for exploration. COMAL [132] formulates alignment as a two-player zero-sum game, updating its policy toward a regularized Nash equilibrium in each iteration. PCO [133] iteratively trains the model on preference data with pairwise cringe Loss. 
+ +Recent efforts push for greater autonomy by letting models generate their own feedback [62]. SeRA [134] introduces a self-reviewed preference bootstrapping method, using an implicit reward margin to select informative pairs, and employs an ensemble reward approach across iterations. CREAM [135] mitigates self-improving biases by applying a consistency regularization on the preference rankings of consecutive iterations. D2PO [136] combines human-labeled gold data with concurrently updated, discriminator-labeled data. DLMA [137] uses contrastive prompts to compute self-reward scores via log ratio differences, then integrates these scores directly into the DPO objective. Addressing exploration and uncertainty in online DPO has also been a focus [234]. XPO [138] encourages exploration by adding a bonus for responses outside the initial policy's support, and SELM [139] uses an optimism term in reward fitting to + +actively seek high-reward responses. ETO [112] alternates exploration and training phases to collect failure trajectories, while VPO [140] applies optimism by regularizing the reward model to favor higher-value responses. Xiong et al. [111] extended DPO from single-turn to multi-turn tasks, balancing KL-regularized and non-regularized objectives, and COPO [141] incorporates a count-based bonus to encourage novel responses with low visitation counts. + +Finally, a growing body of work aims to merge online and offline techniques. HyPO [142] uses offline preference data for DPO training while regularizing via online data. MPO [47] combines the strengths of DPO and PPO in a two-stage process: it first trains DPO on an easier dataset, then uses this model as a reference for PPO training on more challenging samples. + +(c) Active Learning. Active learning in DPO is a strategic approach that aims to reduce the annotation cost and improve sample efficiency by selectively querying annotators for the most informative preference examples. 
Unlike offline learning that uses a fixed dataset or online learning that generates new data continuously, active learning intelligently selects which data points should be labeled based on model uncertainty or other informativeness criteria. + +Muldrew et al. [146] introduced APL, an iterative data acquisition and fine-tuning loop in which batches of prompt/completion pairs are strategically selected using acquisition functions: a predictive entropy-based approach to measure model uncertainty for prompts and a preference certainty measure based on the implicit Bradley-Terry model for completion pairs in DPO. Unlike two-step selection processes in APL that separately select uncertain input prompts and corresponding completions, divAPO [147] integrates both stages into a single selection phase. divAPO maximizes the preference model certainty by simultaneously evaluating the informativeness of input prompts and completion pairs, while also considering the data distribution of the input prompts. Ji et al. [148] proposed ADPO, which selectively queries human preferences only for responses where the model exhibits high uncertainty while using pseudo-labels for confident cases. Das et al. [149] also employed active learning on RLHF, which actively selects the context-action pairs that maximize exploration and minimize uncertainty in the reward model. + +# 3.2.2 Learning Objective + +In what follows, we present the learning objective in DPO, which determines how the model policy is optimized based on preference data. We first discuss multi-objective learning in DPO, which aims to optimize multiple objectives simultaneously. Then, we introduce self-play learning, which leverages self-generated data for preference alignment. + +(a) Multi-Objective Learning. Multi-objective learning in DPO addresses the challenge of simultaneously optimizing the language model for multiple, potentially competing preference dimensions, such as helpfulness, harmlessness, and truthfulness. 
This approach aims to find a balanced policy that satisfies multiple human values rather than optimizing for a single objective, which more closely mirrors the complexity of real-world human preferences. + +MODPO [150] achieves the sequential optimization of multiple preference objectives by incorporating language + +modeling directly into reward modeling, using a margin-based loss to maintain performance on previously optimized dimensions. SPO [151] takes a similar iterative constrained optimization approach, optimizing each preference dimension while preventing the degradation of prior alignments through regularization terms. MOSLIM [152] takes a different approach by introducing a multi-head classification reward model that assigns different preference dimensions to separate classification heads, enabling simultaneous optimization of multiple preferences without requiring multiple reward or policy models. HPO [153] incorporates auxiliary objectives through offline RL, where the model uses a weighted maximum likelihood objective that combines a preference alignment term with an advantage-weighted term for maximizing arbitrary auxiliary rewards like readability and safety. CPO [154] introduces explicit preference tokens during training that specify desired scores for different objectives, transforming the multi-objective optimization into a conditional optimization problem. DRDO [155] simultaneously models rewards and preferences through a combination of reward distillation and a contrastive log-unlikelihood term in its loss function. + +(b) Self-Play Learning. Self-play learning in DPO represents an approach where the language model interacts with itself or its previous iterations to generate its own preference data for training, reducing or eliminating the need for human annotations [139, 164]. 
This method enables continuous self-improvement by leveraging the model's own judgment capabilities to identify and learn from better responses, creating a form of autonomous preference learning. + +SPIN [156] involves a self-play mechanism where the LLM generates synthetic data from its prior iterations, then fine-tunes itself to distinguish these self-generated responses from those of human-annotated data. The method resembles a two-player game, where the model's current iteration tries to improve its responses to better match the target distribution, while the previous iteration attempts to generate responses as close to human data as possible. SPPO [157] treats LLM alignment as a constant-sum two-player game and iteratively refines itself by competing against its previous iteration. Instead of maintaining two competing policies or a reward model, SPO [158] uses a single policy to sample multiple trajectories and uses the proportion of wins in pairwise comparisons as the reward signal. BoNBoN [159] Alignment likewise relies on sampling responses from a base model, but it selects the best ones among n candidates and fine-tunes itself to approximate that best-of-n distribution. + +Some works approach the alignment problem by leveraging Nash equilibrium [132]. Nash-MD [160] learns a preference model from pairwise human feedback and then computes a Nash equilibrium policy that consistently produces preferred responses. Its self-play approach updates the policy by having it compete against itself (or a slight variant of itself) under the learned preference model, which measures how often one response is preferred to another. DNO [161] extends this concept by implementing a batched on-policy algorithm where the current policy generates multiple outputs that are compared both against each other and against a teacher model's outputs. 
IPO-MD [162] combines the strengths of IPO and Nash-MD, where the model generates data using a mixture policy between the online and reference + +policies, and uses a preference model to annotate pairs of generations, making the optimization equivalent to finding a Nash equilibrium through self-play. SRPO [163] modifies Nash-MD by introducing a self-improvement policy that refines model outputs through iterative revisions, enabling offline optimization without a learned reward function. + +# 3.3 Constraint Mechanism of DPO + +The constraint mechanism of DPO derives from its reformulation of RLHF, which includes a KL divergence constraint between the current policy and a reference policy. As shown in Fig. 4, we re-examine the constraint mechanism of DPO from the perspective of the reference model and different divergence constraints. We also explore various DPO variants with different safety constraints. + +# 3.3.1 Reference Model + +The reference model in DPO functions as an anchor to ensure policy updates remain within a controlled range, preventing excessive deviation from initial behaviors. Typically, the reference model is initialized using the SFT model that serves as the starting point for preference optimization. The choice of reference model significantly impacts optimization dynamics. A static reference model ensures stable training but may limit adaptability. In the following subsections, we introduce two advanced approaches: reference-free DPO eliminates reliance on the reference model, while dynamic-reference DPO employs an evolving reference model. + +(a) Reference-Free DPO. To reduce the computational and memory costs associated with a reference model, many algorithms have explored training modes that do not require loading the reference model. Xu et al. [116] replaces the reference model with a uniform prior distribution, adding an SFT loss term on preferred data to maintain consistency with the desired behavior. 
ORPO [117] integrates an odds ratio-based penalty with traditional SFT loss, increasing the probability of preferred responses while decreasing undesirable ones, thereby enabling single-stage training without a separate reference model. SimPO [166] directly uses the average log probability as implicit rewards. This removes the requirement for a separate reference model, significantly improving computational and memory efficiency. SimPER [167] also directly optimizes reverse perplexity for preferred versus rejected responses, creating a preference optimization approach that does not require a separate reference model, thus simplifying training. Despite these advancements, [168] argue that a reference model remains crucial. They compared two reference-free variants using posterior probabilities and likelihood functions as rewards, respectively, and found the original DPO consistently outperformed both. Their results indicate that a strong, well-aligned reference policy can significantly enhance DPO performance. +(b) Dynamic-Reference DPO. Offline DPO methods often suffer from reward over-optimization, meaning that as the trained model deviates from the reference model, the quality of generated samples tends to degrade. To address this issue, Gorbatovski et al. [165] proposed dynamically updating the reference model using the current model parameters during training, preventing excessive divergence and maintaining high-quality outputs. Curri-DPO [53] and sDPO [55] adopt + +![](images/cddebe4de78a02102cffc5e93cf429138280b5deac316fd35c18e11122d7aa8e.jpg) +Fig. 4: An overview of DPO constraint mechanism. + +curriculum learning by sorting data samples from simpler to more complex based on predefined metrics. At each training iteration, the model from the previous step serves as the updated reference model to provide constraints, facilitating progressive learning. 
Similarly, MPO [47] partitions datasets according to task difficulty, employing a two-stage training procedure. The model trained in the initial stage serves as the reference for the subsequent stage. Additionally, M-DPO [89] compares the performance of a fixed reference model versus a dynamic reference model, finding that the latter yields superior results. + +# 3.3.2 Divergence Constraint + +Divergence constraints in DPO play a crucial role in constraining model optimization, balancing alignment performance and model stability. In the following subsections, we introduce two modifications to the divergence constraint: one for enhancing diversity and the other for improving generalization. + +(a) Diversity. Standard DPO typically uses reverse KL divergence equivalent to RLHF. However, the mode-seeking nature of reverse KL divergence reduces the diversity of the generated outputs. To overcome this limitation, f-DPO [169] explores various divergences, including forward KL divergence, reverse KL divergence, Jensen-Shannon divergence, and $\alpha$ -divergence, to achieve a better trade-off between alignment performance and diversity. Slocum et al. [170] further proposes splitting the KL divergence term into entropy and cross-entropy terms. This decoupling allows independent control of generation diversity and closeness to the reference model, preserving output diversity without degrading overall model quality. +(b) Generalization. Over-optimization in DPO can negatively impact generalization, causing reduced performance on inputs outside the training distribution. To mitigate this, Huang et al. [178] introduce $\chi^2$ -divergence as a more aggressive form of regularization compared to KL divergence, alleviating the over-optimization problem. DPO-Kernels [171] employs data-driven methods to select optimal kernel-divergence pairs dynamically, improving task adaptability + +and robustness. 
FlipGuard [172] introduces a customized reward characterization to monitor model performance. If performance drops relative to earlier versions, FlipGuard constrains the model's updates to ensure alignment with previous stable behavior. FPO [173] leverages the feature-level constraints using Sparse Autoencoders (SAEs) to improve computational efficiency and training stability. SPO [176] integrates a natural preference loss with a KL divergence-based regularization term computed over the entire model output distribution. By adjusting this divergence term, SPO prevents unwanted shifts beyond the preference dataset, ensuring stable alignment. EXO [175] argues that minimizing the forward KL divergence in DPO introduces bias when approximating the optimal policy. They establish a generalized alignment objective and reveal the equivalence between maximizing KL regularization rewards and minimizing the reverse KL divergence relative to the optimal policy. QDPO [177] utilizes divergence between the quantized model and the full-precision model for preference optimization, effectively addressing the token-flipping issue. Token-flipping refers to the phenomenon where quantization errors skew token distributions, leading to incorrect token selection. GPO [174] constructs a framework that unifies different DPO-related algorithms through theoretical derivations, enabling a deeper understanding of the regularization mechanisms in the DPO family of algorithms. + +# 3.3.3 Safety Constraint + +Safety constraints in DPO aim to prevent LLMs from generating harmful, biased, or unethical outputs. However, traditional alignment algorithms often fail to address safety concerns. To enhance the safety alignment, recent studies have introduced several specialized mechanisms based on DPO. SafeDPO [179] introduces a streamlined approach for safety alignment by implicitly optimizing safety objectives within a single stage of policy learning. 
SACPO [180] addresses safety constraints by explicitly formulating language model alignment as a constrained optimization problem, using DPO to optimize the model under safety constraints. Zhang et al. [184] propose creating a backtracking preference dataset that identifies and reverses unsafe outputs, enhancing the safety and robustness of the model. C-DPO [181] integrates dual gradient descent into DPO to balance safety and utility efficiently. This approach achieves a robust trade-off between helpfulness and harmlessness, offering explicit safety guarantees. ADPO [182] introduces adversarial techniques into DPO. It specifically trains models to reduce the probability of unsafe outputs by deliberately generating harmful responses using controlled toxic tokens. Finally, Lee et al. [183] explore the internal mechanisms through which DPO reduces harmful outputs. Their findings suggest that DPO does not remove harmful behaviors learned during pretraining but instead teaches models to bypass or suppress these behaviors. This insight helps explain certain safety vulnerabilities like jailbreaks. + +# 3.4 Model Property of DPO + +DPO has shown great promise in aligning LLMs with human preferences by directly optimizing model outputs based on preference data. During this process, the underlying models + +![](images/e5c5a6a0d780ea4b5c9a8a90db2723f6fdb6c09e14347e19016d55dcced76086.jpg) +Fig. 5: An overview of DPO model property. + +exhibit certain properties that are crucial for understanding their behavior and effectiveness. These properties can be broadly categorized into two aspects: the generation property and the optimization property, as shown in Fig. 5. In the following sections, we explore these two properties in more detail, analyzing their implications for model alignment. + +# 3.4.1 Generation Property + +The generation property of DPO primarily concerns issues related to distribution shifts and length biases. 
DPO is sensitive to distribution shifts between the base model outputs and the preference data, which may reduce diversity and generalization. Additionally, DPO has a tendency to favor longer responses, a phenomenon known as verbosity, which can negatively impact performance and user experience. + +(a) Distribution Shift. In RLHF, the reward model is trained on a static set of preference data collected offline. During fine-tuning, the generated responses often differ from this original training data, resulting in a distribution shift. This shift can cause inaccurate reward predictions and lead to over-optimization. The implicit reward model in DPO also suffers from this distribution shift issue. Moreover, Lin et al. [188] have shown that the implicit reward model in DPO performs poorly on Out-Of-Distribution (OOD) data compared to explicit reward models. Experimental results indicate that DPO can transfer probability mass to the high-reward response regions covered by the preference data, but it may also cause the distribution of responses generated by the model to deviate significantly from that of the reference policy, resulting in responses that do not meet expectations [189]. To address these problems, many researchers are now exploring online DPO approaches [109, 121, 122, 125], aiming to mitigate OOD by continuously updating preference data during training. + +Existing DPO methods also face significant limitations due to their dependence on specific training tasks. Their optimal solutions lack robustness when applied to OOD tasks. Thus, SRPO [163] reframes alignment as a self-improvement process, which optimizes a self-improvement policy and a generative policy using a min-max objective, ensuring + +robustness by making the solution independent of training tasks. Zhang et al. [139] also identify notable issues in DPO when handling OOD tasks. First, DPO tends to overly favor novel content it has not seen during training.
Second, it easily gets stuck in local optima, limiting exploration. To address these problems, they propose Self-Exploring Language Models (SELM), incorporating an optimism term to encourage broader exploration of new responses. + +Another significant challenge of DPO is preference drift, where human preferences evolve, changing data distributions over time. Traditional DPO algorithms typically overlook such temporal shifts, mistakenly interpreting them as noise. To address this, NS-DPO [185] proposes to assign higher weights to recent data, allowing models to better adjust to evolving preferences. + +(b) Length Bias. Length bias in DPO refers to the tendency of model-generated outputs to become excessively long during training. This issue is similar to the length bias observed in RLHF [197] and is particularly pronounced in DPO. Length bias affects response quality and overall model performance. To mitigate this issue, researchers have developed several solutions, which can be categorized into three main approaches: length regularization, length normalization, and length sampling. + +Length regularization is a common approach to controlling length bias in DPO. By introducing regularization terms into the objective function, the model can constrain response length and reduce verbosity, thereby alleviating the length bias problem. R-DPO [191] introduces a length-based penalty term to the DPO objective function, explicitly discouraging verbosity. $\mathrm{D}^2\mathrm{PO}$ [99] introduces a dynamic weighting mechanism by incorporating a temporal decay factor. Unlike previous methods that apply uniform reward contributions across sequences, $\mathrm{D}^2\mathrm{PO}$ adjusts the influence of each reward based on its position in the response. Higher weights are assigned to rewards associated with earlier tokens, as they are more critical for model alignment, while later rewards gradually receive lower weights.
This adaptive approach prevents overfitting to less relevant tokens, thereby addressing length bias in DPO. + +Length normalization aims to eliminate the loss bias caused by response length differences, allowing the model to evaluate texts of varying lengths more fairly. This approach prevents the model from developing an unreasonable preference for either long or short responses [198]. RRHF [190] and SimPO [166] first propose to apply length normalization to responses, ensuring that the loss remains unaffected by response length. LN-DPO [194] further integrates SimPO-like length normalization into DPO, demonstrating that this approach enhances response quality while mitigating verbosity. LD-DPO [195] achieves length desensitization by reparameterizing the likelihood in DPO. Specifically, it decomposes the likelihood of the longer response in a preference pair into the product of the likelihood of the public-length portion and the likelihood of the excessive portion. It then introduces a hyperparameter to mitigate the verbosity preference. This adjustment smooths the relationship between likelihood and response length, reducing its impact on optimization. For multi-turn dialogue tasks, DMPO [115] introduces length normalization for the number of turns in multi-turn preference optimization.
For long-context tasks, LongPO [196] enables short-context LLMs to self-evolve for long-context tasks by learning from self-generated short-to-long preference data, which includes paired responses for long-context inputs and their compressed short-context counterparts. LongPO incorporates a short-to-long KL constraint to prevent degradation of short-context performance during long-context alignment, achieving strong performance on both short- and long-context tasks. + +# 3.4.2 Optimization Property + +The optimization property of DPO involves likelihood collapse and alignment tax. While DPO aims to increase the likelihood of preferred responses and decrease dispreferred ones, the actual optimization process does not explicitly enforce this balance. Moreover, alignment improvements often come at the cost of the original capabilities of LLMs, known as alignment tax. + +(a) Likelihood Collapse. Likelihood collapse refers to the unintended reduction in the likelihood of both preferred and dispreferred responses during DPO training [92]. This phenomenon can lead to unintentional unalignment, where the model's outputs deviate from human preferences, potentially producing undesirable or harmful responses. This phenomenon is also referred to as likelihood displacement in prior studies [204]. Additionally, the gradients associated with increasing the likelihood of preferred responses and decreasing that of dispreferred responses can become entangled, hindering effective learning. This entanglement complicates the optimization process, making it challenging to achieve the desired alignment [203]. Theoretical analyses have further elucidated the underlying causes of likelihood collapse. In particular, Feng et al. [202] developed an analytical framework grounded in field theory. 
Their analysis of the gradient vector field of the DPO loss function revealed that the loss function decreases the probability of generating human-disliked data at a faster rate than it increases the probability of generating human-like data. + +Several strategies have been proposed to address likelihood collapse. Pal et al. [200] introduce DPO-Positive (DPOP), which adds a penalty term to maintain a high log-likelihood for preferred examples. Similarly, LLaMA [235] augments DPO training with a negative log-likelihood term to stabilize training and preserve the log-likelihood of chosen responses [109]. Flex-DPO [201] adaptively adjusts parameters to slow the decline in the likelihood of dispreferred responses and balance gradients for both chosen and rejected outputs. D'Oosterlinck et al. [199] propose Anchored Preference Optimization (APO), which provides fine-grained control over probability updates: APO-zero increases the + +TABLE 1: An overview of datasets (upper row) and benchmarks (lower row) for DPO. + +
| Dataset | Task Description | Data Size (Training & Test) | Data Source | Data Structure | Evaluation Metric |
| --- | --- | --- | --- | --- | --- |
| UltraFeedback [237] | Instruction-Following, Helpful | 64K&- | AI | List | - |
| SafeRLHF [238] | Harmless, Helpful | 73.9K&8.21K | Human&AI | Pair | - |
| HelpSteer [239] | Helpful | 35.3K&1.8K | Human | Point | - |
| PRM800K [240] | Mathematical Reasoning | 800K&- | Human | Point | - |
| SHP-2 [241] | Q&A From Reddit | 3600K&241K | Human | Pair | - |
| Nectar [242] | Conversations | 183K&- | AI | List | - |
| OpenOrca [243] | Conversations | 2940K&- | AI | Sample | - |
| Capybara [244] | Multi-Turn Conversations | 16K&- | Human&AI | Sample | - |
| Step-DPO [100] | Mathematical Reasoning | 10.8K&- | Human&AI | Pair | - |
| BeaverTails [245] | Harmless, Helpful | 330K&36K | Human&AI | Point | - |
| IMDb [246] | Movie Reviews | 25K&25K | Human | Sample | Accuracy |
| Reddit TL;DR [247] | Summarization | 1330K&- | Human | Sample | Win Rate |
| Anthropic-HH [248] | Harmless, Helpful | 161K&8.55K | AI | Pair | Win Rate |
| GSM8K [249] | Mathematical Reasoning | 7.47K&1.32K | Human | Sample | Accuracy |
| AlpacaEval2 [250] | Automatic Evaluation | -&0.8K | AI | Sample | Win Rate |
| MT-Bench [251] | Multi-Turn Question | -&3.3K | Human | Pair | Win Rate |
| AdvBench [252] | Harmful Behaviors | -&0.5K | Human | Sample | Attack Success |
| Arena-Hard [253] | Updating Evaluation | -&0.5K | AI | Sample | Win Rate |
| TruthfulQA [254] | Truthful | -&0.8K | Human | Pair | Accuracy |
| IFEval [255] | Instruction-Following | -&0.5K | Human | Sample | Accuracy |
| BBH [256] | Multistep Reasoning | -&23 Tasks | Human | Sample | Accuracy |
| MATH [257] | Mathematical Reasoning | 7.5K&5K | Human | Sample | Accuracy |
| GPQA [258] | Biology, Physics, and Chemistry | -&0.45K | Human | Sample | Accuracy |
| MUSR [259] | Multistep Reasoning | -&0.76K | AI | Sample | Accuracy |
| MMLU-Pro [260] | Language Understanding | -&12K | Human&AI | Sample | Accuracy |
+ +probability of winning outputs and decreases that of losing outputs, whereas APO-down decreases both, but with a stronger decline for losing outputs. + +Another notable challenge related to likelihood collapse is likelihood over-optimization, where the performance of a model on a proxy metric (such as its own likelihood estimates) improves, while its true performance does not. Zhang and Ranganath [236] show that reductions in the likelihood loss of DPO do not necessarily translate into higher win rates. Similarly, Shi et al. [205] further investigates the problem of likelihood over-optimization in DPO, demonstrating that higher completion likelihoods do not necessarily correlate with better model performance and may even degrade it. This study identifies key indicators of over-optimization and highlights the need to balance likelihood optimization with output diversity. e-DPO [187] also shows that DPO can lead to degenerate policies due to overfitting, and proposes a solution using reward model distillation to regularize the implicit reward of the language model. The method trains the language model to match the probability distribution induced by a reward model and introduces a pessimistic extension to handle uncertainty in the reward model, thereby improving the robustness of DPO. + +(b) Alignment Tax. Alignment tax refers to the unintended consequence where improving a model's preference alignment degrades its general capabilities acquired during pretraining [206]. Thakkar et al. [207] demonstrate the sensitivity of DPO to training data composition, showing significantly worse performance degradation than SFT when using mixed-preference datasets. Furthermore, Chen et al. [209] identify that DPO struggles with optimizing ranking tasks. While DPO improves ranking accuracy, it disproportionately harms generative capabilities. Pentyala et al. 
[118] also observes capability forgetting during sequential training, where DPO objectives conflict with previously learned SFT patterns. To address this, researchers propose model merging strategies that balance alignment and performance. + +PAFT [118] separately trains SFT and DPO objectives on a pretrained model using distinct datasets, then merges the parameters through weighted averaging. Additionally, Lu et al. [208] proposes online merging optimizers, which integrate model merging into each optimization step of DPO to balance human preferences and basic capabilities. By merging gradients with parameter differences between SFT and pretrained models, these optimizers effectively enhance alignment while mitigating alignment tax. + +# 4 BENCHMARKS AND ANALYSIS + +In this section, we provide a comprehensive overview of existing benchmarks and evaluation for DPO methods. We first introduce the key datasets and benchmarks used to train or evaluate DPO models. We then present a comparative analysis of the performance of different DPO methods on these benchmarks, highlighting their strengths and limitations. + +# 4.1 Datasets and Benchmarks + +A diverse range of datasets and benchmarks has been specifically curated to facilitate research in DPO. Table 1 summarizes these datasets and benchmarks, highlighting their task descriptions, dataset sizes, data sources, data structures, and evaluation metrics. These datasets and benchmarks span a broad range of tasks, such as harmlessness and helpfulness evaluation and mathematical reasoning. They also exhibit significant diversity in scale, ranging from smaller, specialized datasets to large-scale collections such as SHP-2, which contains over 3.6 million samples. Additionally, datasets differ in their sources: some rely purely on human annotations, others on AI-generated content, and many adopt a hybrid approach combining human and AI-generated data. 
The data structures employed across these datasets include single-sample without preference label, point-wise annotations, pair-wise comparisons, and list-wise comparisons. Common evaluation metrics include accuracy + +TABLE 2: Experimental results of different DPO variants on Open LLM Leaderboard. The underline indicates the best performance. + +
Mistral-7B-Base (left seven columns) and LLaMA-3-8B-Base (right seven columns):

| Model | IFEval | BBH | MATH | GPQA | MUSR | MMLU-Pro | AVERAGE | IFEval | BBH | MATH | GPQA | MUSR | MMLU-Pro | AVERAGE |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| SFT | 3.4 | 41.1 | 9.2 | 28.8 | 42.0 | 27.7 | 25.4 | 29.0 | 46.3 | 15.3 | 28.6 | 41.3 | 31.0 | 31.9 |
| RRHF [190] | 10.0 | 40.6 | 1.7 | 26.4 | 46.3 | 26.1 | 25.2 | 31.0 | 46.8 | 13.9 | 31.4 | 36.8 | 30.5 | 31.7 |
| SLiC-HF [230] | 11.0 | 44.0 | 9.9 | 29.2 | 42.6 | 28.1 | 27.5 | 41.7 | 49.5 | 17.5 | 30.4 | 39.7 | 31.7 | 35.1 |
| DPO [74] | 11.1 | 43.7 | 7.1 | 28.5 | 43.8 | 26.7 | 26.8 | 34.3 | 48.2 | 17.2 | 31.9 | 40.1 | 31.5 | 33.9 |
| IPO [75] | 9.4 | 42.8 | 9.7 | 29.7 | 39.7 | 27.8 | 26.5 | 35.3 | 49.0 | 15.9 | 32.8 | 41.4 | 31.9 | 34.4 |
| CPO [116] | 8.0 | 42.7 | 9.6 | 28.9 | 42.1 | 27.3 | 26.4 | 32.4 | 46.9 | 16.8 | 30.6 | 39.1 | 31.8 | 32.9 |
| KTO [67] | 12.9 | 43.7 | 12.0 | 28.9 | 46.1 | 28.3 | 28.6 | 40.2 | 48.3 | 18.0 | 31.0 | 40.1 | 31.1 | 34.8 |
| ORPO [117] | 28.4 | 46.4 | 13.5 | 30.2 | 41.4 | 29.5 | 31.6 | 40.0 | 49.1 | 16.8 | 30.7 | 38.4 | 32.0 | 34.5 |
| R-DPO [191] | 10.0 | 43.0 | 7.6 | 28.7 | 39.3 | 27.2 | 26.0 | 36.4 | 48.8 | 17.2 | 31.6 | 40.6 | 31.5 | 34.4 |
| SimPO [166] | 11.1 | 43.1 | 8.4 | 28.9 | 39.5 | 27.2 | 26.4 | 40.8 | 48.6 | 15.8 | 31.0 | 40.5 | 31.8 | 34.7 |

Mistral-7B-Instruct (left seven columns) and LLaMA-3-8B-Instruct (right seven columns):

| Model | IFEval | BBH | MATH | GPQA | MUSR | MMLU-Pro | AVERAGE | IFEval | BBH | MATH | GPQA | MUSR | MMLU-Pro | AVERAGE |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| SFT | 48.4 | 46.2 | 10.9 | 29.1 | 47.6 | 27.1 | 34.9 | 50.7 | 49.3 | 26.9 | 31.0 | 37.9 | 35.7 | 38.6 |
| RRHF [190] | 45.2 | 45.3 | 10.1 | 28.5 | 44.2 | 26.2 | 33.3 | 51.3 | 49.3 | 27.2 | 29.6 | 39.5 | 35.3 | 38.7 |
| SLiC-HF [230] | 39.4 | 46.2 | 11.4 | 28.7 | 49.0 | 26.8 | 33.6 | 41.6 | 50.9 | 26.3 | 31.3 | 39.2 | 35.3 | 37.4 |
| DPO [74] | 49.0 | 45.6 | 11.0 | 26.9 | 46.1 | 26.8 | 34.2 | 48.9 | 50.1 | 25.8 | 29.4 | 38.7 | 36.0 | 38.2 |
| IPO [75] | 42.6 | 45.3 | 11.8 | 27.8 | 49.3 | 27.2 | 34.0 | 50.4 | 49.5 | 26.3 | 29.6 | 37.9 | 35.7 | 38.2 |
| CPO [116] | 38.8 | 46.0 | 10.1 | 28.5 | 48.4 | 26.9 | 33.1 | 50.6 | 49.1 | 26.8 | 31.3 | 38.1 | 35.8 | 38.6 |
| KTO [67] | 46.2 | 45.7 | 10.9 | 27.8 | 46.0 | 27.3 | 34.0 | 43.1 | 50.1 | 26.3 | 31.2 | 38.1 | 35.0 | 37.3 |
| ORPO [117] | 37.6 | 45.1 | 11.2 | 28.2 | 46.9 | 26.5 | 32.6 | 43.0 | 50.6 | 26.9 | 29.3 | 39.1 | 35.1 | 37.3 |
| R-DPO [191] | 46.8 | 45.9 | 9.9 | 28.7 | 46.2 | 27.6 | 34.2 | 50.9 | 50.3 | 25.3 | 29.8 | 39.0 | 35.7 | 38.5 |
| SimPO [166] | 45.4 | 45.9 | 10.4 | 28.3 | 45.0 | 27.1 | 33.7 | 48.8 | 49.2 | 25.0 | 29.3 | 39.2 | 35.1 | 37.8 |
+ +(for tasks like mathematical reasoning found in GSM8K and MATH), win rates derived from pairwise comparisons (such as MT-Bench and Anthropic-HH), and attack success rates used for assessing adversarial robustness (AdvBench). + +# 4.2 Results + +To demonstrate the effectiveness of different DPO variants, we conduct experiments on the Open LLM Leaderboard. We compare different DPO variants using Mistral-7B-Base, Mistral-7B-Instruct [261], LLaMA-3-8B-Base, and LLaMA-3-8B-Instruct [235] as starting points. The overall experimental setup follows Meng et al. [166], ensuring a reproducible evaluation of different DPO variants. For Mistral-7B-Base and LLaMA-3-8B-Base, the SFT models are trained based on the UltraChat-200k dataset [262], and subsequently applied different DPO variants on the SFT models using the UltraFeedback dataset [237]. For Mistral-7B-Instruct and LLaMA-3-8B-Instruct, which have already undergone instruction-tuning, the preference dataset is regenerated by collecting responses from the SFT models using prompts from the UltraFeedback dataset [237]. + +The experimental results, as summarized in Table 2, highlight the performance of different DPO variants across various benchmarks. For the Mistral-7B-Base and LLaMA-3-8B-Base models, ORPO consistently achieves the highest average scores, indicating its effectiveness in aligning models with human preferences. Notably, ORPO outperforms other methods on IFEval, BBH, and MATH, demonstrating its superiority in instruction-following and mathematical reasoning tasks. Meanwhile, SLiC-HF and KTO also achieve competitive results, particularly in BBH and GPQA, suggesting that these methods effectively leverage preference data for enhanced performance. For the Mistral-7B-Instruct and LLaMA-3-8B-Instruct models, the improvements across different DPO variants are more nuanced. 
While DPO and R-DPO show strong performance in IFEval and MMLU-Pro, IPO and CPO demonstrate robustness in handling complex reasoning tasks like MATH and GPQA. Overall, the results indicate that different DPO variants exhibit varying strengths across benchmarks, with some methods excelling in base models while others are more effective for instruct models. + +# 5 APPLICATIONS + +In this section, we discuss the applications of DPO in various domains, including different LLM-based applications, diffusion models, and multi-modal LLMs. We provide an overview of the key challenges and opportunities in each domain and highlight the potential impact of DPO on real-world applications. + +# 5.1 LLM-based Applications + +DPO has emerged as a powerful paradigm for aligning LLMs with human preferences across diverse applications [116, 235, 263, 264]. In code generation, DPO enhances control over code quality by optimizing based on preferences from automated tests [265, 266, 267]. In mathematical reasoning, DPO reduces errors in complex problem-solving by emphasizing step-level preference optimization [100, 101, 129, 268]. Multilingual applications leverage DPO to synchronize cross-lingual preferences, thereby improving translation accuracy and cultural relevance [107, 269]. Recommendation systems utilize DPO to refine personalization by incorporating user preference data to optimize item rankings, thereby enhancing the model ability to distinguish preferred items from less preferred ones [270, 271]. These examples highlight the adaptability of DPO in achieving human-aligned outputs across diverse tasks. + +# 5.2 Diffusion Models + +In the realm of diffusion models, DPO has been adapted to better align generated content with user expectations [272, 273, 274, 275]. By optimizing preferences over image-text pairs, DPO enhances the semantic accuracy of generated images and mitigates the production of undesirable or biased content. 
Studies have demonstrated that diffusion models fine-tuned with DPO respond more accurately to complex prompts compared to those trained with traditional techniques. Moreover, the efficiency of DPO allows for the fine-tuning of large-scale models using limited preference data, addressing significant computational challenges in training diffusion models [276, 277, 278]. While scaling DPO for high-resolution and dynamic content generation remains + +challenging, its ability to simplify reward modeling makes it a promising method for controlled content creation [279]. + +# 5.3 Multi-Modal LLMs + +For multi-modal LLMs, DPO plays a crucial role in aligning preferences across different data types, thereby improving coherence in tasks such as visual question answering and image captioning [89, 280, 281, 282, 283]. By optimizing alignment between textual responses and visual inputs, DPO reduces hallucinations in multi-modal interactions, ensuring outputs remain faithful to the given context. Although reconciling different types of feedback can be challenging, DPO offers a practical framework for lightweight adaptation, making it well-suited to preference-intensive multi-modal applications [280, 284, 285]. + +# 6 CHALLENGES AND FUTURE DIRECTIONS + +In this section, we discuss the key challenges and future directions in DPO research. We identify several critical issues that need to be addressed to further advance the field. Moreover, we propose several promising research directions that can help overcome these challenges and accelerate the adoption of DPO in the future. 
+ +# 6.1 Efficient Preference Optimization + +Efficient preference optimization remains a pivotal challenge, as current DPO methods hinge on the availability of high-quality preference data, yet the manual collection of human annotations is both time-consuming and labor-intensive while automatically model-generated datasets often suffer from issues such as limited diversity, inherent biases, and insufficient fidelity to human judgment [121, 122, 128, 129]. Moreover, even though DPO circumvents the intricacies of reward model engineering common in RL, it does not fully leverage the exploratory strengths that RL methods offer, as evidenced by recent advances in reasoning approaches where RL-based training has achieved notable successes [18, 19]. This opens up an avenue for future research to not only enhance data efficiency through advanced learning techniques but also to integrate novel exploration mechanisms [138, 141], potentially through hybrid models that amalgamate the direct preference optimization benefits of DPO with the robust exploratory capabilities characteristic of RL. + +# 6.2 Multi-Modal Preference Optimization + +Multi-Modal Preference Optimization presents another frontier, given that existing DPO frameworks have primarily targeted text-based modalities while many real-world applications demand the alignment of diverse human preferences across text, images, audio, and even video [280, 284, 285, 286, 287]. In scenarios where cross-modal cues might conflict, such as the need for concise text paired with richly detailed imagery, the challenge lies in constructing a unified preference representation space that can intelligently and automatically recalibrate the priority of different modalities based on the contextual demands of the task at hand [89, 282, 283]. 
Future directions in this area could involve the development of innovative multi-modal preference encoding architectures, + +which are capable of disentangling compound preferences into modality-specific and cross-modal components that align conflicting preferences while also adapting dynamically to changing inputs. + +# 6.3 Continuous Preference Optimization + +Continuous preference optimization addresses the dynamic nature of human preferences that evolve over time or vary with different phases of a task, a factor that static DPO models often fail to capture [123, 135, 137, 185]. As social norms and individual preferences shift, there is an increasing need for systems that can continuously recalibrate their alignment strategies in real time while simultaneously mitigating the risk of catastrophic forgetting. Future research in this domain may focus on meta-learning approaches that enable models to learn not only from the current state of preferences but also how to efficiently adapt when these preferences change. By integrating online learning frameworks with mechanisms for detecting temporal shifts and contextual variability in user behavior, researchers can pave the way toward systems that remain consistently relevant and effective in the face of evolving societal and individual expectations. + +# 6.4 Interpretable Preference Optimization + +Interpretable preference optimization is critical for building trust in models that implicitly align human values, as the opaque nature of current DPO complicates the ability to audit and control the alignment process. In practice, human preferences are multi-dimensional [150, 151, 154], encompassing aspects such as factual accuracy, fairness, creativity, and beyond, and there is a pressing need to decompose these complex preferences into interpretable components that can be individually examined and fine-tuned. 
Future research could leverage advances in explainable techniques to develop models that not only achieve fine-grained alignment across diverse values but also provide transparent insights into how different preference dimensions interact to shape final decisions. This level of interpretability would allow stakeholders to balance competing values more effectively, ensuring that the alignment process remains both accountable and adaptable as societal norms continue to evolve. + +# 7 CONCLUSION + +In recent years, DPO has emerged as a promising paradigm for aligning LLMs with human preferences by directly optimizing model policies using preference data. Despite its potential, the DPO research landscape remains fragmented, with a lack of systematic organization and comparative analysis. In this survey, we present a comprehensive overview of DPO and introduce a novel taxonomy that categorizes existing works into four key dimensions: data strategy, learning framework, constraint mechanism, and model property. We have also discussed the key benchmarks, evaluation results, and applications of DPO, highlighting the challenges and future directions in this field. By providing a systematic analysis of the existing DPO methods, we aim to facilitate further research and development in this area. + +# REFERENCES + +[1] Wayne Xin Zhao et al. A survey of large language models. arXiv, 2023. +[2] Humza Naveed et al. A comprehensive overview of large language models. arXiv, 2023. +[3] Yupeng Chang et al. A survey on evaluation of large language models. TIIS, 2024. +[4] Shervin Minaee et al. Large language models: A survey. arXiv, 2024. +[5] Shukang Yin et al. A survey on multimodal large language models. arXiv, 2023. +[6] Duzhen Zhang et al. Mm-llms: Recent advances in multimodal large language models. ACL, 2024. +[7] Jingyi Zhang et al. Vision-language models for vision tasks: A survey. TPAMI, 2024. +[8] Zhehui Wang et al. 
Enabling energy-efficient deployment of large language models on memristor crossbar: A synergy of large and small. TPAMI, 2024. +[9] Hongru Wang et al. A survey of the evolution of language model-based dialogue systems. arXiv, 2023. +[10] Zihao Yi et al. A survey on recent advances in llm-based multi-turn dialogue systems. arXiv, 2024. +[11] Jiawei Liu et al. Is your code generated by chatgpt really correct? rigorous evaluation of large language models for code generation. NeurIPS, 2023. +[12] Daya Guo et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. arXiv, 2024. +[13] Xue Jiang et al. Self-planning code generation with large language models. TOSEM, 2024. +[14] Dave Van Veen et al. Adapted large language models can outperform medical experts in clinical text summarization. Nature Medicine, 2024. +[15] Jesutofunmi A Omiye et al. Large language models in medicine: the potentials and pitfalls: a narrative review. Annals of Internal Medicine, 2024. +[16] Karan Singhal et al. Toward expert-level medical question answering with large language models. Nature Medicine, 2025. +[17] Fenglin Liu et al. Aligning, autoencoding and prompting large language models for novel disease reporting. TPAMI, 2025. +[18] Aaron Jaech et al. Openai o1 system card. arXiv, 2024. +[19] Daya Guo et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv, 2025. +[20] Julia Hirschberg and Christopher D Manning. Advances in natural language processing. Science, 2015. +[21] Xiaowei Huang et al. A survey of safety and trustworthiness of large language models through the lens of verification and validation. Artificial Intelligence Review, 2024. +[22] Yue Zhang et al. Siren's song in the ai ocean: a survey on hallucination in large language models. arXiv, 2023. +[23] Isabel O Gallegos et al. Bias and fairness in large language models: A survey. Computational Linguistics, 2024. +[24] Yufei Wang et al. 
Aligning large language models with human: A survey. arXiv, 2023. +[25] Yang Liu et al. Trustworthy llms: A survey and guideline for evaluating large language models' alignment. arXiv, 2023. +[26] Tianhao Shen et al. Large language model alignment: A survey. arXiv, 2023. +[27] Hannah Rose Kirk et al. The benefits, risks and bounds of personalizing the alignment of large language models to individuals. Nature Machine Intelligence, 2024. +[28] Usman Anwar et al. Foundational challenges in assuring alignment and safety of large language models. arXiv, 2024. +[29] Bofei Gao et al. Towards a unified view of preference learning for large language models: A survey. arXiv, 2024. +[30] Ruili Jiang et al. A survey on human preference learning for large language models. arXiv, 2024. +[31] Zhichao Wang et al. A comprehensive survey of llm alignment techniques: Rlhf, rlaif, ppo, dpo and more. arXiv, 2024. +[32] Genta Indra Winata et al. Preference tuning with human feedback on language, speech, and vision tasks: A survey. arXiv, 2024. +[33] Yue Huang et al. Position: TrustLLM: Trustworthiness in large language models. ICML, 2024. +[34] Paul F Christiano et al. Deep reinforcement learning from human preferences. NeurIPS, 2017. +[35] Long Ouyang et al. Training language models to follow instructions with human feedback. NeurIPS, 2022. +[36] Nisan Stiennon et al. Learning to summarize with human + +feedback. NeurIPS, 2020. +[37] Josh Achiam et al. Gpt-4 technical report. arXiv, 2023. +[38] Yuntao Bai et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv, 2022. +[39] Anthropic. The claude 3 model family: Opus, sonnet, haiku, 2024. +[40] Yuchun Miao et al. Inform: Mitigating reward hacking in rlhf via information-theoretic reward modeling. NeurIPS, 2024. +[41] Stephen Casper et al. Open problems and fundamental limitations of reinforcement learning from human feedback. arXiv, 2023. +[42] Keertana Chidambaram et al. 
Direct preference optimization with unobserved preference heterogeneity. arXiv, 2024. +[43] Haoxian Chen et al. Mallowspo: Fine-tune your llm with preference dispersions. arXiv, 2024. +[44] Shyam Sundhar Ramesh et al. Group robust preference optimization in reward-free rlhf. arXiv, 2024. +[45] Binwei Yao et al. No preference left behind: Group distributional preference optimization. *ICLR*, 2025. +[46] Afra Amini et al. Direct preference optimization with an offset. ACL Findings, 2024. +[47] Qi Gou and Cam-Tu Nguyen. Mixed preference optimization: Reinforcement learning with data selection and better reference model. arXiv, 2024. +[48] Shiqi Wang et al. Reward difference optimization for sample reweighting in offline RLHF. EMNLP Findings, 2024. +[49] Junkang Wu et al. $\alpha$ -dpo: Adaptive reward margin is what direct preference optimization needs. arXiv, 2024. +[50] Hiroki Furuta et al. Geometric-averaged preference optimization for soft preference labels. NeurIPS, 2024. +[51] Junkang Wu et al. Beta-dpo: Direct preference optimization with dynamic beta. NeurIPS, 2024. +[52] Tetsuro Morimura et al. Filtered direct preference optimization, EMNLP, 2024. +[53] Pulkit Pattnaik et al. Enhancing alignment using curriculum learning & ranked preferences. EMNLP, 2024. +[54] Ilgee Hong et al. Adaptive preference scaling for reinforcement learning with human feedback. NeurIPS, 2024. +[55] Dahiyun Kim et al. Sdpo: Don't use your data all at once. arXiv, 2024. +[56] Runsheng Yu et al. Direct alignment of language models via quality-aware self-refinement. arXiv, 2024. +[57] Lou Jieming et al. Gap-aware preference optimization: Enhancing model alignment with perception margin. OpenReview, 2024. +[58] Jingyuan Ma et al. Plug-and-play training framework for preference optimization. arXiv, 2024. +[59] Sayak Ray Chowdhury et al. Provably robust DPO: Aligning language models with noisy feedback. ICML, 2024. +[60] Keyi Kong et al. 
Perplexity-aware correction for robust alignment with noisy preferences. NeurIPS, 2024. +[61] Xize Liang et al. Ropo: Robust preference optimization for large language models. arXiv, 2024. +[62] Dongyoung Kim et al. Spread preference annotation: Direct preference judgment for efficient LLM alignment. ICLR, 2025. +[63] Lingfan Zhang et al. Combating inherent noise for direct preference optimization. OpenReview, 2025. +[64] Shawn Im and Yixuan Li. Understanding generalization of preference optimization under noisy feedback. OpenReview, 2025. +[65] Yang Gao et al. Impact of preference noise on the alignment performance of generative language models. COLM, 2024. +[66] Junkang Wu et al. Towards robust alignment of language models: Distributionally robustifying direct preference optimization. ICLR, 2024. +[67] Kawin Ethayarajh et al. Model alignment as prospect theoretic optimization. ICML, 2024. +[68] Seungjae Jung et al. Binary classifier optimization for large language model alignment. arXiv, 2024. +[69] Teng Xiao et al. Cal-dpo: Calibrated direct preference optimization for language model alignment. NeurIPS, 2024. +[70] Igor Melnyk et al. Distributional preference alignment of llms via optimal transport. NeurIPS, 2024. +[71] Tianchi Cai et al. Ulma: Unified language model alignment with human demonstration and point-wise preference. arXiv, 2023. +[72] Huayu Chen et al. Noise contrastive alignment of language models with explicit rewards. NeurIPS, 2024. +[73] Yifan Zhang et al. General preference modeling with preference representations for aligning language models. arXiv, 2024. +[74] Rafael Rafailov et al. Direct preference optimization: Your language model is secretly a reward model. NeurIPS, 2023. + +[75] Mohammad Gheshlaghi Azar et al. A general theoretical paradigm to understand learning from human preferences. AISTATS, 2024. +[76] Jinghong Chen et al. On extending direct preference optimization to accommodate ties. arXiv, 2024. +[77] Yuxin Jiang et al. 
Bridging and modeling correlations in pairwise data for direct preference optimization. arXiv, 2024. +[78] Xinghua Zhang et al. Iopo: Empowering llms with complex instruction following via input-output preference optimization. arXiv, 2024. +[79] Abbas Abdelmaleki et al. Preference optimization as probabilistic inference. ICLR, 2024. +[80] Yueqin Yin et al. Self-augmented preference optimization: Off-policy paradigms for language model alignment. arXiv, 2024. +[81] Shitong Duan et al. Negating negatives: Alignment with human negative samples via distributional dispreference optimization. arXiv, 2024. +[82] Ruiqi Zhang et al. Negative preference optimization: From catastrophic collapse to effective unlearning. COLM, 2024. +[83] Chongyu Fan et al. Simplicity prevails: Rethinking negative preference optimization for lmm unlearning. arXiv, 2024. +[84] Yifan Zhong et al. Panacea: Pareto alignment via preference adaptation for llms. NeurIPS, 2024. +[85] Tianqi Liu et al. Lipo: Listwise preference optimization through learning-to-rank, 2024. arXiv, 2024. +[86] Mingye Zhu et al. LIRE: listwise reward enhancement for preference alignment. ACL, 2024. +[87] Yang Zhao et al. Ordinal preference optimization: Aligning human preferences via ndcg. arXiv, 2024. +[88] Jiacong Zhou et al. Optimizing preference alignment with differentiable ndcg ranking. arXiv, 2024. +[89] Fei Wang et al. mDPO: Conditional preference optimization for multimodal large language models. EMNLP, 2024. +[90] Yueqin Yin et al. Relative preference optimization: Enhancing llm alignment through contrasting responses across identical and diverse prompts. arXiv, 2024. +[91] Yuxiang Guo et al. Todo: Enhancing llm alignment with ternary preferences. ICLR, 2024. +[92] Rafael Rafailov et al. From r to $q^*$ : Your language model is secretly a q-function. COLM, 2024. +[93] Yongcheng Zeng et al. Token-level direct preference optimization. ICML, 2024. +[94] Aiwei Liu et al. 
Tis-dpo: Token-level importance sampling for direct preference optimization with estimated weights. ICLR, 2024. +[95] Fenia Christopoulou et al. Sparsepo: Controlling preference alignment of llms via sparse token masks. arXiv, 2024. +[96] Han Zhong et al. Dpo meets ppo: Reinforced token optimization for rlhf. arXiv, 2024. +[97] Kailai Yang et al. Selective preference optimization via token-level reward function estimation. arXiv, 2024. +[98] Qi Zhao et al. EPO: hierarchical LLM agents with environment preference optimization. EMNLP, 2024. +[99] Ruichen Shao et al. Earlier tokens contribute more: Learning direct preference optimization from temporal decay perspective. *ICLR*, 2025. +[100] Xin Lai et al. Step-dpo: Step-wise preference optimization for long-chain reasoning of llms. arXiv, 2024. +[101] Zimu Lu et al. Step-controlled dpo: Leveraging stepwise error for enhanced mathematical reasoning. arXiv, 2024. +[102] Xuan Zhang et al. Chain of preference optimization: Improving chain-of-thought reasoning in llms. NeurIPS, 2024. +[103] Yuxi Xie et al. Monte carlo tree search boosts reasoning via iterative preference learning. arXiv, 2024. +[104] Weibin Liao et al. Tpo: Aligning large language models with multi-branch & multi-step preference trees. arXiv, 2024. +[105] Hoang Anh Just et al. Data-centric human preference optimization with rationales. arXiv, 2024. +[106] Jiacai Liu et al. Improving multi-step reasoning abilities of large language models with direct advantage policy optimization. arXiv, 2024. +[107] Shuaijie She et al. MAPO: advancing multilingual reasoning through multilingual-alignment-as-preference optimization. ACL, 2024. +[108] Lifan Yuan et al. Advancing llm reasoning generalists with preference trees. arXiv, 2024. +[109] Richard Yuanzhe Pang et al. Iterative reasoning preference optimization. NeurIPS, 2024. +[110] Chao-Wei Huang and Yun-Nung Chen. Factalign: Long-form + +factuality alignment of large language models. arXiv, 2024. 
+[111] Wei Xiong et al. Building math agents with multi-turn iterative preference learning. *ICLR*, 2025. +[112] Yifan Song et al. Trial and error: Exploration-based trajectory optimization for lIm agents. ACL, 2024. +[113] Aobo Kong et al. Sdpo: Segment-level direct preference optimization for social agents. arXiv, 2025. +[114] Pranav Putta et al. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv, 2024. +[115] Wentao Shi et al. Direct multi-turn preference optimization for language agents. EMNLP, 2024. +[116] Haoran Xu et al. Contrastive preference optimization: Pushing the boundaries of LLM performance in machine translation. ICML, 2024. +[117] Jiwoo Hong et al. ORPO: Monolithic preference optimization without reference model. EMNLP, 2024. +[118] Shiva Kumar Pentyala et al. Paft: A parallel training paradigm for effective llm fine-tuning. arXiv, 2024. +[119] Songyang Gao et al. Linear alignment: A closed-form solution for aligning human preferences without tuning and feedback. ICML, 2024. +[120] Feifan Song et al. Icdpo: Effectively borrowing alignment capability of others via in-context direct preference optimization. arXiv, 2024. +[121] Shangmin Guo et al. Direct language model alignment from online ai feedback. arXiv, 2024. +[122] Biqing Qi et al. Online dpo: Online direct preference optimization with fast-slow chasing. arXiv, 2024. +[123] Weizhe Yuan et al. Self-rewarding language models. ICML, 2024. +[124] Wenda Xu et al. BPO: Staying close to the behavior LLM creates better online LLM alignment. EMNLP, 2024. +[125] Saeed Khaki et al. RS-DPO: A hybrid rejection sampling and direct preference optimization method for alignment of large language models. NAACL, 2024. +[126] Tianqi Liu et al. Statistical rejection sampling improves preference optimization. ICLR, 2024. +[127] Ruizhe Shi et al. The crucial role of samplers in online direct preference optimization. *ICLR*, 2025. +[128] Lichang Chen et al. 
Optune: Efficient online preference tuning. arXiv, 2024. +[129] Tianduo Wang et al. Self-training with direct preference optimization improves chain-of-thought reasoning. ACL, 2024. +[130] Jiafan He et al. Accelerated preference optimization for large language model alignment. arXiv, 2024. +[131] Wei Xiong et al. Iterative preference learning from human feedback: Bridging theory and practice for RLHF under KL-constraint. ICML, 2024. +[132] Yixin Liu et al. Comal: A convergent meta-algorithm for aligning llms with general preferences. arXiv, 2024. +[133] Jing Xu et al. Some things are more cringe than others: Iterative preference optimization with the pairwise cringe loss. arXiv, 2024. +[134] Jongwoo Ko et al. Sera: Self-reviewing and alignment of large language models using implicit reward margins. *ICLR*, 2025. +[135] Zhaoyang Wang et al. Cream: Consistency regularized self-rewarding language models. *ICLR*, 2025. +[136] Prasann Singhal et al. D2PO: Discriminator-guided DPO with response evaluation models. COLM, 2024. +[137] Aiwei Liu et al. Direct large language model alignment through self-rewarding contrastive prompt distillation. ACL, 2024. +[138] Tengyang Xie et al. Exploratory preference optimization: Provably sample-efficient exploration in rlhf with general function approximation. *ICLR*, 2025. +[139] Shenao Zhang et al. Self-exploring language models: Active preference elicitation for online alignment. arXiv, 2024. +[140] Shicong Cen et al. Value-incentivized preference optimization: A unified approach to online and offline rlhf. *ICLR*, 2025. +[141] Chenjia Bai et al. Online preference alignment for language models via count-based exploration. *ICLR*, 2025. +[142] Yuda Song et al. The importance of online data: Understanding preference fine-tuning via coverage. NeurIPS, 2024. +[143] Yaojie Shen et al. Aipo: Improving training objective for iterative preference optimization. arXiv, 2024. +[144] Yunhao Tang et al. 
Understanding the performance gap between online and offline alignment algorithms. arXiv, 2024. +[145] Shusheng Xu et al. Is DPO superior to PPO for LLM alignment? A comprehensive study. ICML, 2024. +[146] William Muldrew et al. Active preference learning for large + +language models. ICML, 2024. +[147] Seola Choi et al. Active preference optimization via maximizing learning capacity. OpenReview, 2024. +[148] Kaixuan Ji et al. Reinforcement learning from human feedback with active queries. arXiv, 2024. +[149] Nirjhar Das et al. Active preference optimization for sample efficient rlhf. arXiv, 2024. +[150] Zhanhui Zhou et al. Beyond one-preference-fits-all alignment: Multi-objective direct preference optimization. ACL Findings, 2024. +[151] Xingzhou Lou et al. Spo: Multi-dimensional preference sequential alignment with implicit reward modeling. arXiv, 2024. +[152] Yu Zhang et al. MOSLIM: Align with diverse preferences in prompts through reward classification. OpenReview, 2025. +[153] Anirudhan Badrinath et al. Hybrid preference optimization: Aug-mentation direct preference optimization with auxiliary objectives. arXiv, 2024. +[154] Yiju Guo et al. Controllable preference optimization: Toward controllable multi-objective alignment. EMNLP, 2024. +[155] Abhijnan Nath et al. Simultaneous reward distillation and preference learning: Get you a language model who can do both. arXiv, 2024. +[156] Zixiang Chen et al. Self-play fine-tuning converts weak language models to strong language models. ICML, 2024. +[157] Yue Wu et al. Self-play preference optimization for language model alignment. ICLR, 2025. +[158]Gokul Swamy et al. A minimaximalist approach to reinforcement learning from human feedback. ICML, 2024. +[159] Lin Gui et al. Bonbon alignment for large language models and the sweetness of best-of-n sampling. NeurIPS, 2024. +[160] Remi Munos et al. Nash learning from human feedback. ICML, 2024. +[161] Corby Rosset et al. 
Direct nash optimization: Teaching language models to self-improve with general preferences. arXiv, 2024. +[162] Daniele Calandriello et al. Human alignment of large language models through online preference optimisation. ICML, 2024. +[163] Eugene Choi et al. Self-improving robust preference optimization. *ICLR*, 2025. +[164] Haoyan Yang et al. Dynamic noise preference optimization for llm self-improvement via synthetic data. arXiv, 2025. +[165] Alexey Gorbatovski et al. Learn your reference model for real good alignment. arXiv, 2024. +[166] Yu Meng et al. Simpo: Simple preference optimization with a reference-free reward. NeurIPS, 2024. +[167] Teng Xiao et al. SimPER: A minimalist approach to preference alignment without hyperparameters. *ICLR*, 2025. +[168] Yixin Liu et al. Understanding reference policies in direct preference optimization. arXiv, 2024. +[169] Chaoqi Wang et al. Beyond reverse kl: Generalizing direct preference optimization with diverse divergence constraints. *ICLR*, 2023. +[170] Stewart Slocum et al. Diverse preference learning for capabilities and alignment. ICLR, 2025. +[171] Amitava Das et al. Dpo kernels: A semantically-aware, kernel-enhanced, and divergence-rich paradigm for direct preference optimization. arXiv, 2025. +[172] Mingye Zhu et al. FlipGuard: Defending preference alignment against update regression with constrained optimization. EMNLP, 2024. +[173] Qingyu Yin et al. Direct preference optimization using sparse feature-level constraints. arXiv, 2024. +[174] Yunhao Tang et al. Generalized preference optimization: A unified approach to offline alignment. ICML, 2024. +[175] Haozhe Ji et al. Towards efficient exact optimization of language model alignment. ICML, 2024. +[176] Arsalan Sharifnassab et al. Soft preference optimization: Aligning language models to expert distributions. arXiv, 2024. +[177] Janghwan Lee et al. Improving conversational abilities of quantized large language models via direct preference alignment. ACL, 2024. 
+[178] Audrey Huang et al. Correcting the mythos of kl-regularization: Direct alignment without overoptimization via chi-squared preference optimization. arXiv, 2025. +[179] Geon-Hyeong Kim et al. SafeDPO: A simple approach to direct preference optimization with enhanced safety. OpenReview, 2025. +[180] Akifumi Wachi et al. Stepwise alignment for constrained language model policy optimization. NeurIPS, 2024. +[181] Zixuan Liu et al. Enhancing llm safety via constrained direct + +preference optimization. arXiv, 2024. +[182] San Kim and Gary Geunbae Lee. Adversarial dpo: Harnessing harmful data for reducing toxicity with minimal impact on coherence and evasiveness in dialogue agents. arXiv, 2024. +[183] Andrew Lee et al. A mechanistic understanding of alignment algorithms: a case study on dpo and toxicity. ICML, 2024. +[184] Yiming Zhang et al. Backtracking improves generation safety. ICLR, 2025. +[185] Seongho Son et al. Right now, wrong then: Non-stationary direct preference optimization under preference drift. arXiv, 2024. +[186] Eugene Choi et al. Self-improving robust preference optimization. *ICLR*, 2025. +[187] Adam Fisch et al. Robust preference optimization through reward model distillation. arXiv, 2024. +[188] Yong Lin et al. On the limited generalization capability of the implicit reward model induced by direct preference optimization. EMNLP Findings, 2024. +[189] Fahim Tajwar et al. Preference fine-tuning of llms should leverage suboptimal, on-policy data. ICML, 2024. +[190] Hongyi Yuan et al. Rrrh: Rank responses to align language models with human feedback. NeurIPS, 2023. +[191] Ryan Park et al. Disentangling length from quality in direct preference optimization. ACL Findings, 2024. +[192] Junru Lu et al. Eliminating biased length reliance of direct preference optimization via down-sampled KL divergence. EMNLP, 2024. +[193] Weizhe Yuan et al. Following length constraints in instructions. arXiv, 2024. +[194] Kian Ahrabian et al. 
The hitchhiker's guide to human alignment with* po. arXiv, 2024. +[195] Wei Liu et al. Length desensitization in directed preference optimization. arXiv, 2024. +[196] Guanzheng Chen et al. LongPO: Long context self-evolution of large language models through short-to-long preference optimization. ICLR, 2025. +[197] Prasann Singhal et al. A long way to go: Investigating length correlations in RLHF. COLM, 2024. +[198] Kyle Richardson et al. Understanding the logic of direct preference alignment through logic. arXiv, 2024. +[199] Karel D'Oosterlinck et al. Anchored preference optimization and contrastive revisions: Addressing underspecification in alignment. arXiv, 2024. +[200] Arka Pal et al. Smaug: Fixing failure modes of preference optimisation with dpo-positive. arXiv, 2024. +[201] Yuzi Yan et al. 3d-properties: Identifying challenges in DPO and charting a path forward. ICLR, 2025. +[202] Duanyu Feng et al. Towards analyzing and understanding the limitations of dpo: A theoretical perspective. arXiv, 2024. +[203] Hui Yuan et al. A common pitfall of margin-based language model alignment: Gradient entanglement. *ICLR*, 2025. +[204] Noam Razin et al. Unintentional unalignment: Likelihood displacement in direct preference optimization. arXiv, 2024. +[205] Zhengyan Shi et al. Understanding likelihood over-optimisation in direct alignment algorithms. arXiv, 2024. +[206] Yong Lin et al. Mitigating the alignment tax of RLHF. EMNLP, 2024. +[207] Megh Thakkar et al. A deep dive into the trade-offs of parameter-efficient preference alignment techniques. ACL, 2024. +[208] Keming Lu et al. Online merging optimizers for boosting rewards and mitigating tax in alignment. arXiv, 2024. +[209] Angelica Chen et al. Preference learning algorithms do not learn preference rankings. NeurIPS, 2024. +[210] Wenyi Xiao et al. A comprehensive survey of direct preference optimization: Datasets, theories, variants, and applications. arXiv, 2024. +[211] Pierre Harvey Richemond et al. 
Offline regularised reinforcement learning for large language models alignment. arXiv, 2024. +[212] Christian Wirth et al. A survey of preference-based reinforcement learning methods. JMLR, 2017. +[213] Jiaming Ji et al. Ai alignment: A comprehensive survey. arXiv, 2023. +[214] Xinpeng Wang et al. On the essence and prospect: An investigation of alignment approaches for big models. *IJCAI*, 2024. +[215] Hannah Rose Kirk et al. The past, present and better future of feedback learning in large language models for subjective human preferences and values. EMNLP, 2023. +[216] Patrick Fernandes et al. Bridging the gap: A survey on integrating + +(human) feedback for natural language generation. TACL, 2023. +[217] Timo Kaufmann et al. A survey of reinforcement learning from human feedback. arXiv, 2023. +[218] Ralph Allan Bradley and Milton E Terry. Rank analysis of incomplete block designs: I. the method of paired comparisons. Biometrika, 1952. +[219] John Schulman et al. Proximal policy optimization algorithms. arXiv, 2017. +[220] Arash Ahmadian et al. Back to basics: Revisiting reinforce style optimization for learning from human feedback in llms. ACL, 2024. +[221] Ziniu Li et al. ReMax: A simple, effective, and efficient reinforcement learning method for aligning large language models. ICML, 2024. +[222] Zhihong Shao et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv, 2024. +[223] Jian Hu. Reinforce++: A simple and efficient approach for aligning large language models. arXiv, 2025. +[224] Chris Lu et al. Discovering preference optimization algorithms with and for large language models. NeurIPS, 2024. +[225] Hanyang Zhao et al. RainbowPO: A unified framework for combining improvements in preference optimization. ICLR, 2025. +[226] Hamish Ivison et al. Unpacking dpo and ppo: Disentangling best practices for learning from preference feedback. NeurIPS, 2024. +[227] Amir Saeidi et al. 
Insights into alignment: Evaluating dpo and its variants across multiple tasks. arXiv, 2024. +[228] Andi Nika et al. Reward model learning vs. direct policy optimization: a comparative analysis of learning from human preferences. ICML, 2024. +[229] Ziniu Li et al. When is rl better than dpo in rlhf? a representation and optimization perspective. *ICLR Tiny Papers*, 2024. +[230] Yao Zhao et al. Slic-hf: Sequence likelihood calibration with human feedback. arXiv, 2023. +[231] Feifan Song et al. Preference ranking optimization for human alignment. AAAI, 2024. +[232] Chaoqi Wang et al. Preference optimization with multi-sample comparisons. arXiv, 2024. +[233] Ziniu Li et al. Policy optimization in rlhf: The impact of out-of-preference data. arXiv, 2023. +[234] Lei Li et al. Improving reasoning ability of large language models via iterative uncertainty-based preference optimization. OpenReview, 2025. +[235] Abhimanyu Dubey et al. The llama 3 herd of models. arXiv, 2024. +[236] Lily H Zhang and Rajesh Ranganath. Win rate is all that can matter from preference data alone. OpenReview, 2025. +[237] Ganqu Cui et al. Ultrafeedback: Boosting language models with high-quality feedback. ICML, 2023. +[238] Jiaming Ji et al. Pku-saferlhf: Towards multi-level safety alignment for llms with human preference. arXiv, 2024. +[239] Zhilin Wang et al. Helpsteer: Multi-attribute helpfulness dataset for steerlm. arXiv, 2023. +[240] Hunter Lightman et al. Let's verify step by step. ICLR, 2023. +[241] Kawin Ethayarajh et al. Understanding dataset difficulty with v-usable information. ICML, 2022. +[242] Banghua Zhu et al. Starling-7b: Improving llm helpfulness & harmlessness with rlaif, 2023. +[243] Wing Lian et al. Openorca: An open dataset of gpt augmented flan reasoning traces, 2023. +[244] Luigi Daniele and Suphavadeeprasit. Amplify-instruct: Synthetically generated diverse multi-turn conversations for efficient llm training., 2023. +[245] Jiaming Ji et al. 
Beavertails: Towards improved safety alignment of llm via a human-preference dataset. NeurIPS, 2023. +[246] Andrew Maas et al. Learning word vectors for sentiment analysis. ACL, 2011. +[247] Michael Volske et al. Tl; dr: Mining reddit to learn automatic summarization. EMNLP Workshop, 2017. +[248] Deep Ganguli et al. Red teaming language models to reduce harms: Methods, scaling behaviors, and lessons learned. arXiv, 2022. +[249] Karl Cobbe et al. Training verifiers to solve math word problems. arXiv, 2021. +[250] Yann Dubois et al. Length-controlled alpacaeval: A simple way to debias automatic evaluators. arXiv, 2024. +[251] Lianmin Zheng et al. Judging llm-as-a-judge with mt-bench and chatbot arena. NeurIPS, 2023. +[252] Andy Zou et al. Universal and transferable adversarial attacks on + +aligned language models. arXiv, 2023. +[253] Tianle Li et al. From live data to high-quality benchmarks: The arena-hard pipeline. 2024. +[254] Stephanie Lin et al. Truthfulqa: Measuring how models mimic human falsehoods. arXiv, 2021. +[255] Jeffrey Zhou et al. Instruction-following evaluation for large language models. arXiv, 2023. +[256] Mirac Suzgun et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv, 2022. +[257] Dan Hendrycks et al. Measuring mathematical problem solving with the math dataset. arXiv, 2021. +[258] David Rein et al. Gpqa: A graduate-level google-proof q&a benchmark. COLM, 2024. +[259] Zayne Sprague et al. Musr: Testing the limits of chain-of-thought with multistep soft reasoning. arXiv, 2023. +[260] Yubo Wang et al. Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. NeurIPS, 2024. +[261] Fengqing Jiang et al. Identifying and mitigating vulnerabilities in llm-integrated applications. arXiv, 2023. +[262] Ning Ding et al. Enhancing chat language models by scaling high-quality instructional conversations. arXiv, 2023. +[263] Qiyu Wu et al. Word alignment as preference for machine translation. 
EMNLP, 2024. +[264] Yinghao Hu et al. Fine-tuning large language models for improving factuality in legal question answering. COLING, 2025. +[265] Leonidas Gee et al. Code-optimise: Self-generated preference data for correctness and efficiency. arXiv, 2024. +[266] Yibo Miao et al. Aligning codellms with direct preference optimization. arXiv, 2024. +[267] Kechi Zhang et al. Codedpo: Aligning code models with self generated and verified source code. arXiv, 2024. +[268] Guoxin Chen et al. Step-level value preference optimization for mathematical reasoning. EMNLP, 2024. +[269] Wen Lai et al. LLMs beyond English: Scaling the multilingual capability of LLMs with cross-lingual feedback. ACL Findings, 2024. +[270] Yuxin Chen et al. On softmax direct preference optimization for recommendation. NeurIPS, 2024. +[271] Zhuoxi Bai et al. Finetuning large language model for personalized ranking. arXiv, 2024. +[272] Yi Gu et al. Diffusion-rpo: Aligning diffusion models through relative preference optimization. arXiv, 2024. +[273] Shivanshu Shekhar et al. See-dpo: Self entropy enhanced direct preference optimization. arXiv, 2024. +[274] Shufan Li et al. Aligning diffusion models by optimizing human utility. NeurIPS, 2024. +[275] Navonil Majumder et al. Tango 2: Aligning diffusion-based text-to-audio generations through direct preference optimization. ACM MM, 2024. +[276] Bram Wallace et al. Diffusion model alignment using direct preference optimization. CVPR, 2024. +[277] Shentao Yang et al. A dense reward view on aligning text-to-image diffusion with preference. ICML, 2024. +[278] Kai Yang et al. Using human feedback to fine-tune diffusion models without any reward model. CVPR, 2024. +[279] Buhua Liu et al. Alignment of diffusion models: Fundamentals, challenges, and future. arXiv, 2024. +[280] Shengzhi Li et al. Multi-modal preference alignment remedies degradation of visual instruction tuning on language models. ACL, 2024. +[281] Ziqi Liang et al. 
AlignCap: Aligning speech emotion captioning to human preferences. EMNLP, 2024. +[282] Elmira Amirloo et al. Understanding alignment in multimodal llms: A comprehensive study. arXiv, 2024. +[283] Jinlan Fu et al. Chip: Cross-modal hierarchical direct preference optimization for multimodal llms. arXiv, 2025. +[284] Ruohong Zhang et al. Direct preference optimization of video large multimodal models from language model reward. arXiv, 2024. +[285] Yuxi Xie et al. V-DPO: Mitigating hallucination in large vision language models via vision-guided direct preference optimization. EMNLP Findings, 2024. +[286] Peng Xu et al. Lvlm-ehub: A comprehensive evaluation benchmark for large vision-language models. TPAMI, 2025. +[287] Zhongzhan Huang et al. A causality-aware paradigm for evaluating creativity of multimodal large language models. TPAMI, 2025. \ No newline at end of file diff --git a/data/2025/2503_11xxx/2503.11701/images/1936fc67f1754632849839cda874e2308bca436fc80fd4dd6aed4bbc4627af5b.jpg b/data/2025/2503_11xxx/2503.11701/images/1936fc67f1754632849839cda874e2308bca436fc80fd4dd6aed4bbc4627af5b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ffba56f3d3af574de75971d24f8b5be7da1b550 --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/1936fc67f1754632849839cda874e2308bca436fc80fd4dd6aed4bbc4627af5b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81667397f0865e40e7cc25e794bae488dd5ff60c0eb5c621c80787f40d53814e +size 9142 diff --git a/data/2025/2503_11xxx/2503.11701/images/2418677a52580d49c1eb7b14d07811b302eac74ef7cbf60a48100266ab6e3a8d.jpg b/data/2025/2503_11xxx/2503.11701/images/2418677a52580d49c1eb7b14d07811b302eac74ef7cbf60a48100266ab6e3a8d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..19ce4ce6b6ff711af17c267bba5d3e151bf180d7 --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/2418677a52580d49c1eb7b14d07811b302eac74ef7cbf60a48100266ab6e3a8d.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:140fa4f7937f154c2c9c6d6e2d56df86eb2c63f2f15ed18bc6761b2fa538af18 +size 9980 diff --git a/data/2025/2503_11xxx/2503.11701/images/3fa55e266f94781e62d4d38b6635a107434b9ae21e066958ea4c68c25ad69067.jpg b/data/2025/2503_11xxx/2503.11701/images/3fa55e266f94781e62d4d38b6635a107434b9ae21e066958ea4c68c25ad69067.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2f8bc5ed7027d5160d5f31f03b9b94813d777c57 --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/3fa55e266f94781e62d4d38b6635a107434b9ae21e066958ea4c68c25ad69067.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7eb73c73c65868d8b8b8569b2a10b6a86c819dd8f5fd5f8e97714ccb9ae8f939 +size 10667 diff --git a/data/2025/2503_11xxx/2503.11701/images/51e607ac52e2b1be7f15ae667f9a6db4fcf4fe35b8a1acc7faace182fc1827a0.jpg b/data/2025/2503_11xxx/2503.11701/images/51e607ac52e2b1be7f15ae667f9a6db4fcf4fe35b8a1acc7faace182fc1827a0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f831027c5c3703e90215bbeff1baf32530e341ff --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/51e607ac52e2b1be7f15ae667f9a6db4fcf4fe35b8a1acc7faace182fc1827a0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b587c01739ad691fd0733ba5a0d2b6baee0fe11519959e4497d8862905bcbaa8 +size 6853 diff --git a/data/2025/2503_11xxx/2503.11701/images/522902a82b6177e867e4aff6c3488ea5e1a2da6b4a249cebc164f52549476e7d.jpg b/data/2025/2503_11xxx/2503.11701/images/522902a82b6177e867e4aff6c3488ea5e1a2da6b4a249cebc164f52549476e7d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5f72440576ea3f41cddebc628155f6476fc1d1d8 --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/522902a82b6177e867e4aff6c3488ea5e1a2da6b4a249cebc164f52549476e7d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe8ad2737654d04e5ceda615ffc61bd6dde4feb9f74c5bdf7832708d2f202c89 +size 183682 diff --git 
a/data/2025/2503_11xxx/2503.11701/images/6caf22a5ddff5209964e015b2e0c1943e8baf3c949727cf79a171be3e313ed4f.jpg b/data/2025/2503_11xxx/2503.11701/images/6caf22a5ddff5209964e015b2e0c1943e8baf3c949727cf79a171be3e313ed4f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4856a7736e4a2dc11e32b956dc403510d25f939f --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/6caf22a5ddff5209964e015b2e0c1943e8baf3c949727cf79a171be3e313ed4f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09e5c56d9d1d6936ba9d84d80df63caebbc5702421e8bed19d22f0ccbd54d561 +size 13196 diff --git a/data/2025/2503_11xxx/2503.11701/images/6f8f061ab915d05683da8b9488cffb80716ed95531ead04552cb45454c8c4d61.jpg b/data/2025/2503_11xxx/2503.11701/images/6f8f061ab915d05683da8b9488cffb80716ed95531ead04552cb45454c8c4d61.jpg new file mode 100644 index 0000000000000000000000000000000000000000..af87e63bcfd931e88dc1d86e99766d8f3e5803b5 --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/6f8f061ab915d05683da8b9488cffb80716ed95531ead04552cb45454c8c4d61.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:013d618d922c733937c4c83df327cb3dff556fa14b2b7fa2ded206b0f383268d +size 162467 diff --git a/data/2025/2503_11xxx/2503.11701/images/70b507821c7cbb1add794e8b294a02e6ae3b31264c7247b117d42453c522c637.jpg b/data/2025/2503_11xxx/2503.11701/images/70b507821c7cbb1add794e8b294a02e6ae3b31264c7247b117d42453c522c637.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3bc794dba6787dd4bd11af6444f486f5895a77ad --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/70b507821c7cbb1add794e8b294a02e6ae3b31264c7247b117d42453c522c637.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58ef39c7fc023d3a383dca0d7bccb820437e9d8080382df80bcb6808d5c12155 +size 6450 diff --git a/data/2025/2503_11xxx/2503.11701/images/7440338027ac4b3b46a5841636b860b019e625af6aaaf80166eaf81d38767941.jpg 
b/data/2025/2503_11xxx/2503.11701/images/7440338027ac4b3b46a5841636b860b019e625af6aaaf80166eaf81d38767941.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ecd866f0ff7cba7a6a039b3bf5e75373a497db06 --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/7440338027ac4b3b46a5841636b860b019e625af6aaaf80166eaf81d38767941.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0867dc5baebcd3ae7f839551d483efc3adf7cc41db03fb8a5632fe36801d6b9b +size 6827 diff --git a/data/2025/2503_11xxx/2503.11701/images/7d0b0c1f1f41595c2153dd474b54b2117d70052e28f86633cea08504dba7746a.jpg b/data/2025/2503_11xxx/2503.11701/images/7d0b0c1f1f41595c2153dd474b54b2117d70052e28f86633cea08504dba7746a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c3f64bad3f41d74ed53f92174b6fbd7753a1aef8 --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/7d0b0c1f1f41595c2153dd474b54b2117d70052e28f86633cea08504dba7746a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c22da215d1730571e58ce608ee020bf297b2a702a684dfaaef8abfbf3f3d261 +size 97930 diff --git a/data/2025/2503_11xxx/2503.11701/images/97110cbff41a540ea7df8e5cae0833d64c138b3ba8129d9b56e170f6769e5ff0.jpg b/data/2025/2503_11xxx/2503.11701/images/97110cbff41a540ea7df8e5cae0833d64c138b3ba8129d9b56e170f6769e5ff0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b7ae4305f419b0513b2a97cec12f76578e8633f --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/97110cbff41a540ea7df8e5cae0833d64c138b3ba8129d9b56e170f6769e5ff0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95506bd2fc87a94de87fbc8f1f14de08df184879baacc15b8f3769431f39b8b5 +size 8092 diff --git a/data/2025/2503_11xxx/2503.11701/images/9dc889c067202b098696ded0c0e6a5a38cbb79306dce5dcfd06361d0c4acaa7a.jpg b/data/2025/2503_11xxx/2503.11701/images/9dc889c067202b098696ded0c0e6a5a38cbb79306dce5dcfd06361d0c4acaa7a.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..d7cf2cfaff34aa77738e2911d22c4cf69662f4e1 --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/9dc889c067202b098696ded0c0e6a5a38cbb79306dce5dcfd06361d0c4acaa7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c3aaa569d98dce09732614e4d5d368f4fce61ae669cb087285a670041c4301d +size 6905 diff --git a/data/2025/2503_11xxx/2503.11701/images/ab1cb06bdd578e7bee29a0deff07d4285154eb2afd39c5194af16ae72fc055ce.jpg b/data/2025/2503_11xxx/2503.11701/images/ab1cb06bdd578e7bee29a0deff07d4285154eb2afd39c5194af16ae72fc055ce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..00b1918091bbc0558dd98782690c643e75a1750e --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/ab1cb06bdd578e7bee29a0deff07d4285154eb2afd39c5194af16ae72fc055ce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c1505e018bc8d61bea3b76f52b3afb718ca958a8c45d2ab991709b5444ec9b8 +size 77739 diff --git a/data/2025/2503_11xxx/2503.11701/images/b4f2681503f528d1e7c0de33c6ba041c4462b7b9b983ced72c8fcdbec711851a.jpg b/data/2025/2503_11xxx/2503.11701/images/b4f2681503f528d1e7c0de33c6ba041c4462b7b9b983ced72c8fcdbec711851a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3593c97ef2ba11887bc3e0c06938939b83416664 --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/b4f2681503f528d1e7c0de33c6ba041c4462b7b9b983ced72c8fcdbec711851a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebd507e0c5664ed60909fda3fb2b4b44d17ac4b792f098f92965c570da45b373 +size 12480 diff --git a/data/2025/2503_11xxx/2503.11701/images/cddebe4de78a02102cffc5e93cf429138280b5deac316fd35c18e11122d7aa8e.jpg b/data/2025/2503_11xxx/2503.11701/images/cddebe4de78a02102cffc5e93cf429138280b5deac316fd35c18e11122d7aa8e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f0a15c4b531ff683d076fe2b8900148dddcb2db6 --- /dev/null +++ 
b/data/2025/2503_11xxx/2503.11701/images/cddebe4de78a02102cffc5e93cf429138280b5deac316fd35c18e11122d7aa8e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:993f2679106b5fab0cde8cf037ff081e2c6218d7681d769c8d35f3e2a0f8c59d +size 73026 diff --git a/data/2025/2503_11xxx/2503.11701/images/e5c5a6a0d780ea4b5c9a8a90db2723f6fdb6c09e14347e19016d55dcced76086.jpg b/data/2025/2503_11xxx/2503.11701/images/e5c5a6a0d780ea4b5c9a8a90db2723f6fdb6c09e14347e19016d55dcced76086.jpg new file mode 100644 index 0000000000000000000000000000000000000000..be836ba25f0715ee8b4950ebbf354bfedb1e2dc7 --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/e5c5a6a0d780ea4b5c9a8a90db2723f6fdb6c09e14347e19016d55dcced76086.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d9efdd5b5636c82cd2df98961a6e5906544b18cc9388abca400f4b7ee50751c +size 74136 diff --git a/data/2025/2503_11xxx/2503.11701/images/eae7a85ce86a95c4206bcb6c9511e719f11d3ccff68be6fc564d712b45aa1a2d.jpg b/data/2025/2503_11xxx/2503.11701/images/eae7a85ce86a95c4206bcb6c9511e719f11d3ccff68be6fc564d712b45aa1a2d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..61eea3b7991d301b4b6617ffd46f2b0616b48805 --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/eae7a85ce86a95c4206bcb6c9511e719f11d3ccff68be6fc564d712b45aa1a2d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aea5c7bca07bdf8714de096028073649eb6fba2285e610fdab2b2c8535543a69 +size 13997 diff --git a/data/2025/2503_11xxx/2503.11701/images/eec766babe90c18e92b263abf2f3723da32c85a958aae6f3eb98f4722ac37fa7.jpg b/data/2025/2503_11xxx/2503.11701/images/eec766babe90c18e92b263abf2f3723da32c85a958aae6f3eb98f4722ac37fa7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b4c3bd0427197e9378ba9dc15bae4796953fff0f --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/images/eec766babe90c18e92b263abf2f3723da32c85a958aae6f3eb98f4722ac37fa7.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:05337fb985304429d731fdae3e81cc2ba6506f264535bd07d69e03538dcfafa5 +size 317300 diff --git a/data/2025/2503_11xxx/2503.11701/layout.json b/data/2025/2503_11xxx/2503.11701/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..5d674cee777a3b429790bad65a327e9824cc2417 --- /dev/null +++ b/data/2025/2503_11xxx/2503.11701/layout.json @@ -0,0 +1,18973 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 75, + 53, + 534, + 80 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 53, + 534, + 80 + ], + "spans": [ + { + "bbox": [ + 75, + 53, + 534, + 80 + ], + "type": "text", + "content": "A Survey of Direct Preference Optimization" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 63, + 91, + 547, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 91, + 547, + 120 + ], + "spans": [ + { + "bbox": [ + 63, + 91, + 547, + 120 + ], + "type": "text", + "content": "Shunyu Liu, Wenkai Fang, Zetian Hu, Junjie Zhang, Yang Zhou, Kongcheng Zhang, Rongcheng Tu, Ting-En Lin, Fei Huang, Mingli Song, Yongbin Li, and Dacheng Tao, Fellow, IEEE" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 64, + 137, + 546, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 137, + 546, + 274 + ], + "spans": [ + { + "bbox": [ + 64, + 137, + 546, + 274 + ], + "type": "text", + "content": "Abstract—Large Language Models (LLMs) have demonstrated unprecedented generative capabilities, yet their alignment with human values remains critical for ensuring helpful and harmless deployments. While Reinforcement Learning from Human Feedback (RLHF) has emerged as a powerful paradigm for aligning LLMs with human preferences, its reliance on complex reward modeling introduces inherent trade-offs in computational efficiency and training stability. 
In this context, Direct Preference Optimization (DPO) has recently gained prominence as a streamlined alternative that directly optimizes LLMs using human preferences, thereby circumventing the need for explicit reward modeling. Owing to its theoretical elegance and computational efficiency, DPO has rapidly attracted substantial research efforts exploring its various implementations and applications. However, this field currently lacks systematic organization and comparative analysis. In this survey, we conduct a comprehensive overview of DPO and introduce a novel taxonomy, categorizing previous works into four key dimensions: data strategy, learning framework, constraint mechanism, and model property. We further present a rigorous empirical analysis of DPO variants across standardized benchmarks. Additionally, we discuss real-world applications, open challenges, and future directions for DPO. This work delivers both a conceptual framework for understanding DPO and practical guidance for practitioners, aiming to advance robust and generalizable alignment paradigms. All collected resources are available and will be continuously updated at https://github.com/liushunyu/awesome-direct-preference-optimization." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 64, + 282, + 537, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 282, + 537, + 293 + ], + "spans": [ + { + "bbox": [ + 64, + 282, + 537, + 293 + ], + "type": "text", + "content": "Index Terms—Alignment, Direct Preference Optimization, Large Language Models, Reinforcement Learning from Human Feedback." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 341, + 140, + 353 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 341, + 140, + 353 + ], + "spans": [ + { + "bbox": [ + 45, + 341, + 140, + 353 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 360, + 301, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 360, + 301, + 556 + ], + "spans": [ + { + "bbox": [ + 44, + 360, + 301, + 556 + ], + "type": "text", + "content": "The rapid advancement of Large Language Models (LLMs) has revolutionized artificial intelligence [1, 2, 3, 4, 5, 6, 7, 8], enabling unprecedented generative capabilities across diverse applications, such as dialogue systems [9, 10], code generation [11, 12, 13], and medical diagnosis [14, 15, 16, 17]. Models like OpenAI-o1 [18] and DeepSeekR1 [19] have demonstrated remarkable proficiency in understanding and generating human-like text, outperforming traditional language processing techniques [20]. However, their immense power also introduces significant risks: LLMs may inadvertently produce harmful content (e.g., jailbreak suggestion) [21], exhibit hallucination behaviors (e.g., misinformation) [22], or propagate sociocultural stereotypes (e.g., biased recommendations) [23]. Ensuring that these models align with human values (producing outputs that are helpful, harmless, and honest) has thus become a cornerstone of responsible AI development [24]." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 556, + 301, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 556, + 301, + 581 + ], + "spans": [ + { + "bbox": [ + 45, + 556, + 301, + 581 + ], + "type": "text", + "content": "The critical challenge of aligning LLMs with human values stems from the inherent complexity of encoding abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 591, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 591, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 591, + 301, + 746 + ], + "type": "text", + "content": "This research is supported by the RIE2025 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) (Award I2301E0026), administered by A\\*STAR, as well as supported by Alibaba Group and NTU Singapore through Alibaba-NTU Global e-Sustainability CorpLab (ANGEL). (Corresponding author: Dacheng Tao.) Shunyu Liu, Junjie Zhang, Rongcheng Tu and Dacheng Tao are with Nanyang Technological University, Singapore (e-mail: shunyu.liu@ntu.edu.sg; junjie.zhang@ntu.edu.sg; turongcheng@gmail.com; dacheng.tao@ntu.edu.sg). Wenkai Fang, Yang Zhou, Kongcheng Zhang, and Mingli Song are with the College of Computer Science and Technology, Zhejiang University, China (e-mail: wenkfang@zju.edu.cn; imzhouyang@zju.edu.cn; zhangkc@zju.edu.cn; brooksong@zju.edu.cn). Zetian Hu is with the School of Aerospace Engineering, Tsinghua University, China (e-mail: huzt22@mails.tsinghua.edu.cn). Ting-En Lin, Fei Huang, and Yongbin Li are with the Tongyi Lab, Alibaba Group, China (e-mail: ting-en.lte@alibaba-inc.com; f.huang@alibaba-inc.com; shuide.lyb@alibaba-inc.com)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 342, + 566, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 342, + 566, + 480 + ], + "spans": [ + { + "bbox": [ + 307, + 342, + 566, + 480 + ], + "type": "text", + "content": "ethical principles into concrete model behaviors [25, 26, 27]. Traditional approaches, such as rule-based filtering or supervised learning on curated datasets, often prove inadequate due to their inability to generalize across diverse contexts and adapt to evolving societal norms [28]. The emergence of preference-based alignment paradigms addresses these limitations by framing the problem as optimizing for human feedback rather than inflexible heuristics [29, 30, 31, 32]. This shift recognizes that LLM decision-making often involves nuanced trade-offs between competing values, requiring flexible frameworks capable of incorporating subjective human preferences [33]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 481, + 565, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 481, + 565, + 734 + ], + "spans": [ + { + "bbox": [ + 308, + 481, + 565, + 734 + ], + "type": "text", + "content": "Building upon these insights, Reinforcement Learning from Human Feedback (RLHF) [34, 35] has emerged as the predominant alignment paradigm, leveraging human preferences to guide model optimization. In the RLHF pipeline, human annotators first rank the outputs generated by the language model, and these comparisons are used to train a reward model that quantifies human preferences. The language model is then fine-tuned using RL guided by this reward model, enabling the language model to align with human values by maximizing the predicted rewards. The success of RLHF in aligning models like ChatGPT [36, 37] and Claude [38, 39] underscores its practical utility. 
By translating subjective human preferences into an objective reward signal, RLHF facilitates the optimization of model behavior for value alignment. However, this RLHF paradigm suffers from critical limitations of computational complexity and training instability. Training a separate reward model demands substantial computational resources and high-quality human preference data, which scales poorly across different domains. Moreover, the RL phase often struggles with optimization challenges, such as reward hacking [40] and mode collapse [41]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 323, + 734, + 565, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 734, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 323, + 734, + 565, + 746 + ], + "type": "text", + "content": "These limitations have spurred interest in alternative" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 26, + 563, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 563, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 563, + 34 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 14, + 206, + 35, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 206, + 35, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 206, + 35, + 559 + ], + "type": "text", + "content": "arXiv:2503.11701v1 [cs.LG] 12 Mar 2025" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 43, + 563, + 473 + ], + "blocks": [ + { + "bbox": [ + 50, + 43, + 563, + 473 + ], + "lines": [ + { + "bbox": [ + 50, + 43, + 563, + 473 + ], + "spans": [ + { + "bbox": [ + 50, + 43, + 563, + 473 + ], + "type": "image", + "image_path": "eec766babe90c18e92b263abf2f3723da32c85a958aae6f3eb98f4722ac37fa7.jpg" + } + ] + } + ], + "index": 1, + "angle": 
0, + "type": "image_body" + }, + { + "bbox": [ + 44, + 479, + 565, + 499 + ], + "lines": [ + { + "bbox": [ + 44, + 479, + 565, + 499 + ], + "spans": [ + { + "bbox": [ + 44, + 479, + 565, + 499 + ], + "type": "text", + "content": "Fig. 1: A taxonomy of DPO. We categorize existing DPO works into four branches: data strategy, learning framework, constraint mechanism, and model property. Different colored boxes indicate different categories and their corresponding representative references." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 44, + 508, + 303, + 739 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 508, + 303, + 739 + ], + "spans": [ + { + "bbox": [ + 44, + 508, + 303, + 739 + ], + "type": "text", + "content": "alignment methods that bypass reward modeling while preserving the benefits of preference-based learning. Direct Preference Optimization (DPO) [74, 210] represents a groundbreaking shift in this direction. Unlike RLHF, DPO reframes alignment as a supervised learning problem, directly optimizing the LLM policy using preference data without explicit reward modeling. By leveraging a closed-form mapping between reward functions and optimal policies, DPO eliminates the need for iterative RL training, reducing computational overhead and improving stability. Due to its inherent advantages, DPO has rapidly gained increasing attention from research communities. Existing studies vary widely in data strategies (e.g., point-wise v.s. pair-wise feedback) [67, 211], learning frameworks (e.g., offline v.s. online learning) [121, 122, 126], constraint mechanisms (e.g., different divergence constraints) [169, 171], and model properties (e.g., length bias) [191, 195]. Recent advancements in DPO variants have demonstrated remarkable efficacy in enhancing model alignment with human preferences, achieving unprecedented success across diverse domains [32]." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 308, + 508, + 566, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 508, + 566, + 555 + ], + "spans": [ + { + "bbox": [ + 308, + 508, + 566, + 555 + ], + "type": "text", + "content": "These developments position DPO-based approaches as a compelling alternative to conventional RLHF paradigms for preference alignment tasks. However, despite its promise, the DPO research landscape remains fragmented." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 308, + 561, + 565, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 561, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 561, + 565, + 746 + ], + "type": "text", + "content": "Several surveys related to DPO have been published in recent years, yet they exhibit notable limitations in their scope and analysis of DPO. (1) Scope limitations. While an early survey of [212] presents a comprehensive overview of preference-based RL methods, it predates the advent of DPO and does not address its applications to modern LLMs. Recent surveys on alignment [24, 26, 213, 214] provide broad overviews of LLM alignment techniques but only offer cursory summaries of DPO-related approaches without in-depth analysis. Similarly, surveys on learning from human feedback [30, 215, 216, 217] also only briefly mention DPO as a potential alternative. (2) Taxonomy deficiencies. Gao et al. [29] and Winata et al. [32] introduce a simplified taxonomy for preference learning, while overlooking technical distinctions within its broad categorization. In contrast, Wang et al. 
[31] attempt to classify preference learning across dimensions" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 157 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 157 + ], + "type": "text", + "content": "such as reinforcement learning, reward modeling, feedback, and optimization. However, this taxonomy suffers from significant conceptual overlaps (e.g. reinforcement learning inherently involves optimization). A recent work by Xiao et al. [210] categorizes DPO studies through isolated research questions, which, while useful for problem identification, fragments the methodological connections. Our survey addresses these gaps by presenting the first comprehensive analysis specifically focused on DPO. The main contributions of this survey are summarized as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 160, + 301, + 380 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 51, + 160, + 301, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 160, + 301, + 252 + ], + "spans": [ + { + "bbox": [ + 51, + 160, + 301, + 252 + ], + "type": "text", + "content": "- In this survey, we introduce a novel taxonomy that categorizes existing DPO works into four key dimensions based on different components of the DPO loss: data strategy, learning framework, constraint mechanism, and model property, as shown in Fig. 1. 
This taxonomy provides a systematic framework for understanding the methodological evolution of DPO and highlights the key distinctions between different variations." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 253, + 301, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 253, + 301, + 311 + ], + "spans": [ + { + "bbox": [ + 51, + 253, + 301, + 311 + ], + "type": "text", + "content": "- We conduct a rigorous empirical analysis of DPO variants across standardized benchmarks, revealing critical insights into their performance in diverse scenarios. This analysis offers a comprehensive evaluation of DPO variants and provides practical guidance for practitioners." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 311, + 301, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 311, + 301, + 380 + ], + "spans": [ + { + "bbox": [ + 51, + 311, + 301, + 380 + ], + "type": "text", + "content": "- We discuss real-world applications of DPO and highlight its potential to democratize alignment research by enabling efficient and scalable preference learning across diverse domains. We also outline open challenges and future directions for DPO research, emphasizing the need for robust and generalizable alignment paradigms." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 44, + 383, + 301, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 383, + 301, + 487 + ], + "spans": [ + { + "bbox": [ + 44, + 383, + 301, + 487 + ], + "type": "text", + "content": "The remainder of this survey is organized as follows. Section 2 introduces the background and formulation of DPO. Section 3 presents a taxonomy of DPO, categorizing existing works based on key dimensions. Section 4 describes standardized benchmarks for evaluating DPO methods and presents empirical results. Section 5 discusses real-world applications of DPO and highlights its potential. 
Section 6 outlines open challenges and future directions for DPO research. Finally, Section 7 concludes the survey." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 508, + 235, + 519 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 508, + 235, + 519 + ], + "spans": [ + { + "bbox": [ + 45, + 508, + 235, + 519 + ], + "type": "text", + "content": "2 BACKGROUND AND FORMULATION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 525, + 302, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 525, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 525, + 302, + 748 + ], + "type": "text", + "content": "Preference learning aims to train language model policies to generate responses that better align with human preferences. Specifically, we denote the language model policy as " + }, + { + "bbox": [ + 44, + 525, + 302, + 748 + ], + "type": "inline_equation", + "content": "\\pi(y|x)" + }, + { + "bbox": [ + 44, + 525, + 302, + 748 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 44, + 525, + 302, + 748 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 44, + 525, + 302, + 748 + ], + "type": "text", + "content": " represents the input prompt and " + }, + { + "bbox": [ + 44, + 525, + 302, + 748 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 44, + 525, + 302, + 748 + ], + "type": "text", + "content": " is a candidate response (completion). A language model can be viewed as an autoregressive function that sequentially predicts tokens based on prior context. Mathematically, this is expressed as: " + }, + { + "bbox": [ + 44, + 525, + 302, + 748 + ], + "type": "inline_equation", + "content": "\\pi(y|x) = \\prod_{t=1}^{T} \\pi(y_t | y_{ 0" + }, + { + "bbox": [ + 307, + 619, + 565, + 689 + ], + "type": "text", + "content": " is a hyperparameter that controls the strength of the Kullback-Leibler (KL) divergence penalty. 
Here, the term " + }, + { + "bbox": [ + 307, + 619, + 565, + 689 + ], + "type": "inline_equation", + "content": "\\log \\pi_{\\theta}(\\cdot |x) / \\pi_{\\mathrm{ref}}(\\cdot |x)" + }, + { + "bbox": [ + 307, + 619, + 565, + 689 + ], + "type": "text", + "content": " represents the KL divergence between the current policy " + }, + { + "bbox": [ + 307, + 619, + 565, + 689 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 307, + 619, + 565, + 689 + ], + "type": "text", + "content": " and a reference policy " + }, + { + "bbox": [ + 307, + 619, + 565, + 689 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathrm{ref}}" + }, + { + "bbox": [ + 307, + 619, + 565, + 689 + ], + "type": "text", + "content": ". In practice, the reference policy " + }, + { + "bbox": [ + 307, + 619, + 565, + 689 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathrm{ref}}" + }, + { + "bbox": [ + 307, + 619, + 565, + 689 + ], + "type": "text", + "content": " is set to the SFT model " + }, + { + "bbox": [ + 307, + 619, + 565, + 689 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathrm{sft}}" + }, + { + "bbox": [ + 307, + 619, + 565, + 689 + ], + "type": "text", + "content": ", ensuring that the updated policy remains close to the initial model." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 689, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 689, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 689, + 566, + 748 + ], + "type": "text", + "content": "To optimize the above objective, Proximal Policy Optimization (PPO) [219] has emerged as a promising RL algorithm for LLMs. 
PPO stabilizes training by constraining policy updates within a trust region via a clipped objective, which prevents significant deviations from the previous" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 135 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 135 + ], + "type": "text", + "content": "policy. However, PPO requires an additional critic model to estimate value functions for advantage calculation, thereby introducing extra computational and memory overhead. To address this, recent methods, such as RLOO [220], ReMax [221], GRPO [222], and Reinforce++ [223], introduce critic-free advantage estimation to reduce resource demands while maintaining stable optimization, making them more scalable for large-scale LLM training." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 147, + 209, + 159 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 147, + 209, + 159 + ], + "spans": [ + { + "bbox": [ + 45, + 147, + 209, + 159 + ], + "type": "text", + "content": "2.2 Direct Preference Optimization" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 161, + 300, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 161, + 300, + 243 + ], + "spans": [ + { + "bbox": [ + 44, + 161, + 300, + 243 + ], + "type": "text", + "content": "DPO offers an alternative that streamlines the training process by directly optimizing the policy with preference data [74, 224, 225, 226, 227, 228, 229], thereby eliminating the need for explicit reward modeling in RLHF. The key idea of DPO is a closed-form solution of Eq. 3 that connects reward with the optimal policies. Specifically, the optimal policy corresponding to a given " + }, + { + "bbox": [ + 44, + 161, + 300, + 243 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 44, + 161, + 300, + 243 + ], + "type": "text", + "content": " is defined as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 247, + 301, + 273 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 247, + 301, + 273 + ], + "spans": [ + { + "bbox": [ + 82, + 247, + 301, + 273 + ], + "type": "interline_equation", + "content": "\\pi^ {*} (y | x) = \\frac {1}{Z (x)} \\pi_ {\\mathrm {r e f}} (y | x) \\exp \\left(\\frac {1}{\\beta} r (x, y)\\right), \\tag {4}", + "image_path": "9dc889c067202b098696ded0c0e6a5a38cbb79306dce5dcfd06361d0c4acaa7a.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 276, + 249, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 276, + 249, + 288 + ], + "spans": [ + { + "bbox": [ + 44, + 276, + 249, + 288 + ], + "type": "text", + "content": "where the partition function " + }, + { + "bbox": [ + 44, 
+ 276, + 249, + 288 + ], + "type": "inline_equation", + "content": "Z(x)" + }, + { + "bbox": [ + 44, + 276, + 249, + 288 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 91, + 292, + 301, + 320 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 292, + 301, + 320 + ], + "spans": [ + { + "bbox": [ + 91, + 292, + 301, + 320 + ], + "type": "interline_equation", + "content": "Z (x) = \\sum_ {y} \\pi_ {\\mathrm {r e f}} (y | x) \\exp \\left(\\frac {1}{\\beta} r (x, y)\\right). \\tag {5}", + "image_path": "70b507821c7cbb1add794e8b294a02e6ae3b31264c7247b117d42453c522c637.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 324, + 301, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 324, + 301, + 348 + ], + "spans": [ + { + "bbox": [ + 44, + 324, + 301, + 348 + ], + "type": "text", + "content": "By rearranging the above equation, the reward " + }, + { + "bbox": [ + 44, + 324, + 301, + 348 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 44, + 324, + 301, + 348 + ], + "type": "text", + "content": " can be recovered from the optimal policy " + }, + { + "bbox": [ + 44, + 324, + 301, + 348 + ], + "type": "inline_equation", + "content": "\\pi^{*}" + }, + { + "bbox": [ + 44, + 324, + 301, + 348 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 91, + 352, + 301, + 377 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 352, + 301, + 377 + ], + "spans": [ + { + "bbox": [ + 91, + 352, + 301, + 377 + ], + "type": "interline_equation", + "content": "r (x, y) = \\beta \\log \\frac {\\pi^ {*} (y | x)}{\\pi_ {\\operatorname {r e f}} (y | x)} + \\beta \\log Z (x). 
\\tag {6}", + "image_path": "7440338027ac4b3b46a5841636b860b019e625af6aaaf80166eaf81d38767941.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 380, + 301, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 380, + 301, + 428 + ], + "spans": [ + { + "bbox": [ + 44, + 380, + 301, + 428 + ], + "type": "text", + "content": "Notice that the partition function " + }, + { + "bbox": [ + 44, + 380, + 301, + 428 + ], + "type": "inline_equation", + "content": "Z(x)" + }, + { + "bbox": [ + 44, + 380, + 301, + 428 + ], + "type": "text", + "content": " depends only on the prompt " + }, + { + "bbox": [ + 44, + 380, + 301, + 428 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 44, + 380, + 301, + 428 + ], + "type": "text", + "content": ". By substituting this expression into the preference model of Eq. 1, the preference probability model that " + }, + { + "bbox": [ + 44, + 380, + 301, + 428 + ], + "type": "inline_equation", + "content": "y_{w}" + }, + { + "bbox": [ + 44, + 380, + 301, + 428 + ], + "type": "text", + "content": " is preferred over " + }, + { + "bbox": [ + 44, + 380, + 301, + 428 + ], + "type": "inline_equation", + "content": "y_{l}" + }, + { + "bbox": [ + 44, + 380, + 301, + 428 + ], + "type": "text", + "content": " becomes:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 431, + 301, + 466 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 431, + 301, + 466 + ], + "spans": [ + { + "bbox": [ + 52, + 431, + 301, + 466 + ], + "type": "interline_equation", + "content": "P \\left(y _ {w} \\succ y _ {l} | x\\right) = \\sigma \\left(\\beta \\log \\frac {\\pi^ {*} \\left(y _ {w} \\mid x\\right)}{\\pi_ {\\text {r e f}} \\left(y _ {w} \\mid x\\right)} - \\beta \\log \\frac {\\pi^ {*} \\left(y _ {l} \\mid x\\right)}{\\pi_ {\\text {r e f}} \\left(y _ {l} \\mid x\\right)}\\right). 
\\tag {7}", + "image_path": "3fa55e266f94781e62d4d38b6635a107434b9ae21e066958ea4c68c25ad69067.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 44, + 466, + 301, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 466, + 301, + 502 + ], + "spans": [ + { + "bbox": [ + 44, + 466, + 301, + 502 + ], + "type": "text", + "content": "Based on the above preference probability model, DPO directly optimizes the language mode policy " + }, + { + "bbox": [ + 44, + 466, + 301, + 502 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 44, + 466, + 301, + 502 + ], + "type": "text", + "content": " by minimizing the following negative log-likelihood loss function:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 510, + 301, + 564 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 510, + 301, + 564 + ], + "spans": [ + { + "bbox": [ + 47, + 510, + 301, + 564 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\mathrm {D P O}} (\\theta) = \\\\ - \\mathbb {E} _ {(x, y _ {w}, y _ {l}) \\sim \\mathcal {D}} \\left[ \\log \\sigma \\left(\\beta \\log \\frac {\\pi_ {\\theta} (y _ {w} | x)}{\\pi_ {\\text {r e f}} (y _ {w} | x)} - \\beta \\log \\frac {\\pi_ {\\theta} (y _ {l} | x)}{\\pi_ {\\text {r e f}} (y _ {l} | x)}\\right) \\right], \\tag {8} \\\\ \\end{array}", + "image_path": "6caf22a5ddff5209964e015b2e0c1943e8baf3c949727cf79a171be3e313ed4f.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 44, + 568, + 301, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 568, + 301, + 651 + ], + "spans": [ + { + "bbox": [ + 44, + 568, + 301, + 651 + ], + "type": "text", + "content": "where the KL constraint is implicitly integrated through the use of the reference model " + }, + { + "bbox": [ + 44, + 568, + 301, + 651 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathrm{ref}}" + }, + { + "bbox": [ + 
44, + 568, + 301, + 651 + ], + "type": "text", + "content": ". By minimizing this DPO loss, we directly train the policy to satisfy human preferences without resorting to a separate reward modeling stage or using reinforcement learning optimization as in RLHF, significantly reducing implementation complexity while improving training stability." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 44, + 662, + 208, + 674 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 662, + 208, + 674 + ], + "spans": [ + { + "bbox": [ + 44, + 662, + 208, + 674 + ], + "type": "text", + "content": "2.3 Other Preference Optimization" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 44, + 677, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 677, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 677, + 301, + 748 + ], + "type": "text", + "content": "In addition to DPO, several concurrent preference optimization methods [190, 230, 231] have been proposed that offer alternative approaches to RLHF. These methods explore different strategies for optimizing LLMs to align with human preference without RL. Below, we provide a brief introduction to these approaches." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 309, + 42, + 480, + 53 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 42, + 480, + 53 + ], + "spans": [ + { + "bbox": [ + 309, + 42, + 480, + 53 + ], + "type": "text", + "content": "2.3.1 Sequence Likelihood Calibration" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 55, + 564, + 101 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 55, + 564, + 101 + ], + "spans": [ + { + "bbox": [ + 308, + 55, + 564, + 101 + ], + "type": "text", + "content": "Zhao et al. [230] propose Sequence Likelihood Calibration with Human Feedback (SLiC-HF) to directly align LLMs with human preferences. 
Specifically, the loss function of SLiC-HF is defined as follows:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 318, + 109, + 564, + 137 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 109, + 564, + 137 + ], + "spans": [ + { + "bbox": [ + 318, + 109, + 564, + 137 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\mathrm {S L i C - H F}} (\\theta) = \\max (0, \\delta - \\log \\pi_ {\\theta} (y _ {w} | x) + \\log \\pi_ {\\theta} (y _ {l} | x)) \\\\ - \\lambda \\log \\pi_ {\\theta} (y ^ {*} | x), \\tag {9} \\\\ \\end{array}", + "image_path": "1936fc67f1754632849839cda874e2308bca436fc80fd4dd6aed4bbc4627af5b.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 140, + 565, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 140, + 565, + 210 + ], + "spans": [ + { + "bbox": [ + 308, + 140, + 565, + 210 + ], + "type": "text", + "content": "where the first term is the rank calibration loss with " + }, + { + "bbox": [ + 308, + 140, + 565, + 210 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 308, + 140, + 565, + 210 + ], + "type": "text", + "content": " as a margin hyperparameter, and the second term is the cross-entropy regularization loss with " + }, + { + "bbox": [ + 308, + 140, + 565, + 210 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 308, + 140, + 565, + 210 + ], + "type": "text", + "content": " as a regularization weight. " + }, + { + "bbox": [ + 308, + 140, + 565, + 210 + ], + "type": "inline_equation", + "content": "y^{*}" + }, + { + "bbox": [ + 308, + 140, + 565, + 210 + ], + "type": "text", + "content": " is obtained from either high-quality supervised responses in the SFT dataset or the top-ranked candidate response generated by the SFT model." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 309, + 216, + 528, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 216, + 528, + 227 + ], + "spans": [ + { + "bbox": [ + 309, + 216, + 528, + 227 + ], + "type": "text", + "content": "2.3.2 Rank Responses to Align Human Feedback" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 228, + 565, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 228, + 565, + 322 + ], + "spans": [ + { + "bbox": [ + 307, + 228, + 565, + 322 + ], + "type": "text", + "content": "Yuan et al. [190] introduce Rank Responses to align Human Feedback (RRHF) for LLMs. RRHF extends pair-wise ranking by considering the list-wise ranking order of multiple responses, thus better utilizing the preference information. For an input prompt " + }, + { + "bbox": [ + 307, + 228, + 565, + 322 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 307, + 228, + 565, + 322 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 307, + 228, + 565, + 322 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 307, + 228, + 565, + 322 + ], + "type": "text", + "content": " candidate responses " + }, + { + "bbox": [ + 307, + 228, + 565, + 322 + ], + "type": "inline_equation", + "content": "\\{y_i\\}_{i=1}^N" + }, + { + "bbox": [ + 307, + 228, + 565, + 322 + ], + "type": "text", + "content": ", it optimizes the model to assign higher probabilities to higher-ranked responses via a ranking loss and directly supervises the best response using cross-entropy as follows:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 318, + 327, + 564, + 371 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 327, + 564, + 371 + ], + "spans": [ + { + "bbox": [ + 318, + 327, + 564, + 371 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\mathrm {R R H F}} (\\theta) = \\sum_ {r _ {i} < r 
_ {j}} \\max \\left(0, \\frac {\\log \\pi_ {\\theta} (y _ {i} | x)}{| | y _ {i} | |} - \\frac {\\log \\pi_ {\\theta} (y _ {j} | x)}{| | y _ {j} | |}\\right) \\\\ - \\lambda \\log \\pi_ {\\theta} \\left(y ^ {*} \\mid x\\right), \\tag {10} \\\\ \\end{array}", + "image_path": "b4f2681503f528d1e7c0de33c6ba041c4462b7b9b983ced72c8fcdbec711851a.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 375, + 564, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 375, + 564, + 434 + ], + "spans": [ + { + "bbox": [ + 308, + 375, + 564, + 434 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 308, + 375, + 564, + 434 + ], + "type": "inline_equation", + "content": "r_i = r_\\phi(x, y_i)" + }, + { + "bbox": [ + 308, + 375, + 564, + 434 + ], + "type": "text", + "content": " represents the reward of the response " + }, + { + "bbox": [ + 308, + 375, + 564, + 434 + ], + "type": "inline_equation", + "content": "y_i" + }, + { + "bbox": [ + 308, + 375, + 564, + 434 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 375, + 564, + 434 + ], + "type": "inline_equation", + "content": "y^* = \\arg \\max_{y_i} r_i" + }, + { + "bbox": [ + 308, + 375, + 564, + 434 + ], + "type": "text", + "content": " is the response with the highest reward. Although RRHF avoids the need for reinforcement learning in RLHF, it still utilizes a reward model " + }, + { + "bbox": [ + 308, + 375, + 564, + 434 + ], + "type": "inline_equation", + "content": "r_\\phi" + }, + { + "bbox": [ + 308, + 375, + 564, + 434 + ], + "type": "text", + "content": " to rank candidate responses based on human preferences." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 309, + 439, + 484, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 439, + 484, + 451 + ], + "spans": [ + { + "bbox": [ + 309, + 439, + 484, + 451 + ], + "type": "text", + "content": "2.3.3 Preference Ranking Optimization" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 453, + 565, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 453, + 565, + 510 + ], + "spans": [ + { + "bbox": [ + 308, + 453, + 565, + 510 + ], + "type": "text", + "content": "Similarly, Song et al. [231] propose Preference Ranking Optimization (PRO) to align LLMs with human preferences by leveraging multiple responses " + }, + { + "bbox": [ + 308, + 453, + 565, + 510 + ], + "type": "inline_equation", + "content": "\\{y_{i}\\}_{i = 1}^{N}" + }, + { + "bbox": [ + 308, + 453, + 565, + 510 + ], + "type": "text", + "content": " with the human-annotated order " + }, + { + "bbox": [ + 308, + 453, + 565, + 510 + ], + "type": "inline_equation", + "content": "y_{1} > y_{2} > \\dots >y_{N}" + }, + { + "bbox": [ + 308, + 453, + 565, + 510 + ], + "type": "text", + "content": ". 
The loss function of PRO is defined as follows:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 512, + 565, + 559 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 512, + 565, + 559 + ], + "spans": [ + { + "bbox": [ + 316, + 512, + 565, + 559 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {P R O}} (\\theta) = - \\sum_ {i = 1} ^ {N - 1} \\log \\frac {\\exp \\left(\\frac {1}{\\| y _ {i} \\|} \\log \\pi_ {\\theta} \\left(y _ {i} | x\\right) / \\mathcal {T} _ {i} ^ {i}\\right)}{\\sum_ {j = i} ^ {N} \\exp \\left(\\frac {1}{\\| y _ {j} \\|} \\log \\pi_ {\\theta} \\left(y _ {j} | x\\right) / \\mathcal {T} _ {i} ^ {j}\\right)}, \\tag {11}", + "image_path": "eae7a85ce86a95c4206bcb6c9511e719f11d3ccff68be6fc564d712b45aa1a2d.jpg" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 559, + 564, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 559, + 564, + 630 + ], + "spans": [ + { + "bbox": [ + 308, + 559, + 564, + 630 + ], + "type": "text", + "content": "where the dynamic penalty temperature is defined as " + }, + { + "bbox": [ + 308, + 559, + 564, + 630 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_i^j = 1 / (r_\\phi (x,y^j) - r_\\phi (x,y^i))" + }, + { + "bbox": [ + 308, + 559, + 564, + 630 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 308, + 559, + 564, + 630 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_i^i = \\min_{i < j}\\mathcal{T}_i^j" + }, + { + "bbox": [ + 308, + 559, + 564, + 630 + ], + "type": "text", + "content": ". This temperature ensures that the probability gap between higher-ranked and lower-ranked responses is adaptively scaled according to their reward differences, thereby stabilizing the optimization process." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 309, + 638, + 441, + 650 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 638, + 441, + 650 + ], + "spans": [ + { + "bbox": [ + 309, + 638, + 441, + 650 + ], + "type": "text", + "content": "3 A TAXONOMY OF DPO" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 654, + 565, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 654, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 307, + 654, + 565, + 746 + ], + "type": "text", + "content": "In this section, we introduce a novel taxonomy that categorizes existing DPO works based on four key dimensions: data strategy, learning framework, constraint mechanism, and model property. As illustrated in Fig. 1, these four dimensions are derived from different components of the DPO loss, providing a systematic framework for understanding the methodological evolution of DPO and highlighting the key distinctions between different variations." 
+ } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 41, + 167, + 54 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 41, + 167, + 54 + ], + "spans": [ + { + "bbox": [ + 45, + 41, + 167, + 54 + ], + "type": "text", + "content": "3.1 Data Strategy of DPO" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 44, + 58, + 301, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 58, + 301, + 118 + ], + "spans": [ + { + "bbox": [ + 44, + 58, + 301, + 118 + ], + "type": "text", + "content": "The data strategy constitutes the foundational pillar of DPO, focusing on how to leverage diverse types of preference data for training LLMs. As shown in Fig. 2, our taxonomy identifies three principal axes of data strategy: quality, feedback, and granularity." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 129, + 132, + 141 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 129, + 132, + 141 + ], + "spans": [ + { + "bbox": [ + 45, + 129, + 132, + 141 + ], + "type": "text", + "content": "3.1.1 Data Quality" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 144, + 301, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 144, + 301, + 226 + ], + "spans": [ + { + "bbox": [ + 44, + 144, + 301, + 226 + ], + "type": "text", + "content": "The quality of preference data is a critical factor in determining the effectiveness of DPO training. 
High-quality data ensures that LLMs effectively learn to align with human preferences, while low-quality data may introduce noise and bias, leading to suboptimal model performance. We categorize data quality considerations into three key aspects: heterogeneity, distinguishability, and noise." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 227, + 302, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 227, + 302, + 539 + ], + "spans": [ + { + "bbox": [ + 44, + 227, + 302, + 539 + ], + "type": "text", + "content": "(a) Data Heterogeneity. Conventional DPO methods assume uniform human preferences when annotating data, thereby overlooking the diversity among annotators. This assumption often skews the model toward the preferences of the majority while neglecting minority viewpoints, potentially leading to biases and unfair treatment of underrepresented groups. To address this issue, Chidambaram et al. [42] propose EM-DPO, which learns the distribution of different preference types and their corresponding response strategies. Building on this, they introduce the MinMax-DPO algorithm, which selects a strategy by minimizing the maximum regret across subgroups, ensuring a more balanced representation of preferences among all groups. MallowsPO [43] decomposes the implicit rewards in DPO into prompt dispersion and response scaling rewards. It introduces a novel objective function to capture human preferences for diverse responses to the same prompt. GRPO [44] formulates an objective function that minimizes the loss for the worst-case group, thereby ensuring fairness by prioritizing the disadvantaged groups in the optimization process. GDPO [45] models the language generation process as a combination of belief distribution prediction and belief-based response generation. The corresponding GDPO loss function consists of belief calibration loss and belief-conditioned preference alignment loss. 
The former encourages the model to capture the diversity of beliefs across groups, while the latter ensures that generated responses align with the given belief." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 539, + 302, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 539, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 539, + 302, + 748 + ], + "type": "text", + "content": "(b) Data Distinguishability. A key limitation of DPO is its inability to account for the distinguishability of preference between responses [46, 50, 51, 56, 57]. In some cases, the preferred response is only marginally better than the dispreferred one, while in others, the dispreferred response contains harmful or misleading content, making it significantly worse. Thus, optimization should focus more on cases with substantial preference differences while reducing the effort spent on minor differences. However, most existing methods treat all samples equally, ignoring this data distinguishability. To address this, ODPO [46] introduces a monotonically increasing offset function, requiring the reward of the preferred response to exceed that of the dispreferred one by a certain margin. This ensures stronger updates for larger preference gaps. 
Similarly, Ada-DPO [54] introduces an instance-specific nonlinear scaling parameter, assigning larger weights to strong preference pairs and smaller weights to ambiguous ones based on the reward differences, thereby capturing" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 313, + 44, + 563, + 296 + ], + "blocks": [ + { + "bbox": [ + 313, + 44, + 563, + 296 + ], + "lines": [ + { + "bbox": [ + 313, + 44, + 563, + 296 + ], + "spans": [ + { + "bbox": [ + 313, + 44, + 563, + 296 + ], + "type": "image", + "image_path": "7d0b0c1f1f41595c2153dd474b54b2117d70052e28f86633cea08504dba7746a.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 361, + 304, + 512, + 316 + ], + "lines": [ + { + "bbox": [ + 361, + 304, + 512, + 316 + ], + "spans": [ + { + "bbox": [ + 361, + 304, + 512, + 316 + ], + "type": "text", + "content": "Fig. 2: An overview of DPO data strategy." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 327, + 566, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 327, + 566, + 628 + ], + "spans": [ + { + "bbox": [ + 307, + 327, + 566, + 628 + ], + "type": "text", + "content": "different levels of data distinguishability. DPO-rc [48] also incorporates the preference reward difference as a coefficient in the loss function. " + }, + { + "bbox": [ + 307, + 327, + 566, + 628 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 307, + 327, + 566, + 628 + ], + "type": "text", + "content": "-DPO [49] introduces an adaptive preference distribution to obtain dynamic reward margins based on the distribution difference between the policy and reference models. 
" + }, + { + "bbox": [ + 307, + 327, + 566, + 628 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 307, + 327, + 566, + 628 + ], + "type": "text", + "content": "-DPO [51] analyzes the optimal " + }, + { + "bbox": [ + 307, + 327, + 566, + 628 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 307, + 327, + 566, + 628 + ], + "type": "text", + "content": " parameter for datasets with different reward margins, which dynamically adjusts " + }, + { + "bbox": [ + 307, + 327, + 566, + 628 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 307, + 327, + 566, + 628 + ], + "type": "text", + "content": " based on batch-level reward differences. They also introduce " + }, + { + "bbox": [ + 307, + 327, + 566, + 628 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 307, + 327, + 566, + 628 + ], + "type": "text", + "content": "-guided data filtering to prioritize valuable training data. Curri-DPO [53] sorts preference pairs by reward differences and trains progressively from large to small differences, enabling curricular learning. Similarly, MPO [47] utilizes a reward model to score responses generated by the SFT model, constructing a preference dataset and partitioning it based on preference differences to learn from simple to complex tasks. sDPO [55] computes reward accuracy for different datasets based on an initial target model and partitions the dataset in descending order of accuracy, allowing the model to first optimize on simpler samples. Ma et al. [58] propose a preference dataset construction method that adjusts update weights based on response accuracy, assigning lower weights when the model demonstrates higher proficiency. Furthermore, fDPO [52] enhances DPO training by filtering out samples where the generated response of the model policy surpasses the preferred dataset response in reward score." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 631, + 565, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 631, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 631, + 565, + 746 + ], + "type": "text", + "content": "(c) Data Noise. Human-generated preference annotations often contain inconsistencies, errors, or noise, negatively affecting the performance of DPO. Such noisy data can mislead models, impairing their ability to accurately capture true preferences and generalize effectively to unseen data. Im and Li [64] analyze how noisy feedback influences the generalization performance of preference optimization, showing that increased noise results in higher generalization risks. Specifically, standard DPO loss functions can yield biased estimates under noisy conditions. To address this" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 42, + 301, + 423 + ], + "spans": [ + { + "bbox": [ + 47, + 42, + 301, + 423 + ], + "type": "text", + "content": "issue, rDPO [59] proposes to enhance DPO robustness against noisy annotations and improve overall training performance. Zhang et al. [63] introduce a noise-aware strategy leveraging annotator confidence and stability to identify and downweight noisy samples during training. They also propose an adaptive reward margin, emphasizing clean samples to improve learning effectiveness. 
Complementary to these approaches, PerpCorrect [60] employs a data-driven method to correct noisy annotations directly in the dataset. It trains a proxy language model on both clean and noisy samples, distinguishing noise through perplexity differences to improve dataset quality. To systematically explore noise effects, Gao et al. [65] artificially inject various noise types (e.g., Gaussian noise) into datasets, controlling noise intensity via hyperparameters. Their analysis highlights how noise impacts model alignment, guiding future research towards mitigating such negative effects. To address the vulnerability of DPO in noisy environments, ROPO [61] introduces a regularization term to enhance noise tolerance. Additionally, ROPO employs a robust-guided rejection sampling technique. This technique supplements the dataset with samples that contribute minimally to the loss, thereby improving the overall data quality. Kim et al. [62] propose the SPA framework, using model-generated responses and associated confidence scores to detect noise in annotations. SPA further incorporates smoothing techniques into the loss function to alleviate the noise problem. Finally, Wu et al. [66] categorize noise into two types: point noise (single annotation errors) and pairwise noise (errors between annotated pairs). While DPO naturally handles point noise well, it struggles with pairwise noise. Their proposed Dr. DPO introduces a novel loss function explicitly designed for robustness against both point and pairwise noise." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 433, + 168, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 433, + 168, + 443 + ], + "spans": [ + { + "bbox": [ + 47, + 433, + 168, + 443 + ], + "type": "text", + "content": "3.1.2 Preference Feedback" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 446, + 300, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 446, + 300, + 538 + ], + "spans": [ + { + "bbox": [ + 47, + 446, + 300, + 538 + ], + "type": "text", + "content": "Preference feedback refers to the label signals provided by annotators regarding their preferences for different responses. It can be categorized into point-wise, pair-wise, and list-wise feedback. Point-wise feedback evaluates each response independently, assigning a score or labeling it as positive or negative. Pair-wise feedback compares two responses to determine which one is preferred, while list-wise feedback ranks multiple responses." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 539, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 539, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 539, + 301, + 746 + ], + "type": "text", + "content": "(a) Point-Wise Feedback. Point-wise feedback is the basic form of feedback. It refers to the type of feedback where individual outputs or samples are evaluated independently, rather than through comparisons with other outputs. This form of feedback is characterized by its simplicity and directness, focusing on the quality or relevance of a single response or item. The predominant methodology in RLHF [35] employs point-wise reward signals generated by reward models to optimize policy models. Similarly, KTO [67] directly maximizes the utility of model generations using loss functions based on prospect theory rather than the log-likelihood of preferences. 
It requires only a binary signal indicating whether an output is desirable or undesirable for a given input. Furthermore, BCO [68] builds upon the concepts introduced in KTO and explores a new approach to aligning with binary signals. While KTO focuses on optimizing human utility, BCO introduces a binary classifier framework incorporating reward shift and distribution matching that implicitly" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 311, + 42, + 563, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 42, + 563, + 272 + ], + "spans": [ + { + "bbox": [ + 311, + 42, + 563, + 272 + ], + "type": "text", + "content": "minimizes the DPO loss. Chen et al. [72] and GPO [73] adopt explicit rewards using Noise Contrastive Alignment (NCA) and General Preference Model (GRM) respectively, and then directly optimize language model policies from point-wise preference data with rewards. However, some methods leverage implicit reward signals to refine model behaviors. To ensure that the learned implicit rewards are comparable to the ground-truth rewards, Cal-DPO [69] introduces a calibration term to the preference optimization objective, which prevents the likelihood of chosen responses from decreasing during training. ULMA [71] unifies human demonstration and point-wise preference data into a single framework and handles positive and negative samples with a hybrid objective function. Unlike them, DRO [211] adopts a simple mean-squared objective to optimize the model policy and value function jointly for a single trajectory. Additionally, AOT [70] casts the distributional preference constraint as an optimal transport problem with a convex cost function. The key idea is to minimize the violation of stochastic dominance using a smooth, convex cost function." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 311, + 274, + 563, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 274, + 563, + 746 + ], + "spans": [ + { + "bbox": [ + 311, + 274, + 563, + 746 + ], + "type": "text", + "content": "(b) Pair-Wise Feedback. Pair-wise feedback focuses on comparing pairs of data or actions to determine their relative quality or preference. Building upon the theoretical framework of RLHF, DPO implements this paradigm through the utilization of pair-wise preference data, thereby fitting an implicit reward model. Azar et al. [75] introduces a general theoretical framework to unify existing RLHF and DPO methods. The proposed Identity-Preference Optimization (IPO) directly optimizes policies from preferences without relying on reward modeling or the Bradley-Terry assumption, thereby avoiding overfitting issues observed in DPO. Subsequently, DPO-RK and DPO-R [76] integrate the Rao-Kupper and Davidson models into the DPO training objective respectively, thereby extending the capabilities of DPO by explicitly modeling ties in pairwise comparisons. BMC [77] further addresses a key limitation of the weak correlation between winning and losing responses in pairwise data. Specifically, BMC uses \"Bridging\" to enhance the correlation between winning and losing responses by increasing the consistency and informativeness of pairwise preference signals. However, previous attempts for aligning LLMs primarily focus on optimizing the model's output preferences given an instruction, which struggles to effectively perceive the fine-grained constraints within complex instructions. Thus IOPO [78] extends traditional alignment methods by considering both input and output preferences to better understand the constraints within the instructions. As current methods rely heavily on paired preference data (i.e., explicitly labeled preferred vs. 
dispreferred examples), they can be limiting in scenarios where such paired data is unavailable or insufficient. SAPO [80] addresses this issue based on the concept of self-play, which enhances data exploration and exploitation by automatically generating negative samples and integrating off-policy learning. Furthermore, PMPO [79] extends the EM algorithm to incorporate both preferred and dispreferred outcomes. By introducing the probability distribution of dis-preferred outcomes, PMPO can optimize using both types of samples, even when only negative feedback is available. Similarly, D2O [81] avoids harmful information by maximizing the discrepancy between the generated responses and the negative samples. NPO [82]" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 299, + 99 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 42, + 299, + 99 + ], + "spans": [ + { + "bbox": [ + 47, + 42, + 299, + 99 + ], + "type": "text", + "content": "and SimNPO [83] achieve the goal of forgetting the negative impact by regulating the model's prediction probabilities on negative datasets to be as minimal as possible, where SimNPO further eliminates the reference model bias issue inherent in NPO." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 99, + 300, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 99, + 300, + 620 + ], + "spans": [ + { + "bbox": [ + 47, + 99, + 300, + 620 + ], + "type": "text", + "content": "(c) List-Wise Feedback. 
List-wise feedback refers to the type of feedback where multiple outputs or responses generated by the model for a given input are evaluated collectively as a list. This approach considers the relative ranking or ordering among the outputs, rather than focusing on individual outputs in isolation. Panacea [84] reframes alignment as a Multi-Dimensional Preference Optimization (MDPO) problem and introduces a method that aims to learn the entire Pareto front to accommodate diverse user preferences. In short, Panacea is designed to adapt a single model to list-wise preferences in a Pareto-optimal manner. LiPO [85] and LIRE [86] also treat LM alignment as a list-wise ranking problem, drawing on the rich literature of Learning-To-Rank (LTR). Specifically, LiPO introduces a specific method LiPO-λ, which leverages a list-wise ranking objective that weights each preference pair based on the difference in ranking metrics; while LIRE optimizes the response probability by calculating the exponential probability distribution and uses the reward model to directly guide the optimization process. To better capture the relative proximity within ordinal multiple responses, OPO [87] utilizes the Normalized Discounted Cumulative Gain (NDCG), a widely used ranking metric, to optimize the model's generation probability to match the permutation of responses based on these labels. Similarly, DRPO [88] leverages NDCG as a key metric to optimize the ranking of model outputs. However, DRPO incorporates novel elements like diffNDCG and Adaptive Rank Policy Score to dynamically adjust the score margins between preferred and non-preferred responses based on their ranking positions. mDPO [232] extends preference optimization to multi-sample comparisons and introduces a framework that evaluates and optimizes the collective properties of sample groups. 
It not only addresses the limitations of single pair-wise methods but also provides a more robust optimization framework, especially for characteristics like diversity and bias. Furthermore, RPO [90] introduces a contrastive weighting mechanism that constructs a contrast matrix within each mini-batch to compare preferred and less-preferred responses across prompts. The weights of these comparisons are dynamically adjusted based on the semantic similarity between prompts. Additionally, TODO [91] integrates a tie ranking system into list-wise preference modeling, significantly improving the capture of nuances of human preferences, especially in the presence of noisy or inconsistent labels and frequent ties." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 629, + 174, + 640 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 629, + 174, + 640 + ], + "spans": [ + { + "bbox": [ + 47, + 629, + 174, + 640 + ], + "type": "text", + "content": "3.1.3 Preference Granularity" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 643, + 299, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 643, + 299, + 711 + ], + "spans": [ + { + "bbox": [ + 47, + 643, + 299, + 711 + ], + "type": "text", + "content": "Preference granularity refers to the granularity of preference labels, which determines the level at which preferences are assigned to data. It can be categorized into token-level, step-level, sentence-level, and turn-level granularity, ranging from fine-grained focus on individual tokens to broader preferences over entire interaction turns." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 712, + 299, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 712, + 299, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 712, + 299, + 746 + ], + "type": "text", + "content": "(a) Token-Level Granularity. 
Token-level alignment operates at the character/subword unit of text generation, providing the finest-grained control over model outputs" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 310, + 42, + 563, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 42, + 563, + 411 + ], + "spans": [ + { + "bbox": [ + 310, + 42, + 563, + 411 + ], + "type": "text", + "content": "Theoretically, Rafailov et al. [92] demonstrate that DPO can represent any dense reward function by reparameterizing it as an optimal advantage function, which allows DPO to optimize policies in the token-level MDP effectively TDPO [93] refines the alignment process from the sentence level to the token level and introduces forward KL divergence constraints. TDPO utilizes the Bradley-Terry model to convert sentence-level preference comparisons into a token-level reward system, which allows the model to dynamically adjust its strategy at each token generation step. Furthermore, TIS-DPO[94] estimates the importance weights of tokens based on the differences in prediction probabilities from contrastive LLMs, performing token-level importance sampling on existing data to approximate optimal distribution by assigning weights to each token based on its reward. Moreover, " + }, + { + "bbox": [ + 310, + 42, + 563, + 411 + ], + "type": "inline_equation", + "content": "\\mathrm{D}^2\\mathrm{PO}" + }, + { + "bbox": [ + 310, + 42, + 563, + 411 + ], + "type": "text", + "content": " [99] proposes a temporal decay mechanism that dynamically adjusts the contribution of each token-level reward based on its position in the sequences. Unlike these, SparsePO [95] directly learns sparse masks during the training process and controls which tokens are more important for preferences through the sparsity of the masks, thereby achieving dynamic optimization. 
RTO [96] and SePO [97] first learn a token-level reward function from preference data using DPO, and then RTO optimizes PPO based on this reward signal, while SePO selects key tokens through the estimated reward function. To tackle the need for large-scale annotated data in training, EPO [98] proposes a hierarchical framework that decomposes complex tasks into manageable subgoals using separate LLMs for subgoal prediction and low-level action generation, leveraging environment feedback to automatically generate reward signals and preference data for aligning LLMs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 310, + 412, + 564, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 412, + 564, + 503 + ], + "spans": [ + { + "bbox": [ + 310, + 412, + 564, + 503 + ], + "type": "text", + "content": "To conclude, token-level granularity optimizes models at individual token positions to maximize expected objectives, preserving semantic precision and capturing local syntactic dependencies. However, it increases computational complexity, as processing numerous tokens extends training time, and its sensitivity to noise means errors in a single token can affect the entire sequence. Thus, careful loss function design and regularization are essential for stability." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 310, + 504, + 564, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 504, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 504, + 564, + 746 + ], + "type": "text", + "content": "(b) Step-level Granularity. Step-level granularity focuses on the intermediate steps or stages in a process, particularly effective for complex problem-solving tasks requiring multiple intermediate steps. Step-DPO [100] and SCDPO [101] treat individual reasoning steps as the basic units for preference optimization, where preference pairs of correct and incorrect steps are generated using LLMs. 
Furthermore, CPO [102] and MCTS-DPO [103] first utilize more powerful inference structures to generate multiple candidate thoughts at each reasoning step following the Tree-of-Thought (ToT) and Monte Carlo Tree Search (MCTS) respectively, and construct preference pairs based on the selected and unselected intermediate steps. Then they finetune LLMs to generate reasoning steps preferred by ToT during inference using DPO. TPO [104] proposes a preference learning algorithm specifically designed for preference trees that have multiple branches and multi-step responses, and introduces the adaptive step reward mechanism to address the issue of small reward margins caused by shared subtrajectories. It adjusts the reward values for each step based on semantic similarity, helping the model better distinguish" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 169 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 169 + ], + "type": "text", + "content": "between preference pairs. RDPO [105] extends traditional preference datasets to incorporate a rationale field, which explains why a particular response is preferred. RDPO introduces rationale information into the DPO loss function by maximizing the likelihood of both the preference and the rationale, which allows the model to better understand the logic behind preferences during training. 
To address the challenges of sparse rewards and training instability, DAPO [106] uses a critic function to generate dense signals for policy optimization and trains the actor and critic independently to avoid instability." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 44, + 170, + 301, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 170, + 301, + 331 + ], + "spans": [ + { + "bbox": [ + 44, + 170, + 301, + 331 + ], + "type": "text", + "content": "To conclude, step-level alignment demonstrates unique advantages in multi-step reasoning tasks by decomposing holistic preferences into intermediate decision points. The primary strength of step-level granularity lies in its capacity to decompose complex objectives into verifiable subgoals, enhancing both interpretability and robustness. For instance, in mathematical reasoning, LLMs can receive feedback on equation derivation steps before final answers, reducing error propagation. However, this granularity still have two key challenges: first, the need for precise step segmentation, which may require domain-specific heuristics or auxiliary models to delineate reasoning boundaries; second, the risk of local optima, where over-optimization of individual steps degrades global coherence." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 331, + 301, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 331, + 301, + 550 + ], + "spans": [ + { + "bbox": [ + 44, + 331, + 301, + 550 + ], + "type": "text", + "content": "(c) Sentence-level Granularity. Sentence-level granularity aligns preferences at the complete utterance level, balancing fine-grained control and computational efficiency. This granularity, represented by the original DPO framework, operates on full response sequences as atomic units for preference comparison. 
MAPO [107] uses a well-trained translation model to calculate alignment scores between answers in nondominant and dominant languages and then employs preference optimization methods to enhance reasoning consistency. EURUS [108] structures each instruction as a preference tree, containing pairs of correct and incorrect actions to facilitate preference learning. Similarly, IRPO [109] focuses on improving the reasoning capabilities of LLMs through an iterative preference optimization on constructed preference pairs such that the winning response has a higher reward than the losing response. FACTALIGN [110] proposes a fine-grained, sentence-level alignment algorithm called fKTO, which extends the KTO method to leverage fine-grained factuality assessments at the sentence level." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 550, + 301, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 550, + 301, + 643 + ], + "spans": [ + { + "bbox": [ + 44, + 550, + 301, + 643 + ], + "type": "text", + "content": "To conclude, the key strength of sentence-level granularity lies in its capacity to preserve holistic semantics while maintaining tractable optimization complexity. Nevertheless, we must carefully consider task requirements. While suitable for short-form generation and classification tasks, sentence-level methods may insufficiently capture fine-grained stylistic nuances or long-range dependencies critical in generation and reasoning domains." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 643, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 643, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 643, + 301, + 746 + ], + "type": "text", + "content": "(d) Turn-level Granularity. Turn-level granularity focuses on the optimization of model behavior at the level of conversational turns, which is particularly relevant for dialogue systems and interactive agents. 
This granularity level treats each turn of a conversation as a unit for preference alignment, allowing the LLMs to receive feedback on their responses within the context of a single turn. M-DPO [111] introduces a multi-turn direct preference learning framework to enhance the mathematical reasoning capabilities of LLMs when" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 307, + 42, + 566, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 566, + 388 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 566, + 388 + ], + "type": "text", + "content": "integrated with external tools. It leverages feedback from code interpreters and optimizes trajectory-level preferences using signals generated by the Bradley-Terry model to improve model performance in multi-turn reasoning tasks. ETO [112] presents a novel trial-and-error learning method that optimizes LLM agents' policies by contrasting successful and failed trajectories that contain multi-turn interaction. To address the challenges of coarse granularity and training noise in previous methods, SDPO [113] optimizes specific key segments within interactions to improve multi-turn dialogues while minimizing training noise. Specifically, it extracts key segments from the positive sessions that contribute to higher goal and relationship scores and pairs them with corresponding segments from the negative sessions to calculate an adapted DPO loss. Similarly, AgentQ [114] combines MCTS with self-critique mechanisms to provide process-level supervision by ranking actions, and then iterative fine-tuning using DPO. This approach enables LLMs to effectively learn from both successful and unsuccessful trajectories, enhancing their generalization and decision-making capabilities in complex, multi-turn reasoning tasks within interactive environments. 
DMPO [115] enhances the existing DPO method by replacing the policy constraint with a State-Action Occupancy Measure (SAOM) constraint and incorporating length normalization into the Bradley-Terry model, effectively addressing challenges in multi-turn scenarios. Compared to traditional policy constraints, SAOM constraints better guide the agent to select actions that align with expert trajectories, especially in unexplored states, thereby reducing compounding errors." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 308, + 388, + 567, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 388, + 567, + 494 + ], + "spans": [ + { + "bbox": [ + 308, + 388, + 567, + 494 + ], + "type": "text", + "content": "To conclude, turn-level alignment offers critical advantages for interactive systems by optimizing contextually grounded responses while preserving conversational flow. However, in multi-turn dialogue tasks, the turn-level granularity may introduce additional training noise. For example, some correct turns in negative samples may be mistakenly treated as incorrect turns in the loss calculation. Additionally, since each turn needs to be processed independently, this can lead to reduced training efficiency." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 309, + 509, + 463, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 509, + 463, + 521 + ], + "spans": [ + { + "bbox": [ + 309, + 509, + 463, + 521 + ], + "type": "text", + "content": "3.2 Learning Framework of DPO" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 524, + 565, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 524, + 565, + 583 + ], + "spans": [ + { + "bbox": [ + 308, + 524, + 565, + 583 + ], + "type": "text", + "content": "The learning framework of DPO focuses on how the language model policy learns from preference data. 
In this section, we present an overview of the learning framework in DPO, as shown in Fig. 3, which encompasses the learning paradigm and the learning objectives." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 309, + 594, + 424, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 594, + 424, + 605 + ], + "spans": [ + { + "bbox": [ + 309, + 594, + 424, + 605 + ], + "type": "text", + "content": "3.2.1 Learning Paradigm" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 607, + 565, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 607, + 565, + 689 + ], + "spans": [ + { + "bbox": [ + 307, + 607, + 565, + 689 + ], + "type": "text", + "content": "The learning paradigm in DPO determines how preference data is acquired during model training and falls into three distinct categories: offline learning, where the model learns from pre-collected preference datasets; online Learning, where the model updates based on newly generated data; and active Learning, where the model selectively queries annotators obtain preference data." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 689, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 689, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 689, + 566, + 748 + ], + "type": "text", + "content": "(a) Offline Learning. The original DPO framework [74] itself is an offline learning paradigm, where the model learns from a static, pre-collected dataset of preference pairs. 
Recent research has explored different approaches to merging preference optimization and supervised fine-tuning" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 44, + 296, + 277 + ], + "blocks": [ + { + "bbox": [ + 50, + 44, + 296, + 277 + ], + "lines": [ + { + "bbox": [ + 50, + 44, + 296, + 277 + ], + "spans": [ + { + "bbox": [ + 50, + 44, + 296, + 277 + ], + "type": "image", + "image_path": "ab1cb06bdd578e7bee29a0deff07d4285154eb2afd39c5194af16ae72fc055ce.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 83, + 285, + 261, + 295 + ], + "lines": [ + { + "bbox": [ + 83, + 285, + 261, + 295 + ], + "spans": [ + { + "bbox": [ + 83, + 285, + 261, + 295 + ], + "type": "text", + "content": "Fig. 3: An overview of DPO learning framework." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 44, + 304, + 301, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 304, + 301, + 514 + ], + "spans": [ + { + "bbox": [ + 44, + 304, + 301, + 514 + ], + "type": "text", + "content": "into a single training phase [190]. CPO [116] incorporates a behavior cloning regularizer through KL divergence minimization between the model and preferred data distribution, which effectively combines into adding a negative log-likelihood term on preferred data alongside the contrastive preference loss. 
Taking a more direct approach, ORPO [117] proposes a monolithic framework that directly augments the standard negative log-likelihood loss with an odds ratio term comparing chosen and rejected responses, eliminating the need for a separate reference policy while preserving SFT's domain adaptation capabilities. ULMA [71] proposes a hybrid method that applies standard SFT loss on positive samples while using a ranking-based DPO loss on negative samples. PAFT [118] introduces a parallel training paradigm where SFT and preference alignment are performed concurrently on the same pre-trained model and then merged using parameter fusion techniques, avoiding the sequential pipeline that can lead to catastrophic forgetting." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 514, + 301, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 514, + 301, + 631 + ], + "spans": [ + { + "bbox": [ + 44, + 514, + 301, + 631 + ], + "type": "text", + "content": "Several advances explore curriculum learning strategies to enhance DPO performance and training efficiency. CurriDPO [53] introduces curriculum learning by ordering multiple preference pairs from easy to hard based on the rating difference between chosen and rejected responses, where pairs with larger rating gaps are presented first, followed by progressively more challenging pairs with smaller rating differences. sDPO [55] implements curriculum learning by partitioning preference datasets into sequential chunks measured by reward accuracy and applying them incrementally." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 631, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 631, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 631, + 301, + 748 + ], + "type": "text", + "content": "To avoid substantial computational and data annotation costs for preference alignment, fine-tuning-free alignment methods have gained popularity. 
Linear Alignment [119] works by directly estimating the optimal policy through a one-step update to the output distribution during inference without requiring parameter tuning or feedback data. ICDPO [120] reinterprets DPO's reward-policy relationship to create a fine-tuning-free alignment method that harnesses in-context learning, treating models before and after demonstration exposure as amateur and expert policies, respectively," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "type": "text", + "content": "then computing their log probability ratio to score and rank candidate responses." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 307, + 65, + 565, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 65, + 565, + 157 + ], + "spans": [ + { + "bbox": [ + 307, + 65, + 565, + 157 + ], + "type": "text", + "content": "(b) Online Learning. DPO faces significant limitations when relying solely on static, pre-collected preference datasets. These datasets, generated by different models, cause a distribution shift that leads to ineffective off-policy learning as the model evolves [145, 152]. By contrast, online DPO employs an iterative framework that continuously updates the policy with real-time feedback, ensuring on-policy learning and reducing misalignment [143, 144, 233]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 157, + 565, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 157, + 565, + 562 + ], + "spans": [ + { + "bbox": [ + 307, + 157, + 565, + 562 + ], + "type": "text", + "content": "As online DPO progresses, researchers have introduced more flexible frameworks to tackle key challenges. For instance, Yuan et al. 
[123] proposed a self-rewarding language model: the model generates prompts and responses, then serves as its own judge via LLM-as-a-Judge prompting, scoring on a 5-point scale. OAIF [121] uses an LLM as an online annotator for real-time feedback, and OFSDPO [122] addresses catastrophic forgetting by using two Low-Rank Adaptive (LoRA) modules with different optimization speeds. BPO [124] constructs a dynamic trust region around the behavior LLM, adjusting it as preference data is collected, unlike methods that rely on fixed reference models. Furthermore, researchers have refined sampling strategies for online DPO. RSO [126] and RS-DPO [125] employ rejection sampling based on reward gaps. ROPO [61] recovers useful information from discarded queries via robustness-guided rejection sampling. Shi et al. [127] introduced DPO-Mix-R and DPO-Mix-P, demonstrating faster convergence by mixing online samplers with uniform samplers. OPTUNE [128] selectively regenerates responses with low reward scores while reusing high-reward responses. Iterative RPO [109] and DPO-ST [129] enhance CoT reasoning by selecting correct and incorrect answers to form preference pairs at each iteration. Xie et al. [103] used MCTS to collect preference data during training. Researchers have also explored advanced optimization techniques. APO [130] incorporates momentum-based acceleration, using an extrapolation step between the current and previous policies to update the policy. Xiong et al. [131] proposed a two-agent, non-symmetric online DPO framework with a main agent for optimal policy learning and an enhancer agent for exploration. COMAL [132] formulates alignment as a two-player zero-sum game, updating its policy toward a regularized Nash equilibrium in each iteration. PCO [133] iteratively trains the model on preference data with pairwise cringe Loss." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 562, + 565, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 562, + 565, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 562, + 565, + 748 + ], + "type": "text", + "content": "Recent efforts push for greater autonomy by letting models generate their own feedback [62]. SeRA [134] introduces a self-reviewed preference bootstrapping method, using an implicit reward margin to select informative pairs, and employs an ensemble reward approach across iterations. CREAM [135] mitigates self-improving biases by applying a consistency regularization on the preference rankings of consecutive iterations. D2PO [136] combines human-labeled gold data with concurrently updated, discriminator-labeled data. DLMA [137] uses contrastive prompts to compute self-reward scores via log ratio differences, then integrates these scores directly into the DPO objective. Addressing exploration and uncertainty in online DPO has also been a focus [234]. XPO [138] encourages exploration by adding a bonus for responses outside the initial policy's support, and SELM [139] uses an optimism term in reward fitting to" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 133 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 133 + ], + "type": "text", + "content": "actively seek high-reward responses. 
ETO [112] alternates exploration and training phases to collect failure trajectories, while VPO [140] applies optimism by regularizing the reward model to favor higher-value responses. Xiong et al. [111] extended DPO from single-turn to multi-turn tasks, balancing KL-regularized and non-regularized objectives, and COPO [141] incorporates a count-based bonus to encourage novel responses with low visitation counts." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 44, + 134, + 301, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 134, + 301, + 215 + ], + "spans": [ + { + "bbox": [ + 44, + 134, + 301, + 215 + ], + "type": "text", + "content": "Finally, a growing body of work aims to merge online and offline techniques. HyPO [142] uses offline preference data for DPO training while regularizing via online data. MPO [47] combines the strengths of DPO and PPO in a two-stage process: it first trains DPO on an easier dataset, then uses this model as a reference for PPO training on more challenging samples." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 216, + 301, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 216, + 301, + 307 + ], + "spans": [ + { + "bbox": [ + 44, + 216, + 301, + 307 + ], + "type": "text", + "content": "(c) Active Learning. Active learning in DPO is a strategic approach that aims to reduce the annotation cost and improve sample efficiency by selectively querying annotators for the most informative preference examples. Unlike offline learning that uses a fixed dataset or online learning that generates new data continuously, active learning intelligently selects which data points should be labeled based on model uncertainty or other informativeness criteria." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 308, + 302, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 302, + 539 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 302, + 539 + ], + "type": "text", + "content": "Muldrew et al. [146] introduced APL, an iterative data acquisition and fine-tuning loop in which batches of prompt/completion pairs are strategically selected using acquisition functions: a predictive entropy-based approach to measure model uncertainty for prompts and a preference certainty measure based on the implicit Bradley-Terry model for completion pairs in DPO. Unlike two-step selection processes in APL that separately select uncertain input prompts and corresponding completions, divAPO [147] integrates both stages into a single selection phase. divAPO maximizes the preference model certainty by simultaneously evaluating the informativeness of input prompts and completion pairs, while also considering the data distribution of the input prompts. Ji et al. [148] proposed ADPO, which selectively queries human preferences only for responses where the model exhibits high uncertainty while using pseudo-labels for confident cases. Das et al. [149] also employed active learning on RLHF, which actively selects the context-action pairs that maximize exploration and minimize uncertainty in the reward model." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 548, + 160, + 559 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 548, + 160, + 559 + ], + "spans": [ + { + "bbox": [ + 45, + 548, + 160, + 559 + ], + "type": "text", + "content": "3.2.2 Learning Objective" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 561, + 301, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 561, + 301, + 631 + ], + "spans": [ + { + "bbox": [ + 44, + 561, + 301, + 631 + ], + "type": "text", + "content": "In what follows, we present the learning objective in DPO, which determines how the model policy is optimized based on preference data. We first discuss multi-objective learning in DPO, which aims to optimize multiple objectives simultaneously. Then, we introduce self-play learning, which leverages self-generated data for preference alignment." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 632, + 301, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 632, + 301, + 723 + ], + "spans": [ + { + "bbox": [ + 44, + 632, + 301, + 723 + ], + "type": "text", + "content": "(a) Multi-Objective Learning. Multi-objective learning in DPO addresses the challenge of simultaneously optimizing the language model for multiple, potentially competing preference dimensions, such as helpfulness, harmlessness, and truthfulness. This approach aims to find a balanced policy that satisfies multiple human values rather than optimizing for a single objective, which more closely mirrors the complexity of real-world human preferences." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 723, + 301, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 723, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 44, + 723, + 301, + 747 + ], + "type": "text", + "content": "MODPO [150] achieves the sequential optimization of multiple preference objectives by incorporating language" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 42, + 566, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 566, + 295 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 566, + 295 + ], + "type": "text", + "content": "modeling directly into reward modeling, using a margin-based loss to maintain performance on previously optimized dimensions. SPO [151] takes a similar iterative constrained optimization approach, optimizing each preference dimension while preventing the degradation of prior alignments through regularization terms. MOSLIM [152] takes a different approach by introducing a multi-head classification reward model that assigns different preference dimensions to separate classification heads, enabling simultaneous optimization of multiple preferences without requiring multiple reward or policy models. HPO [153] incorporates auxiliary objectives through offline RL, where the model uses a weighted maximum likelihood objective that combines a preference alignment term with an advantage-weighted term for maximizing arbitrary auxiliary rewards like readability and safety. CPO [154] introduces explicit preference tokens during training that specify desired scores for different objectives, transforming the multi-objective optimization into a conditional optimization problem. DRDO [155] simultaneously models rewards and preferences through a combination of reward distillation and a contrastive log-unlikelihood term in its loss function." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 296, + 567, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 296, + 567, + 388 + ], + "spans": [ + { + "bbox": [ + 308, + 296, + 567, + 388 + ], + "type": "text", + "content": "(b) Self-Play Learning. Self-play learning in DPO represents an approach where the language model interacts with itself or its previous iterations to generate its own preference data for training, reducing or eliminating the need for human annotations [139, 164]. This method enables continuous self-improvement by leveraging the model's own judgment capabilities to identify and learn from better responses, creating a form of autonomous preference learning." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 388, + 566, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 388, + 566, + 584 + ], + "spans": [ + { + "bbox": [ + 308, + 388, + 566, + 584 + ], + "type": "text", + "content": "SPIN [156] involves a self-play mechanism where the LLM generates synthetic data from its prior iterations, then fine-tunes itself to distinguish these self-generated responses from those of human-annotated data. The method resembles a two-player game, where the model's current iteration tries to improve its responses to better match the target distribution, while the previous iteration attempts to generate responses as close to human data as possible. SPPO [157] treats LLM alignment as a constant-sum two-player game and iteratively refines itself by competing against its previous iteration. Instead of maintaining two competing policies or a reward model, SPO [158] uses a single policy to sample multiple trajectories and uses the proportion of wins in pairwise comparisons as the reward signal. BoNBoN [159] Alignment likewise relies on sampling responses from a base model, but it selects the best ones among n candidates and fine-tunes itself to approximate that best-of-n distribution." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 585, + 566, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 585, + 566, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 585, + 566, + 746 + ], + "type": "text", + "content": "Some works approach the alignment problem by leveraging Nash equilibrium [132]. Nash-MD [160] learns a preference model from pairwise human feedback and then computes a Nash equilibrium policy that consistently produces preferred responses. Its self-play approach updates the policy by having it compete against itself (or a slight variant of itself) under the learned preference model, which measures how often one response is preferred to another. DNO [161] extends this concept by implementing a batched on-policy algorithm where the current policy generates multiple outputs that are compared both against each other and against a teacher model's outputs. IPO-MD [162] combines the strengths of IPO and Nash-MD, where the model generates data using a mixture policy between the online and reference" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 112 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 112 + ], + "type": "text", + "content": "policies, and uses a preference model to annotate pairs of generations, making the optimization equivalent to finding a Nash equilibrium through self-play. 
SRPO [163] modifies Nash-MD by introducing a self-improvement policy that refines model outputs through iterative revisions, enabling offline optimization without a learned reward function." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 128, + 209, + 139 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 128, + 209, + 139 + ], + "spans": [ + { + "bbox": [ + 45, + 128, + 209, + 139 + ], + "type": "text", + "content": "3.3 Constraint Mechanism of DPO" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 143, + 301, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 143, + 301, + 224 + ], + "spans": [ + { + "bbox": [ + 44, + 143, + 301, + 224 + ], + "type": "text", + "content": "The constraint mechanism of DPO derives from its reformulation of RLHF, which includes a KL divergence constraint between the current policy and a reference policy. As shown in Fig. 4, we re-examine the constraint mechanism of DPO from the perspective of the reference model and different divergence constraints. We also explore various DPO variants with different safety constraints." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 236, + 153, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 236, + 153, + 247 + ], + "spans": [ + { + "bbox": [ + 45, + 236, + 153, + 247 + ], + "type": "text", + "content": "3.3.1 Reference Model" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 249, + 301, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 249, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 44, + 249, + 301, + 376 + ], + "type": "text", + "content": "The reference model in DPO functions as an anchor to ensure policy updates remain within a controlled range, preventing excessive deviation from initial behaviors. Typically, the reference model is initialized using the SFT model that serves as the starting point for preference optimization. 
The choice of reference model significantly impacts optimization dynamics. A static reference model ensures stable training but may limit adaptability. In the following subsections, we introduce two advanced approaches: reference-free DPO eliminates reliance on the reference model, while dynamic-reference DPO employs an evolving reference model." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 377, + 301, + 748 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 44, + 377, + 301, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 377, + 301, + 654 + ], + "spans": [ + { + "bbox": [ + 44, + 377, + 301, + 654 + ], + "type": "text", + "content": "(a) Reference-Free DPO. To reduce the computational and memory costs associated with a reference model, many algorithms have explored training modes that do not require loading the reference model. Xu et al. [116] replaces the reference model with a uniform prior distribution, adding an SFT loss term on preferred data to maintain consistency with the desired behavior. ORPO [117] integrates an odds ratio-based penalty with traditional SFT loss, increasing the probability of preferred responses while decreasing undesirable ones, thereby enabling single-stage training without a separate reference model. SimPO [166] directly uses the average log probability as implicit rewards. This removes the requirement for a separate reference model, significantly improving computational and memory efficiency. SimPER [167] also directly optimizes reverse perplexity for preferred versus rejected responses, creating a preference optimization approach that does not require a separate reference model, thus simplifying training. Despite these advancements, [168] argue that a reference model remains crucial. 
They compared two reference-free variants using posterior probabilities and likelihood functions as rewards, respectively, and found the original DPO consistently outperformed both. Their results indicate that a strong, well-aligned reference policy can significantly enhance DPO performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 654, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 654, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 654, + 301, + 748 + ], + "type": "text", + "content": "(b) Dynamic-Reference DPO. Offline DPO methods often suffer from reward over-optimization, meaning that as the trained model deviates from the reference model, the quality of generated samples tends to degrade. To address this issue, Gorbatovski et al. [165] proposed dynamically updating the reference model using the current model parameters during training, preventing excessive divergence and maintaining high-quality outputs. Curri-DPO [53] and sDPO [55] adopt" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 313, + 42, + 561, + 261 + ], + "blocks": [ + { + "bbox": [ + 313, + 42, + 561, + 261 + ], + "lines": [ + { + "bbox": [ + 313, + 42, + 561, + 261 + ], + "spans": [ + { + "bbox": [ + 313, + 42, + 561, + 261 + ], + "type": "image", + "image_path": "cddebe4de78a02102cffc5e93cf429138280b5deac316fd35c18e11122d7aa8e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 344, + 267, + 529, + 277 + ], + "lines": [ + { + "bbox": [ + 344, + 267, + 529, + 277 + ], + "spans": [ + { + "bbox": [ + 344, + 267, + 529, + 277 + ], + "type": "text", + "content": "Fig. 4: An overview of DPO constraint mechanism." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 283, + 566, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 283, + 566, + 411 + ], + "spans": [ + { + "bbox": [ + 308, + 283, + 566, + 411 + ], + "type": "text", + "content": "curriculum learning by sorting data samples from simpler to more complex based on predefined metrics. At each training iteration, the model from the previous step serves as the updated reference model to provide constraints, facilitating progressive learning. Similarly, MPO [47] partitions datasets according to task difficulty, employing a two-stage training procedure. The model trained in the initial stage serves as the reference for the subsequent stage. Additionally, M-DPO [89] compares the performance of a fixed reference model versus a dynamic reference model, finding that the latter yields superior results." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 421, + 440, + 432 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 421, + 440, + 432 + ], + "spans": [ + { + "bbox": [ + 309, + 421, + 440, + 432 + ], + "type": "text", + "content": "3.3.2 Divergence Constraint" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 434, + 566, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 434, + 566, + 503 + ], + "spans": [ + { + "bbox": [ + 308, + 434, + 566, + 503 + ], + "type": "text", + "content": "Divergence constraints in DPO play a crucial role in constraining model optimization, balancing alignment performance and model stability. In the following subsections, we introduce two modifications to the divergence constraint: one for enhancing diversity and the other for improving generalization." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 504, + 567, + 748 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 308, + 504, + 566, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 504, + 566, + 654 + ], + "spans": [ + { + "bbox": [ + 308, + 504, + 566, + 654 + ], + "type": "text", + "content": "(a) Diversity. Standard DPO typically uses reverse KL divergence equivalent to RLHF. However, the mode-seeking nature of reverse KL divergence reduces the diversity of the generated outputs. To overcome this limitation, f-DPO [169] explores various divergences, including forward KL divergence, reverse KL divergence, Jensen-Shannon divergence, and " + }, + { + "bbox": [ + 308, + 504, + 566, + 654 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 308, + 504, + 566, + 654 + ], + "type": "text", + "content": "-divergence, to achieve a better trade-off between alignment performance and diversity. Slocum et al. [170] further proposes splitting the KL divergence term into entropy and cross-entropy terms. This decoupling allows independent control of generation diversity and closeness to the reference model, preserving output diversity without degrading overall model quality." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 654, + 567, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 654, + 567, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 654, + 567, + 748 + ], + "type": "text", + "content": "(b) Generalization. Over-optimization in DPO can negatively impact generalization, causing reduced performance on inputs outside the training distribution. To mitigate this, Huang et al. 
[178] introduce " + }, + { + "bbox": [ + 308, + 654, + 567, + 748 + ], + "type": "inline_equation", + "content": "\\chi^2" + }, + { + "bbox": [ + 308, + 654, + 567, + 748 + ], + "type": "text", + "content": "-divergence as a more aggressive form of regularization compared to KL divergence, alleviating the over-optimization problem. DPO-Kernels [171] employs data-driven methods to select optimal kernel-divergence pairs dynamically, improving task adaptability" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 343 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 343 + ], + "type": "text", + "content": "and robustness. FlipGuard [172] introduces a customized reward characterization to monitor model performance. If performance drops relative to earlier versions, FlipGuard constrains the model's updates to ensure alignment with previous stable behavior. FPO [173] leverages the feature-level constraints using Sparse Autoencoders (SAEs) to improve computational efficiency and training stability. SPO [176] integrates a natural preference loss with a KL divergence-based regularization term computed over the entire model output distribution. By adjusting this divergence term, SPO prevents unwanted shifts beyond the preference dataset, ensuring stable alignment. EXO [175] argues that minimizing the forward KL divergence in DPO introduces bias when approximating the optimal policy. 
They establish a generalized alignment objective and reveal the equivalence between maximizing KL regularization rewards and minimizing the reverse KL divergence relative to the optimal policy. QDPO [177] utilizes divergence between the quantized model and the full-precision model for preference optimization, effectively addressing the token-flipping issue. Token-flipping refers to the phenomenon where quantization errors skew token distributions, leading to incorrect token selection. GPO [174] constructs a framework that unifies different DPO-related algorithms through theoretical derivations, enabling a deeper understanding of the regularization mechanisms in the DPO family of algorithms." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 354, + 153, + 365 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 354, + 153, + 365 + ], + "spans": [ + { + "bbox": [ + 45, + 354, + 153, + 365 + ], + "type": "text", + "content": "3.3.3 Safety Constraint" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 369, + 301, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 369, + 301, + 681 + ], + "spans": [ + { + "bbox": [ + 44, + 369, + 301, + 681 + ], + "type": "text", + "content": "Safety constraints in DPO aim to prevent LLMs from generating harmful, biased, or unethical outputs. However, traditional alignment algorithms often fail to address safety concerns. To enhance the safety alignment, recent studies have introduced several specialized mechanisms based on DPO. SafeDPO [179] introduces a streamlined approach for safety alignment by implicitly optimizing safety objectives within a single stage of policy learning. SACPO [180] addresses safety constraints by explicitly formulating language model alignment as a constrained optimization problem, using DPO to optimize the model under safety constraints. Zhang et al. 
[184] propose creating a backtracking preference dataset that identifies and reverses unsafe outputs, enhancing the safety and robustness of the model. C-DPO [181] integrates dual gradient descent into DPO to balance safety and utility efficiently. This approach achieves a robust trade-off between helpfulness and harmlessness, offering explicit safety guarantees. ADPO [182] introduces adversarial techniques into DPO. It specifically trains models to reduce the probability of unsafe outputs by deliberately generating harmful responses using controlled toxic tokens. Finally, Lee et al. [183] explore the internal mechanisms through which DPO reduces harmful outputs. Their findings suggest that DPO does not remove harmful behaviors learned during pretraining but instead teaches models to bypass or suppress these behaviors. This insight helps explain certain safety vulnerabilities like jailbreaks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 696, + 175, + 708 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 696, + 175, + 708 + ], + "spans": [ + { + "bbox": [ + 44, + 696, + 175, + 708 + ], + "type": "text", + "content": "3.4 Model Property of DPO" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 712, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 712, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 712, + 301, + 748 + ], + "type": "text", + "content": "DPO has shown great promise in aligning LLMs with human preferences by directly optimizing model outputs based on preference data. 
During this process, the underlying models" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 315, + 44, + 561, + 259 + ], + "blocks": [ + { + "bbox": [ + 315, + 44, + 561, + 259 + ], + "lines": [ + { + "bbox": [ + 315, + 44, + 561, + 259 + ], + "spans": [ + { + "bbox": [ + 315, + 44, + 561, + 259 + ], + "type": "image", + "image_path": "e5c5a6a0d780ea4b5c9a8a90db2723f6fdb6c09e14347e19016d55dcced76086.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 355, + 266, + 517, + 277 + ], + "lines": [ + { + "bbox": [ + 355, + 266, + 517, + 277 + ], + "spans": [ + { + "bbox": [ + 355, + 266, + 517, + 277 + ], + "type": "text", + "content": "Fig. 5: An overview of DPO model property." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 308, + 282, + 565, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 282, + 565, + 354 + ], + "spans": [ + { + "bbox": [ + 308, + 282, + 565, + 354 + ], + "type": "text", + "content": "exhibit certain properties that are crucial for understanding their behavior and effectiveness. These properties can be broadly categorized into two aspects: the generation property and the optimization property, as shown in Fig. 5. In the following sections, we explore these two properties in more detail, analyzing their implications for model alignment." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 363, + 430, + 375 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 363, + 430, + 375 + ], + "spans": [ + { + "bbox": [ + 309, + 363, + 430, + 375 + ], + "type": "text", + "content": "3.4.1 Generation Property" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 376, + 565, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 376, + 565, + 458 + ], + "spans": [ + { + "bbox": [ + 307, + 376, + 565, + 458 + ], + "type": "text", + "content": "The generation property of DPO primarily concerns issues related to distribution shifts and length biases. DPO is sensitive to distribution shifts between the base model outputs and the preference data, which may reduce diversity and generalization. Additionally, DPO has a tendency to favor longer responses, a phenomenon known as morbidity, which can negatively impact performance and user experience." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 458, + 565, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 458, + 565, + 676 + ], + "spans": [ + { + "bbox": [ + 308, + 458, + 565, + 676 + ], + "type": "text", + "content": "(a) Distribution Shift. In RLHF, the reward model is trained on a static set of preference data collected offline. During fine-tuning, the generated responses often differ from this original training data, resulting in a distribution shift. This shift can cause inaccurate reward predictions and lead to over-optimization. The implicit reward model in DPO also suffers from this distribution shift issue. Moreover, Lin et al. [188] have shown that the implicit reward model in DPO performs poorly on Out-Of-Distribution (OOD) data compared to explicit reward models. 
Experimental results indicate that DPO can transfer probability mass to the highreward response regions covered by the preference data, but it may also cause the distribution of responses generated by the model to deviate significantly from that of the reference policy, resulting in responses that do not meet expectations [189]. To address these problems, many researchers are now exploring online DPO approaches [109, 121, 122, 125], aiming to mitigate OOD by continuously updating preference data during training." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 677, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 677, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 677, + 566, + 748 + ], + "type": "text", + "content": "Existing DPO methods also face significant limitations due to their dependence on specific training tasks. Their optimal solutions lack robustness when applied to OOD tasks. Thus, SRPO [163] reframes alignment as a self-improvement process, which optimizes a self-improvement policy and a generative policy using a min-max objective, ensuring" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 134 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 134 + ], + "type": "text", + "content": "robustness by making the solution independent of training tasks. Zhang et al. [139] also identify notable issues in DPO when handling OOD tasks. 
First, DPO tends to overly favor novel content it has not seen during training. Second, it easily gets stuck in local optima, limiting exploration. To address these problems, they propose Self-Exploring Language Models (SELM), incorporating an optimism term to encourage broader exploration of new responses." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 44, + 134, + 301, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 134, + 301, + 216 + ], + "spans": [ + { + "bbox": [ + 44, + 134, + 301, + 216 + ], + "type": "text", + "content": "Another significant challenge of DPO is preference drift, where human preferences evolve, changing data distributions over time. Traditional DPO algorithms typically overlook such temporal shifts, mistakenly interpreting them as noise. To address this, NS-DPO [185] propose to assign higher weights to recent data, allowing models to better adjust to evolving preferences." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 216, + 301, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 216, + 301, + 320 + ], + "spans": [ + { + "bbox": [ + 44, + 216, + 301, + 320 + ], + "type": "text", + "content": "(b) Length Bias. Length bias in DPO refers to the tendency of model-generated outputs to become excessively long during training. This issue is similar to the length bias observed in RLHF [197] and is particularly pronounced in DPO. Length bias affects response quality and overall model performance. To mitigate this issue, researchers have developed several solutions, which can be categorized into three main approaches: length regularization, length normalization, and length sampling." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 320, + 301, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 320, + 301, + 504 + ], + "spans": [ + { + "bbox": [ + 44, + 320, + 301, + 504 + ], + "type": "text", + "content": "Length regularization is a common approach to controlling length bias in DPO. By introducing regularization terms into the objective function, the model can constrain response length and reduce morbidity, thereby alleviating the length bias problem. R-DPO [191] introduces a length-based penalty term to the DPO objective function, explicitly discouraging morbidity. " + }, + { + "bbox": [ + 44, + 320, + 301, + 504 + ], + "type": "inline_equation", + "content": "\\mathrm{D}^2\\mathrm{PO}" + }, + { + "bbox": [ + 44, + 320, + 301, + 504 + ], + "type": "text", + "content": " [99] introduces a dynamic weighting mechanism by incorporating a temporal decay factor. Unlike previous methods that apply uniform reward contributions across sequences, " + }, + { + "bbox": [ + 44, + 320, + 301, + 504 + ], + "type": "inline_equation", + "content": "\\mathrm{D}^2\\mathrm{PO}" + }, + { + "bbox": [ + 44, + 320, + 301, + 504 + ], + "type": "text", + "content": " adjusts the influence of each reward based on its position in the response. Higher weights are assigned to rewards associated with earlier tokens, as they are more critical for model alignment, while later rewards gradually receive lower weights. This adaptive approach prevents overfitting to less relevant tokens, thereby addressing length bias in DPO." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 504, + 302, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 504, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 504, + 302, + 748 + ], + "type": "text", + "content": "Length normalization aims to eliminate the loss bias caused by response length differences, allowing the model to evaluate texts of varying lengths more fairly. This approach prevents the model from developing an unreasonable preference for either long or short responses [198]. RRHF [190] and SimPO [166] first propose to apply length normalization to responses, ensuring that the loss remains unaffected by response length. LN-DPO [194] further integrates SimPO-like length normalization into DPO, demonstrating that this approach enhances response quality while mitigating morbidity. LD-DPO [195] achieves length desensitization by reparameterizing the likelihood in DPO. Specifically, it decomposes the likelihood of the longer response in a preference pair into the product of the likelihood of the public-length portion and the likelihood of the excessive portion. It then introduces a hyperparameter to mitigate the morbidity preference. This adjustment smooths the relationship between likelihood and response length, reducing its impact on optimization. For multi-turn dialogue tasks, DMPO [115] introduces length normalization for the number of turns in multi-turn preference optimization." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 307, + 42, + 566, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 566, + 274 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 566, + 274 + ], + "type": "text", + "content": "An alternative approach to controlling length bias in DPO is through sampling-based methods. SamPO [192] introduces a down-sampling method to compute regularized KL divergences. 
By balancing token-level probability distributions between preferred and rejected responses, SamPO reduces length bias in DPO training. Yuan et al. [193] propose Length-Instruction Fine-Tuning (LIFT), a method to improve instruction-following models' ability to adhere to length constraints by augmenting existing training data with explicit length instructions and using DPO for training. This enables the model to generalize across prompts requiring different response lengths. For long-context tasks, LongPO [196] enables short-context LLMs to self-evolve for long-context tasks by learning from self-generated short-to-long preference data, which includes paired responses for long-context inputs and their compressed short-context counterparts. LongPO incorporates a short-to-long KL constraint to prevent degradation of short-context performance during long-context alignment, achieving strong performance on both short- and long-context tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 309, + 282, + 437, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 282, + 437, + 295 + ], + "spans": [ + { + "bbox": [ + 309, + 282, + 437, + 295 + ], + "type": "text", + "content": "3.4.2 Optimization Property" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 296, + 566, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 296, + 566, + 376 + ], + "spans": [ + { + "bbox": [ + 307, + 296, + 566, + 376 + ], + "type": "text", + "content": "The optimization property of DPO involves likelihood collapse and alignment tax. While DPO aims to increase the likelihood of preferred responses and decrease dispreferred ones, the actual optimization process does not explicitly enforce this balance. Moreover, alignment improvements often come at the cost of the original capabilities of LLMs, known as alignment tax." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 377, + 566, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 377, + 566, + 608 + ], + "spans": [ + { + "bbox": [ + 308, + 377, + 566, + 608 + ], + "type": "text", + "content": "(a) Likelihood Collapse. Likelihood collapse refers to the unintended reduction in the likelihood of both preferred and dispreferred responses during DPO training [92]. This phenomenon can lead to unintentional unalignment, where the model's outputs deviate from human preferences, potentially producing undesirable or harmful responses. This phenomenon is also referred to as likelihood displacement in prior studies [204]. Additionally, the gradients associated with increasing the likelihood of preferred responses and decreasing that of dispreferred responses can become entangled, hindering effective learning. This entanglement complicates the optimization process, making it challenging to achieve the desired alignment [203]. Theoretical analyses have further elucidated the underlying causes of likelihood collapse. In particular, Feng et al. [202] developed an analytical framework grounded in field theory. Their analysis of the gradient vector field of the DPO loss function revealed that the loss function decreases the probability of generating human-disliked data at a faster rate than it increases the probability of generating human-like data." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 608, + 566, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 608, + 566, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 608, + 566, + 746 + ], + "type": "text", + "content": "Several strategies have been proposed to address likelihood collapse. Pal et al. [200] introduce DPO-Positive (DPOP), which adds a penalty term to maintain a high log-likelihood for preferred examples. 
Similarly, LLaMA [235] augments DPO training with a negative log-likelihood term to stabilize training and preserve the log-likelihood of chosen responses [109]. Flex-DPO [201] adaptively adjusts parameters to slow the decline in the likelihood of dispreferred responses and balance gradients for both chosen and rejected outputs. D'Oosterlinck et al. [199] propose Anchored Preference Optimization (APO), which provides fine-grained control over probability updates: APO-zero increases the" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 49, + 563, + 299 + ], + "blocks": [ + { + "bbox": [ + 149, + 38, + 459, + 49 + ], + "lines": [ + { + "bbox": [ + 149, + 38, + 459, + 49 + ], + "spans": [ + { + "bbox": [ + 149, + 38, + 459, + 49 + ], + "type": "text", + "content": "TABLE 1: An overview of datasets (upper row) and benchmarks (lower row) for DPO." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 49, + 563, + 299 + ], + "lines": [ + { + "bbox": [ + 47, + 49, + 563, + 299 + ], + "spans": [ + { + "bbox": [ + 47, + 49, + 563, + 299 + ], + "type": "table", + "html": "
DatasetTask DescriptionData Size (Training & Test)Data SourceData StructureEvaluation Metric
UltraFeedback [237]Instruction-Following, Helpful64K&-AIList-
SafeRLHF [238]Harmless, Helpful73.9K&8.21KHuman&AIPair-
HelpSteer [239]Helpful35.3K&1.8KHumanPoint-
PRM800K [240]Mathematical Reasoning800K&-HumanPoint-
SHP-2 [241]Q&A From Reddit3600K&241KHumanPair-
Nectar [242]Conversations183K&-AIList-
OpenOrca [243]Conversations2940K&-AISample-
Capybara [244]Multi-Turn Conversations16K&-Human&AISample-
Step-DPO [100]Mathematical Reasoning10.8K&-Human&AIPair-
BeaverTails [245]Harmless, Helpful330K&36KHuman&AIPoint-
IMDb [246]Movie Reviews25K&25KHumanSampleAccuracy
Reddit TL;DR [247]Summarization1330K&-HumanSampleWin Rate
Anthropic-HH [248]Harmless, Helpful161K&8.55KAIPairWin Rate
GSM8K [249]Mathematical Reasoning7.47K&1.32KHumanSampleAccuracy
AlpacaEval2 [250]Automatic Evaluation-&0.8KAISampleWin Rate
MT-Bench [251]Multi-Turn Question-&3.3KHumanPairWin Rate
AdvBench [252]Harmful Behaviors-&0.5KHumanSampleAttack Success
Arena-Hard [253]Updating Evaluation-&0.5KAISampleWin Rate
TruthfulQA [254]Truthful-&0.8KHumanPairAccuracy
IFEval [255]Instruction-Following-&0.5KHumanSampleAccuracy
BBH [256]Multistep Reasoning-&23 TasksHumanSampleAccuracy
MATH [257]Mathematical Reasoning7.5K&5KHumanSampleAccuracy
GPQA [258]Biology, Physics, and Chemistry-&0.45KHumanSampleAccuracy
MUSR [259]Multistep Reasoning-&0.76KAISampleAccuracy
MMLU-Pro [260]Language Understanding-&12KHuman&AISampleAccuracy
", + "image_path": "522902a82b6177e867e4aff6c3488ea5e1a2da6b4a249cebc164f52549476e7d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 308, + 301, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 308, + 301, + 344 + ], + "spans": [ + { + "bbox": [ + 44, + 308, + 301, + 344 + ], + "type": "text", + "content": "probability of winning outputs and decreases that of losing outputs, whereas APO-down decreases both, but with a stronger decline for losing outputs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 348, + 301, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 348, + 301, + 581 + ], + "spans": [ + { + "bbox": [ + 44, + 348, + 301, + 581 + ], + "type": "text", + "content": "Another notable challenge related to likelihood collapse is likelihood over-optimization, where the performance of a model on a proxy metric (such as its own likelihood estimates) improves, while its true performance does not. Zhang and Ranganath [236] show that reductions in the likelihood loss of DPO do not necessarily translate into higher win rates. Similarly, Shi et al. [205] further investigates the problem of likelihood over-optimization in DPO, demonstrating that higher completion likelihoods do not necessarily correlate with better model performance and may even degrade it. This study identifies key indicators of over-optimization and highlights the need to balance likelihood optimization with output diversity. e-DPO [187] also shows that DPO can lead to degenerate policies due to overfitting, and proposes a solution using reward model distillation to regularize the implicit reward of the language model. The method trains the language model to match the probability distribution induced by a reward model and introduces a pessimistic extension to handle uncertainty in the reward model, thereby improving the robustness of DPO." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 584, + 303, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 584, + 303, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 584, + 303, + 748 + ], + "type": "text", + "content": "(b) Alignment Tax. Alignment tax refers to the unintended consequence where improving a model's preference alignment degrades its general capabilities acquired during pretraining [206]. Thakkar et al. [207] demonstrate the sensitivity of DPO to training data composition, showing significantly worse performance degradation than SFT when using mixed-preference datasets. Furthermore, Chen et al. [209] identify that DPO struggles with optimizing ranking tasks. While DPO improves ranking accuracy, it disproportionately harms generative capabilities. Pentyala et al. [118] also observes capability forgetting during sequential training, where DPO objectives conflict with previously learned SFT patterns. To address this, researchers propose model merging strategies that balance alignment and performance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 308, + 308, + 566, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 308, + 566, + 415 + ], + "spans": [ + { + "bbox": [ + 308, + 308, + 566, + 415 + ], + "type": "text", + "content": "PAFT [118] separately trains SFT and DPO objectives on a pretrained model using distinct datasets, then merges the parameters through weighted averaging. Additionally, Lu et al. [208] proposes online merging optimizers, which integrate model merging into each optimization step of DPO to balance human preferences and basic capabilities. By merging gradients with parameter differences between SFT and pretrained models, these optimizers effectively enhance alignment while mitigating alignment tax." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 309, + 431, + 477, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 431, + 477, + 443 + ], + "spans": [ + { + "bbox": [ + 309, + 431, + 477, + 443 + ], + "type": "text", + "content": "4 BENCHMARKS AND ANALYSIS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 449, + 566, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 449, + 566, + 519 + ], + "spans": [ + { + "bbox": [ + 308, + 449, + 566, + 519 + ], + "type": "text", + "content": "In this section, we provide a comprehensive overview of existing benchmarks and evaluation for DPO methods. We first introduce the key datasets and benchmarks used to train or evaluate DPO models. We then present a comparative analysis of the performance of different DPO methods on these benchmarks, highlighting their strengths and limitations." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 534, + 454, + 546 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 534, + 454, + 546 + ], + "spans": [ + { + "bbox": [ + 309, + 534, + 454, + 546 + ], + "type": "text", + "content": "4.1 Datasets and Benchmarks" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 550, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 550, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 550, + 566, + 748 + ], + "type": "text", + "content": "A diverse range of datasets and benchmarks has been specifically curated to facilitate research in DPO. Table 1 summarizes these datasets and benchmarks, highlighting their task descriptions, dataset sizes, data sources, data structures, and evaluation metrics. These datasets and benchmarks span a broad range of tasks, such as harmlessness and helpfulness evaluation and mathematical reasoning. 
They also exhibit significant diversity in scale, ranging from smaller, specialized datasets to large-scale collections such as SHP-2, which contains over 3.6 million samples. Additionally, datasets differ in their sources: some rely purely on human annotations, others on AI-generated content, and many adopt a hybrid approach combining human and AI-generated data. The data structures employed across these datasets include single-sample without preference label, point-wise annotations, pair-wise comparisons, and list-wise comparisons. Common evaluation metrics include accuracy" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 49, + 563, + 257 + ], + "blocks": [ + { + "bbox": [ + 69, + 38, + 541, + 49 + ], + "lines": [ + { + "bbox": [ + 69, + 38, + 541, + 49 + ], + "spans": [ + { + "bbox": [ + 69, + 38, + 541, + 49 + ], + "type": "text", + "content": "TABLE 2: Experimental results of different DPO variants on Open LLM Leaderboard. The underline indicates the best performance." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 49, + 563, + 257 + ], + "lines": [ + { + "bbox": [ + 47, + 49, + 563, + 257 + ], + "spans": [ + { + "bbox": [ + 47, + 49, + 563, + 257 + ], + "type": "table", + "html": "
ModelMistral-7B-BaseLLaMA-3-8B-Base
IFEvalBBHMATHGPQAMUSRMMLU-ProAVERAGEIFEvalBBHMATHGPQAMUSRMMLU-ProAVERAGE
SFT3.441.19.228.842.027.725.429.046.315.328.641.331.031.9
RRHF [190]10.040.61.726.446.326.125.231.046.813.931.436.830.531.7
SLiC-HF [230]11.044.09.929.242.628.127.541.749.517.530.439.731.735.1
DPO [74]11.143.77.128.543.826.726.834.348.217.231.940.131.533.9
IPO [75]9.442.89.729.739.727.826.535.349.015.932.841.431.934.4
CPO [116]8.042.79.628.942.127.326.432.446.916.830.639.131.832.9
KTO [67]12.943.712.028.946.128.328.640.248.318.031.040.131.134.8
ORPO [117]28.446.413.530.241.429.531.640.049.116.830.738.432.034.5
R-DPO [191]10.043.07.628.739.327.226.036.448.817.231.640.631.534.4
SimPO [166]11.143.18.428.939.527.226.440.848.615.831.040.531.834.7
ModelMistral-7B-InstructLLaMA-3-8B-Instruct
IFEvalBBHMATHGPQAMUSRMMLU-ProAVERAGEIFEvalBBHMATHGPQAMUSRMMLU-ProAVERAGE
SFT48.446.210.929.147.627.134.950.749.326.931.037.935.738.6
RRHF [190]45.245.310.128.544.226.233.351.349.327.229.639.535.338.7
SLiC-HF [230]39.446.211.428.749.026.833.641.650.926.331.339.235.337.4
DPO [74]49.045.611.026.946.126.834.248.950.125.829.438.736.038.2
IPO [75]42.645.311.827.849.327.234.050.449.526.329.637.935.738.2
CPO [116]38.846.010.128.548.426.933.150.649.126.831.338.135.838.6
KTO [67]46.245.710.927.846.027.334.043.150.126.331.238.135.037.3
ORPO [117]37.645.111.228.246.926.532.643.050.626.929.339.135.137.3
R-DPO [191]46.845.99.928.746.227.634.250.950.325.329.839.035.738.5
SimPO [166]45.445.910.428.345.027.133.748.849.225.029.339.235.137.8
", + "image_path": "6f8f061ab915d05683da8b9488cffb80716ed95531ead04552cb45454c8c4d61.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 267, + 301, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 267, + 301, + 316 + ], + "spans": [ + { + "bbox": [ + 44, + 267, + 301, + 316 + ], + "type": "text", + "content": "(for tasks like mathematical reasoning found in GSM8K and MATH), win rates derived from pairwise comparisons (such as MT-Bench and Anthropic-HH), and attack success rates used for assessing adversarial robustness (AdvBench)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 327, + 105, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 327, + 105, + 338 + ], + "spans": [ + { + "bbox": [ + 45, + 327, + 105, + 338 + ], + "type": "text", + "content": "4.2 Results" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 342, + 301, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 342, + 301, + 514 + ], + "spans": [ + { + "bbox": [ + 44, + 342, + 301, + 514 + ], + "type": "text", + "content": "To demonstrate the effectiveness of different DPO variants, we conduct experiments on the Open LLM Leaderboard. We compare different DPO variants using Mistral-7B-Base, Mistral-7B-Instruct [261], LLaMA-3-8B-Base, and LLaMA-3-8B-Instruct [235] as starting points. The overall experimental setup follows Meng et al. [166], ensuring a reproducible evaluation of different DPO variants. For Mistral-7B-Base and LLaMA-3-8B-Base, the SFT models are trained based on the UltraChat-200k dataset [262], and subsequently applied different DPO variants on the SFT models using the UltraFeedback dataset [237]. 
For Mistral-7B-Instruct and LLaMA-3-8B-Instruct, which have already undergone instruction-tuning, the preference dataset is regenerated by collecting responses from the SFT models using prompts from the UltraFeedback dataset [237]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 515, + 302, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 515, + 302, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 515, + 302, + 746 + ], + "type": "text", + "content": "The experimental results, as summarized in Table 2, highlight the performance of different DPO variants across various benchmarks. For the Mistral-7B-Base and LLaMA-3-8B-Base models, ORPO consistently achieves the highest average scores, indicating its effectiveness in aligning models with human preferences. Notably, ORPO outperforms other methods on IFEval, BBH, and MATH, demonstrating its superiority in instruction-following and mathematical reasoning tasks. Meanwhile, SLiC-HF and KTO also achieve competitive results, particularly in BBH and GPQA, suggesting that these methods effectively leverage preference data for enhanced performance. For the Mistral-7B-Instruct and LLaMA-3-8B-Instruct models, the improvements across different DPO variants are more nuanced. While DPO and R-DPO show strong performance in IFEval and MMLU-Pro, IPO and CPO demonstrate robustness in handling complex reasoning tasks like MATH and GPQA. Overall, the results indicate that different DPO variants exhibit varying strengths across benchmarks, with some methods excelling in base models while others are more effective for instruct models." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 309, + 266, + 403, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 266, + 403, + 278 + ], + "spans": [ + { + "bbox": [ + 309, + 266, + 403, + 278 + ], + "type": "text", + "content": "5 APPLICATIONS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 281, + 567, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 281, + 567, + 353 + ], + "spans": [ + { + "bbox": [ + 308, + 281, + 567, + 353 + ], + "type": "text", + "content": "In this section, we discuss the applications of DPO in various domains, including different LLM-based applications, diffusion models, and multi-modal LLMs. We provide an overview of the key challenges and opportunities in each domain and highlight the potential impact of DPO on real-world applications." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 366, + 446, + 379 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 366, + 446, + 379 + ], + "spans": [ + { + "bbox": [ + 309, + 366, + 446, + 379 + ], + "type": "text", + "content": "5.1 LLM-based Applications" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 380, + 566, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 380, + 566, + 567 + ], + "spans": [ + { + "bbox": [ + 307, + 380, + 566, + 567 + ], + "type": "text", + "content": "DPO has emerged as a powerful paradigm for aligning LLMs with human preferences across diverse applications [116, 235, 263, 264]. In code generation, DPO enhances control over code quality by optimizing based on preferences from automated tests [265, 266, 267]. In mathematical reasoning, DPO reduces errors in complex problem-solving by emphasizing step-level preference optimization [100, 101, 129, 268]. Multilingual applications leverage DPO to synchronize cross-lingual preferences, thereby improving translation accuracy and cultural relevance [107, 269]. 
Recommendation systems utilize DPO to refine personalization by incorporating user preference data to optimize item rankings, thereby enhancing the model ability to distinguish preferred items from less preferred ones [270, 271]. These examples highlight the adaptability of DPO in achieving human-aligned outputs across diverse tasks." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 309, + 581, + 411, + 592 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 581, + 411, + 592 + ], + "spans": [ + { + "bbox": [ + 309, + 581, + 411, + 592 + ], + "type": "text", + "content": "5.2 Diffusion Models" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 595, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 595, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 595, + 566, + 748 + ], + "type": "text", + "content": "In the realm of diffusion models, DPO has been adapted to better align generated content with user expectations [272, 273, 274, 275]. By optimizing preferences over image-text pairs, DPO enhances the semantic accuracy of generated images and mitigates the production of undesirable or biased content. Studies have demonstrated that diffusion models fine-tuned with DPO respond more accurately to complex prompts compared to those trained with traditional techniques. Moreover, the efficiency of DPO allows for the fine-tuning of large-scale models using limited preference data, addressing significant computational challenges in training diffusion models [276, 277, 278]. 
While scaling DPO for high-resolution and dynamic content generation remains" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 67 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 67 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 67 + ], + "type": "text", + "content": "challenging, its ability to simplify reward modeling makes it a promising method for controlled content creation [279]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 81, + 151, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 81, + 151, + 92 + ], + "spans": [ + { + "bbox": [ + 45, + 81, + 151, + 92 + ], + "type": "text", + "content": "5.3 Multi-Modal LLMs" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 96, + 301, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 96, + 301, + 224 + ], + "spans": [ + { + "bbox": [ + 44, + 96, + 301, + 224 + ], + "type": "text", + "content": "For multi-modal LLMs, DPO plays a crucial role in aligning preferences across different data types, thereby improving coherence in tasks such as visual question answering and image captioning [89, 280, 281, 282, 283]. By optimizing alignment between textual responses and visual inputs, DPO reduces hallucinations in multi-modal interactions, ensuring outputs remain faithful to the given context. 
Although reconciling different types of feedback can be challenging, DPO offers a practical framework for lightweight adaptation, making it well-suited to preference-intensive multi-modal applications [280, 284, 285]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 241, + 265, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 241, + 265, + 253 + ], + "spans": [ + { + "bbox": [ + 45, + 241, + 265, + 253 + ], + "type": "text", + "content": "6 CHALLENGES AND FUTURE DIRECTIONS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 257, + 301, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 257, + 301, + 327 + ], + "spans": [ + { + "bbox": [ + 44, + 257, + 301, + 327 + ], + "type": "text", + "content": "In this section, we discuss the key challenges and future directions in DPO research. We identify several critical issues that need to be addressed to further advance the field. Moreover, we propose several promising research directions that can help overcome these challenges and accelerate the adoption of DPO in the future." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 342, + 221, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 342, + 221, + 354 + ], + "spans": [ + { + "bbox": [ + 45, + 342, + 221, + 354 + ], + "type": "text", + "content": "6.1 Efficient Preference Optimization" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 357, + 302, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 357, + 302, + 567 + ], + "spans": [ + { + "bbox": [ + 44, + 357, + 302, + 567 + ], + "type": "text", + "content": "Efficient preference optimization remains a pivotal challenge, as current DPO methods hinge on the availability of high-quality preference data, yet the manual collection of human annotations is both time-consuming and labor-intensive while automatically model-generated datasets often suffer from issues such as limited diversity, inherent biases, and insufficient fidelity to human judgment [121, 122, 128, 129]. Moreover, even though DPO circumvents the intricacies of reward model engineering common in RL, it does not fully leverage the exploratory strengths that RL methods offer, as evidenced by recent advances in reasoning approaches where RL-based training has achieved notable successes [18, 19]. This opens up an avenue for future research to not only enhance data efficiency through advanced learning techniques but also to integrate novel exploration mechanisms [138, 141], potentially through hybrid models that amalgamate the direct preference optimization benefits of DPO with the robust exploratory capabilities characteristic of RL." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 581, + 236, + 593 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 581, + 236, + 593 + ], + "spans": [ + { + "bbox": [ + 45, + 581, + 236, + 593 + ], + "type": "text", + "content": "6.2 Multi-Modal Preference Optimization" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 596, + 303, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 596, + 303, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 596, + 303, + 748 + ], + "type": "text", + "content": "Multi-Modal Preference Optimization presents another frontier, given that existing DPO frameworks have primarily targeted text-based modalities while many real-world applications demand the alignment of diverse human preferences across text, images, audio, and even video [280, 284, 285, 286, 287]. In scenarios where cross-modal cues might conflict, such as the need for concise text paired with richly detailed imagery, the challenge lies in constructing a unified preference representation space that can intelligently and automatically recalibrate the priority of different modalities based on the contextual demands of the task at hand [89, 282, 283]. Future directions in this area could involve the development of innovative multi-modal preference encoding architectures," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 42, + 566, + 90 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 566, + 90 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 566, + 90 + ], + "type": "text", + "content": "which are capable of disentangling compound preferences into modality-specific and cross-modal components that align conflicting preferences while also adapting dynamically to changing inputs." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 309, + 108, + 500, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 108, + 500, + 120 + ], + "spans": [ + { + "bbox": [ + 309, + 108, + 500, + 120 + ], + "type": "text", + "content": "6.3 Continuous Preference Optimization" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 125, + 566, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 125, + 566, + 311 + ], + "spans": [ + { + "bbox": [ + 308, + 125, + 566, + 311 + ], + "type": "text", + "content": "Continuous preference optimization addresses the dynamic nature of human preferences that evolve over time or vary with different phases of a task, a factor that static DPO models often fail to capture [123, 135, 137, 185]. As social norms and individual preferences shift, there is an increasing need for systems that can continuously recalibrate their alignment strategies in real time while simultaneously mitigating the risk of catastrophic forgetting. Future research in this domain may focus on meta-learning approaches that enable models to learn not only from the current state of preferences but also how to efficiently adapt when these preferences change. By integrating online learning frameworks with mechanisms for detecting temporal shifts and contextual variability in user behavior, researchers can pave the way toward systems that remain consistently relevant and effective in the face of evolving societal and individual expectations." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 330, + 504, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 330, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 309, + 330, + 504, + 342 + ], + "type": "text", + "content": "6.4 Interpretable Preference Optimization" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 346, + 567, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 346, + 567, + 544 + ], + "spans": [ + { + "bbox": [ + 308, + 346, + 567, + 544 + ], + "type": "text", + "content": "Interpretable preference optimization is critical for building trust in models that implicitly align human values, as the opaque nature of current DPO complicates the ability to audit and control the alignment process. In practice, human preferences are multi-dimensional [150, 151, 154], encompassing aspects such as factual accuracy, fairness, creativity, and beyond, and there is a pressing need to decompose these complex preferences into interpretable components that can be individually examined and fine-tuned. Future research could leverage advances in explainable techniques to develop models that not only achieve fine-grained alignment across diverse values but also provide transparent insights into how different preference dimensions interact to shape final decisions. This level of interpretability would allow stakeholders to balance competing values more effectively, ensuring that the alignment process remains both accountable and adaptable as societal norms continue to evolve." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 565, + 397, + 577 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 565, + 397, + 577 + ], + "spans": [ + { + "bbox": [ + 309, + 565, + 397, + 577 + ], + "type": "text", + "content": "7 CONCLUSION" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 584, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 584, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 584, + 566, + 748 + ], + "type": "text", + "content": "In recent years, DPO has emerged as a promising paradigm for aligning LLMs with human preferences by directly optimizing model policies using preference data. Despite its potential, the DPO research landscape remains fragmented, with a lack of systematic organization and comparative analysis. In this survey, we present a comprehensive overview of DPO and introduce a novel taxonomy that categorizes existing works into four key dimensions: data strategy, learning framework, constraint mechanism, and model property. We have also discussed the key benchmarks, evaluation results, and applications of DPO, highlighting the challenges and future directions in this field. By providing a systematic analysis of the existing DPO methods, we aim to facilitate further research and development in this area." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 41, + 115, + 53 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 41, + 115, + 53 + ], + "spans": [ + { + "bbox": [ + 46, + 41, + 115, + 53 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 60, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 38, + "blocks": [ + { + "bbox": [ + 47, + 60, + 301, + 78 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 60, + 301, + 78 + ], + "spans": [ + { + "bbox": [ + 47, + 60, + 301, + 78 + ], + "type": "text", + "content": "[1] Wayne Xin Zhao et al. A survey of large language models. arXiv, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 79, + 301, + 97 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 79, + 301, + 97 + ], + "spans": [ + { + "bbox": [ + 47, + 79, + 301, + 97 + ], + "type": "text", + "content": "[2] Humza Naveed et al. A comprehensive overview of large language models. arXiv, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 98, + 301, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 98, + 301, + 115 + ], + "spans": [ + { + "bbox": [ + 47, + 98, + 301, + 115 + ], + "type": "text", + "content": "[3] Yupeng Chang et al. A survey on evaluation of large language models. TIIS, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 115, + 301, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 115, + 301, + 133 + ], + "spans": [ + { + "bbox": [ + 47, + 115, + 301, + 133 + ], + "type": "text", + "content": "[4] Shervin Minaee et al. Large language models: A survey. arXiv, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 133, + 301, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 133, + 301, + 151 + ], + "spans": [ + { + "bbox": [ + 47, + 133, + 301, + 151 + ], + "type": "text", + "content": "[5] Shukang Yin et al. A survey on multimodal large language models. arXiv, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 152, + 301, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 152, + 301, + 169 + ], + "spans": [ + { + "bbox": [ + 47, + 152, + 301, + 169 + ], + "type": "text", + "content": "[6] Duzhen Zhang et al. Mm-llms: Recent advances in multimodal large language models. ACL, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 170, + 301, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 170, + 301, + 187 + ], + "spans": [ + { + "bbox": [ + 47, + 170, + 301, + 187 + ], + "type": "text", + "content": "[7] Jingyi Zhang et al. Vision-language models for vision tasks: A survey. TPAMI, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 187, + 301, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 187, + 301, + 214 + ], + "spans": [ + { + "bbox": [ + 47, + 187, + 301, + 214 + ], + "type": "text", + "content": "[8] Zhehui Wang et al. Enabling energy-efficient deployment of large language models on memristor crossbar: A synergy of large and small. TPAMI, 2024." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 215, + 301, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 215, + 301, + 232 + ], + "spans": [ + { + "bbox": [ + 47, + 215, + 301, + 232 + ], + "type": "text", + "content": "[9] Hongru Wang et al. A survey of the evolution of language model-based dialogue systems. arXiv, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 233, + 301, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 233, + 301, + 250 + ], + "spans": [ + { + "bbox": [ + 47, + 233, + 301, + 250 + ], + "type": "text", + "content": "[10] Zihao Yi et al. A survey on recent advances in llm-based multi-turn dialogue systems. arXiv, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 251, + 301, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 251, + 301, + 277 + ], + "spans": [ + { + "bbox": [ + 47, + 251, + 301, + 277 + ], + "type": "text", + "content": "[11] Jiawei Liu et al. Is your code generated by chatgpt really correct? rigorous evaluation of large language models for code generation. NeurIPS, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 277, + 301, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 277, + 301, + 295 + ], + "spans": [ + { + "bbox": [ + 47, + 277, + 301, + 295 + ], + "type": "text", + "content": "[12] Daya Guo et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. arXiv, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 296, + 301, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 296, + 301, + 312 + ], + "spans": [ + { + "bbox": [ + 47, + 296, + 301, + 312 + ], + "type": "text", + "content": "[13] Xue Jiang et al. Self-planning code generation with large language models. TOSEM, 2024." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 313, + 301, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 313, + 301, + 340 + ], + "spans": [ + { + "bbox": [ + 47, + 313, + 301, + 340 + ], + "type": "text", + "content": "[14] Dave Van Veen et al. Adapted large language models can outperform medical experts in clinical text summarization. Nature Medicine, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 341, + 301, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 341, + 301, + 367 + ], + "spans": [ + { + "bbox": [ + 47, + 341, + 301, + 367 + ], + "type": "text", + "content": "[15] Jesutofunmi A Omiye et al. Large language models in medicine: the potentials and pitfalls: a narrative review. Annals of Internal Medicine, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 368, + 301, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 368, + 301, + 385 + ], + "spans": [ + { + "bbox": [ + 47, + 368, + 301, + 385 + ], + "type": "text", + "content": "[16] Karan Singhal et al. Toward expert-level medical question answering with large language models. Nature Medicine, 2025." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 386, + 301, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 386, + 301, + 403 + ], + "spans": [ + { + "bbox": [ + 47, + 386, + 301, + 403 + ], + "type": "text", + "content": "[17] Fenglin Liu et al. Aligning, autoencoding and prompting large language models for novel disease reporting. TPAMI, 2025." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 404, + 263, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 404, + 263, + 412 + ], + "spans": [ + { + "bbox": [ + 47, + 404, + 263, + 412 + ], + "type": "text", + "content": "[18] Aaron Jaech et al. Openai o1 system card. arXiv, 2024." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 413, + 301, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 413, + 301, + 430 + ], + "spans": [ + { + "bbox": [ + 47, + 413, + 301, + 430 + ], + "type": "text", + "content": "[19] Daya Guo et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv, 2025." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 431, + 301, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 431, + 301, + 449 + ], + "spans": [ + { + "bbox": [ + 47, + 431, + 301, + 449 + ], + "type": "text", + "content": "[20] Julia Hirschberg and Christopher D Manning. Advances in natural language processing. Science, 2015." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 47, + 449, + 301, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 449, + 301, + 475 + ], + "spans": [ + { + "bbox": [ + 47, + 449, + 301, + 475 + ], + "type": "text", + "content": "[21] Xiaowei Huang et al. A survey of safety and trustworthiness of large language models through the lens of verification and validation. Artificial Intelligence Review, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 47, + 475, + 301, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 475, + 301, + 494 + ], + "spans": [ + { + "bbox": [ + 47, + 475, + 301, + 494 + ], + "type": "text", + "content": "[22] Yue Zhang et al. Siren's song in the ai ocean: a survey on hallucination in large language models. arXiv, 2023." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 47, + 495, + 301, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 495, + 301, + 511 + ], + "spans": [ + { + "bbox": [ + 47, + 495, + 301, + 511 + ], + "type": "text", + "content": "[23] Isabel O Gallegos et al. Bias and fairness in large language models: A survey. Computational Linguistics, 2024." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 47, + 512, + 301, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 512, + 301, + 529 + ], + "spans": [ + { + "bbox": [ + 47, + 512, + 301, + 529 + ], + "type": "text", + "content": "[24] Yufei Wang et al. Aligning large language models with human: A survey. arXiv, 2023." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 47, + 529, + 301, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 529, + 301, + 548 + ], + "spans": [ + { + "bbox": [ + 47, + 529, + 301, + 548 + ], + "type": "text", + "content": "[25] Yang Liu et al. Trustworthy llms: A survey and guideline for evaluating large language models' alignment. arXiv, 2023." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 47, + 548, + 301, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 548, + 301, + 565 + ], + "spans": [ + { + "bbox": [ + 47, + 548, + 301, + 565 + ], + "type": "text", + "content": "[26] Tianhao Shen et al. Large language model alignment: A survey. arXiv, 2023." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 47, + 566, + 301, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 566, + 301, + 592 + ], + "spans": [ + { + "bbox": [ + 47, + 566, + 301, + 592 + ], + "type": "text", + "content": "[27] Hannah Rose Kirk et al. The benefits, risks and bounds of personalizing the alignment of large language models to individuals. Nature Machine Intelligence, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 47, + 593, + 301, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 593, + 301, + 610 + ], + "spans": [ + { + "bbox": [ + 47, + 593, + 301, + 610 + ], + "type": "text", + "content": "[28] Usman Anwar et al. Foundational challenges in assuring alignment and safety of large language models. arXiv, 2024." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 47, + 611, + 301, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 611, + 301, + 628 + ], + "spans": [ + { + "bbox": [ + 47, + 611, + 301, + 628 + ], + "type": "text", + "content": "[29] Bofei Gao et al. Towards a unified view of preference learning for large language models: A survey. arXiv, 2024." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 47, + 629, + 301, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 629, + 301, + 646 + ], + "spans": [ + { + "bbox": [ + 47, + 629, + 301, + 646 + ], + "type": "text", + "content": "[30] Ruili Jiang et al. A survey on human preference learning for large language models. arXiv, 2024." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 47, + 647, + 301, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 647, + 301, + 665 + ], + "spans": [ + { + "bbox": [ + 47, + 647, + 301, + 665 + ], + "type": "text", + "content": "[31] Zhichao Wang et al. A comprehensive survey of llm alignment techniques: Rlhf, rlaif, ppo, dpo and more. arXiv, 2024." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 47, + 666, + 301, + 682 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 666, + 301, + 682 + ], + "spans": [ + { + "bbox": [ + 47, + 666, + 301, + 682 + ], + "type": "text", + "content": "[32] Genta Indra Winata et al. Preference tuning with human feedback on language, speech, and vision tasks: A survey. arXiv, 2024." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 47, + 683, + 301, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 683, + 301, + 700 + ], + "spans": [ + { + "bbox": [ + 47, + 683, + 301, + 700 + ], + "type": "text", + "content": "[33] Yue Huang et al. Position: TrustLLM: Trustworthiness in large language models. ICML, 2024." 
+ } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 47, + 700, + 301, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 700, + 301, + 718 + ], + "spans": [ + { + "bbox": [ + 47, + 700, + 301, + 718 + ], + "type": "text", + "content": "[34] Paul F Christiano et al. Deep reinforcement learning from human preferences. NeurIPS, 2017." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 47, + 719, + 301, + 736 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 719, + 301, + 736 + ], + "spans": [ + { + "bbox": [ + 47, + 719, + 301, + 736 + ], + "type": "text", + "content": "[35] Long Ouyang et al. Training language models to follow instructions with human feedback. NeurIPS, 2022." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 47, + 737, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 737, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 737, + 301, + 746 + ], + "type": "text", + "content": "[36] Nisan Stiennon et al. Learning to summarize with human" + } + ] + } + ], + "index": 37 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 311, + 44, + 564, + 737 + ], + "type": "list", + "angle": 0, + "index": 78, + "blocks": [ + { + "bbox": [ + 332, + 44, + 422, + 53 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 44, + 422, + 53 + ], + "spans": [ + { + "bbox": [ + 332, + 44, + 422, + 53 + ], + "type": "text", + "content": "feedback. NeurIPS, 2020." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 311, + 53, + 525, + 62 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 53, + 525, + 62 + ], + "spans": [ + { + "bbox": [ + 311, + 53, + 525, + 62 + ], + "type": "text", + "content": "[37] Josh Achiam et al. Gpt-4 technical report. arXiv, 2023." 
+ } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 311, + 63, + 564, + 79 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 63, + 564, + 79 + ], + "spans": [ + { + "bbox": [ + 311, + 63, + 564, + 79 + ], + "type": "text", + "content": "[38] Yuntao Bai et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv, 2022." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 311, + 80, + 564, + 89 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 80, + 564, + 89 + ], + "spans": [ + { + "bbox": [ + 311, + 80, + 564, + 89 + ], + "type": "text", + "content": "[39] Anthropic. The claude 3 model family: Opus, sonnet, haiku, 2024." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 311, + 90, + 564, + 107 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 90, + 564, + 107 + ], + "spans": [ + { + "bbox": [ + 311, + 90, + 564, + 107 + ], + "type": "text", + "content": "[40] Yuchun Miao et al. Inform: Mitigating reward hacking in rlhf via information-theoretic reward modeling. NeurIPS, 2024." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 311, + 108, + 564, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 108, + 564, + 125 + ], + "spans": [ + { + "bbox": [ + 311, + 108, + 564, + 125 + ], + "type": "text", + "content": "[41] Stephen Casper et al. Open problems and fundamental limitations of reinforcement learning from human feedback. arXiv, 2023." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 311, + 125, + 564, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 125, + 564, + 143 + ], + "spans": [ + { + "bbox": [ + 311, + 125, + 564, + 143 + ], + "type": "text", + "content": "[42] Keertana Chidambaram et al. Direct preference optimization with unobserved preference heterogeneity. arXiv, 2024." 
+ } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 311, + 144, + 564, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 144, + 564, + 161 + ], + "spans": [ + { + "bbox": [ + 311, + 144, + 564, + 161 + ], + "type": "text", + "content": "[43] Haoxian Chen et al. Mallowspo: Fine-tune your llm with preference dispersions. arXiv, 2024." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 311, + 162, + 564, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 162, + 564, + 178 + ], + "spans": [ + { + "bbox": [ + 311, + 162, + 564, + 178 + ], + "type": "text", + "content": "[44] Shyam Sundhar Ramesh et al. Group robust preference optimization in reward-free rlhf. arXiv, 2024." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 311, + 179, + 564, + 197 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 179, + 564, + 197 + ], + "spans": [ + { + "bbox": [ + 311, + 179, + 564, + 197 + ], + "type": "text", + "content": "[45] Binwei Yao et al. No preference left behind: Group distributional preference optimization. *ICLR*, 2025." + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 311, + 198, + 564, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 198, + 564, + 215 + ], + "spans": [ + { + "bbox": [ + 311, + 198, + 564, + 215 + ], + "type": "text", + "content": "[46] Afra Amini et al. Direct preference optimization with an offset. ACL Findings, 2024." + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 311, + 216, + 564, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 216, + 564, + 241 + ], + "spans": [ + { + "bbox": [ + 311, + 216, + 564, + 241 + ], + "type": "text", + "content": "[47] Qi Gou and Cam-Tu Nguyen. Mixed preference optimization: Reinforcement learning with data selection and better reference model. arXiv, 2024." 
+ } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 311, + 242, + 564, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 242, + 564, + 259 + ], + "spans": [ + { + "bbox": [ + 311, + 242, + 564, + 259 + ], + "type": "text", + "content": "[48] Shiqi Wang et al. Reward difference optimization for sample reweighting in offline RLHF. EMNLP Findings, 2024." + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 311, + 261, + 564, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 261, + 564, + 277 + ], + "spans": [ + { + "bbox": [ + 311, + 261, + 564, + 277 + ], + "type": "text", + "content": "[49] Junkang Wu et al. " + }, + { + "bbox": [ + 311, + 261, + 564, + 277 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 311, + 261, + 564, + 277 + ], + "type": "text", + "content": "-dpo: Adaptive reward margin is what direct preference optimization needs. arXiv, 2024." + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 311, + 278, + 564, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 278, + 564, + 296 + ], + "spans": [ + { + "bbox": [ + 311, + 278, + 564, + 296 + ], + "type": "text", + "content": "[50] Hiroki Furuta et al. Geometric-averaged preference optimization for soft preference labels. NeurIPS, 2024." + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 311, + 297, + 564, + 313 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 297, + 564, + 313 + ], + "spans": [ + { + "bbox": [ + 311, + 297, + 564, + 313 + ], + "type": "text", + "content": "[51] Junkang Wu et al. Beta-dpo: Direct preference optimization with dynamic beta. NeurIPS, 2024." 
+ } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 311, + 314, + 564, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 314, + 564, + 331 + ], + "spans": [ + { + "bbox": [ + 311, + 314, + 564, + 331 + ], + "type": "text", + "content": "[52] Tetsuro Morimura et al. Filtered direct preference optimization, EMNLP, 2024." + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 311, + 332, + 564, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 332, + 564, + 350 + ], + "spans": [ + { + "bbox": [ + 311, + 332, + 564, + 350 + ], + "type": "text", + "content": "[53] Pulkit Pattnaik et al. Enhancing alignment using curriculum learning & ranked preferences. EMNLP, 2024." + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 311, + 350, + 564, + 368 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 350, + 564, + 368 + ], + "spans": [ + { + "bbox": [ + 311, + 350, + 564, + 368 + ], + "type": "text", + "content": "[54] Ilgee Hong et al. Adaptive preference scaling for reinforcement learning with human feedback. NeurIPS, 2024." + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 311, + 369, + 564, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 369, + 564, + 385 + ], + "spans": [ + { + "bbox": [ + 311, + 369, + 564, + 385 + ], + "type": "text", + "content": "[55] Dahiyun Kim et al. Sdpo: Don't use your data all at once. arXiv, 2024." + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 311, + 386, + 564, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 386, + 564, + 403 + ], + "spans": [ + { + "bbox": [ + 311, + 386, + 564, + 403 + ], + "type": "text", + "content": "[56] Runsheng Yu et al. Direct alignment of language models via quality-aware self-refinement. arXiv, 2024." 
+ } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 311, + 404, + 564, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 404, + 564, + 422 + ], + "spans": [ + { + "bbox": [ + 311, + 404, + 564, + 422 + ], + "type": "text", + "content": "[57] Lou Jieming et al. Gap-aware preference optimization: Enhancing model alignment with perception margin. OpenReview, 2024." + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 311, + 422, + 564, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 422, + 564, + 439 + ], + "spans": [ + { + "bbox": [ + 311, + 422, + 564, + 439 + ], + "type": "text", + "content": "[58] Jingyuan Ma et al. Plug-and-play training framework for preference optimization. arXiv, 2024." + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 311, + 440, + 564, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 440, + 564, + 458 + ], + "spans": [ + { + "bbox": [ + 311, + 440, + 564, + 458 + ], + "type": "text", + "content": "[59] Sayak Ray Chowdhury et al. Provably robust DPO: Aligning language models with noisy feedback. ICML, 2024." + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 311, + 459, + 564, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 459, + 564, + 475 + ], + "spans": [ + { + "bbox": [ + 311, + 459, + 564, + 475 + ], + "type": "text", + "content": "[60] Keyi Kong et al. Perplexity-aware correction for robust alignment with noisy preferences. NeurIPS, 2024." + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 311, + 476, + 564, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 476, + 564, + 494 + ], + "spans": [ + { + "bbox": [ + 311, + 476, + 564, + 494 + ], + "type": "text", + "content": "[61] Xize Liang et al. Ropo: Robust preference optimization for large language models. arXiv, 2024." 
+ } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 311, + 495, + 564, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 495, + 564, + 512 + ], + "spans": [ + { + "bbox": [ + 311, + 495, + 564, + 512 + ], + "type": "text", + "content": "[62] Dongyoung Kim et al. Spread preference annotation: Direct preference judgment for efficient LLM alignment. ICLR, 2025." + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 311, + 513, + 564, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 513, + 564, + 529 + ], + "spans": [ + { + "bbox": [ + 311, + 513, + 564, + 529 + ], + "type": "text", + "content": "[63] Lingfan Zhang et al. Combating inherent noise for direct preference optimization. OpenReview, 2025." + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 311, + 530, + 564, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 530, + 564, + 548 + ], + "spans": [ + { + "bbox": [ + 311, + 530, + 564, + 548 + ], + "type": "text", + "content": "[64] Shawn Im and Yixuan Li. Understanding generalization of preference optimization under noisy feedback. OpenReview, 2025." + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 311, + 548, + 564, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 548, + 564, + 566 + ], + "spans": [ + { + "bbox": [ + 311, + 548, + 564, + 566 + ], + "type": "text", + "content": "[65] Yang Gao et al. Impact of preference noise on the alignment performance of generative language models. COLM, 2024." + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 311, + 567, + 564, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 567, + 564, + 592 + ], + "spans": [ + { + "bbox": [ + 311, + 567, + 564, + 592 + ], + "type": "text", + "content": "[66] Junkang Wu et al. Towards robust alignment of language models: Distributionally robustifying direct preference optimization. ICLR, 2024." 
+ } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 311, + 593, + 564, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 593, + 564, + 610 + ], + "spans": [ + { + "bbox": [ + 311, + 593, + 564, + 610 + ], + "type": "text", + "content": "[67] Kawin Ethayarajh et al. Model alignment as prospect theoretic optimization. ICML, 2024." + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 311, + 611, + 564, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 611, + 564, + 628 + ], + "spans": [ + { + "bbox": [ + 311, + 611, + 564, + 628 + ], + "type": "text", + "content": "[68] Seungjae Jung et al. Binary classifier optimization for large language model alignment. arXiv, 2024." + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 311, + 629, + 564, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 629, + 564, + 647 + ], + "spans": [ + { + "bbox": [ + 311, + 629, + 564, + 647 + ], + "type": "text", + "content": "[69] Teng Xiao et al. Cal-dpo: Calibrated direct preference optimization for language model alignment. NeurIPS, 2024." + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 311, + 647, + 564, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 647, + 564, + 665 + ], + "spans": [ + { + "bbox": [ + 311, + 647, + 564, + 665 + ], + "type": "text", + "content": "[70] Igor Melnyk et al. Distributional preference alignment of llms via optimal transport. NeurIPS, 2024." + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 311, + 666, + 564, + 683 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 666, + 564, + 683 + ], + "spans": [ + { + "bbox": [ + 311, + 666, + 564, + 683 + ], + "type": "text", + "content": "[71] Tianchi Cai et al. Ulma: Unified language model alignment with human demonstration and point-wise preference. arXiv, 2023." 
+ } + ] + } + ], + "index": 74 + }, + { + "bbox": [ + 311, + 684, + 564, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 684, + 564, + 700 + ], + "spans": [ + { + "bbox": [ + 311, + 684, + 564, + 700 + ], + "type": "text", + "content": "[72] Huayu Chen et al. Noise contrastive alignment of language models with explicit rewards. NeurIPS, 2024." + } + ] + } + ], + "index": 75 + }, + { + "bbox": [ + 311, + 701, + 564, + 719 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 701, + 564, + 719 + ], + "spans": [ + { + "bbox": [ + 311, + 701, + 564, + 719 + ], + "type": "text", + "content": "[73] Yifan Zhang et al. General preference modeling with preference representations for aligning language models. arXiv, 2024." + } + ] + } + ], + "index": 76 + }, + { + "bbox": [ + 311, + 719, + 564, + 737 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 719, + 564, + 737 + ], + "spans": [ + { + "bbox": [ + 311, + 719, + 564, + 737 + ], + "type": "text", + "content": "[74] Rafael Rafailov et al. Direct preference optimization: Your language model is secretly a reward model. NeurIPS, 2023." 
+ } + ] + } + ], + "index": 77 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 43, + 301, + 738 + ], + "type": "list", + "angle": 0, + "index": 37, + "blocks": [ + { + "bbox": [ + 47, + 43, + 301, + 63 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 43, + 301, + 63 + ], + "spans": [ + { + "bbox": [ + 47, + 43, + 301, + 63 + ], + "type": "text", + "content": "[75] Mohammad Gheshlaghi Azar et al. A general theoretical paradigm to understand learning from human preferences. AISTATS, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 62, + 300, + 80 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 62, + 300, + 80 + ], + "spans": [ + { + "bbox": [ + 47, + 62, + 300, + 80 + ], + "type": "text", + "content": "[76] Jinghong Chen et al. On extending direct preference optimization to accommodate ties. arXiv, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 79, + 301, + 98 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 79, + 301, + 98 + ], + "spans": [ + { + "bbox": [ + 47, + 79, + 301, + 98 + ], + "type": "text", + "content": "[77] Yuxin Jiang et al. Bridging and modeling correlations in pairwise data for direct preference optimization. arXiv, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 98, + 301, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 98, + 301, + 125 + ], + "spans": [ + { + "bbox": [ + 47, + 98, + 301, + 125 + ], + "type": "text", + "content": "[78] Xinghua Zhang et al. 
Iopo: Empowering llms with complex instruction following via input-output preference optimization. arXiv, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 125, + 301, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 125, + 301, + 143 + ], + "spans": [ + { + "bbox": [ + 47, + 125, + 301, + 143 + ], + "type": "text", + "content": "[79] Abbas Abdelmaleki et al. Preference optimization as probabilistic inference. ICLR, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 143, + 301, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 143, + 301, + 161 + ], + "spans": [ + { + "bbox": [ + 47, + 143, + 301, + 161 + ], + "type": "text", + "content": "[80] Yueqin Yin et al. Self-augmented preference optimization: Off-policy paradigms for language model alignment. arXiv, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 161, + 301, + 188 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 161, + 301, + 188 + ], + "spans": [ + { + "bbox": [ + 47, + 161, + 301, + 188 + ], + "type": "text", + "content": "[81] Shitong Duan et al. Negating negatives: Alignment with human negative samples via distributional dispreference optimization. arXiv, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 188, + 301, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 188, + 301, + 206 + ], + "spans": [ + { + "bbox": [ + 47, + 188, + 301, + 206 + ], + "type": "text", + "content": "[82] Ruiqi Zhang et al. Negative preference optimization: From catastrophic collapse to effective unlearning. COLM, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 206, + 301, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 206, + 301, + 224 + ], + "spans": [ + { + "bbox": [ + 47, + 206, + 301, + 224 + ], + "type": "text", + "content": "[83] Chongyu Fan et al. 
Simplicity prevails: Rethinking negative preference optimization for lmm unlearning. arXiv, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 224, + 301, + 242 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 224, + 301, + 242 + ], + "spans": [ + { + "bbox": [ + 47, + 224, + 301, + 242 + ], + "type": "text", + "content": "[84] Yifan Zhong et al. Panacea: Pareto alignment via preference adaptation for llms. NeurIPS, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 242, + 301, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 242, + 301, + 260 + ], + "spans": [ + { + "bbox": [ + 47, + 242, + 301, + 260 + ], + "type": "text", + "content": "[85] Tianqi Liu et al. Lipo: Listwise preference optimization through learning-to-rank, 2024. arXiv, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 260, + 301, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 260, + 301, + 278 + ], + "spans": [ + { + "bbox": [ + 47, + 260, + 301, + 278 + ], + "type": "text", + "content": "[86] Mingye Zhu et al. LIRE: listwise reward enhancement for preference alignment. ACL, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 278, + 301, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 278, + 301, + 297 + ], + "spans": [ + { + "bbox": [ + 47, + 278, + 301, + 297 + ], + "type": "text", + "content": "[87] Yang Zhao et al. Ordinal preference optimization: Aligning human preferences via ndcg. arXiv, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 296, + 301, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 296, + 301, + 315 + ], + "spans": [ + { + "bbox": [ + 47, + 296, + 301, + 315 + ], + "type": "text", + "content": "[88] Jiacong Zhou et al. Optimizing preference alignment with differentiable ndcg ranking. arXiv, 2024." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 315, + 301, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 315, + 301, + 332 + ], + "spans": [ + { + "bbox": [ + 47, + 315, + 301, + 332 + ], + "type": "text", + "content": "[89] Fei Wang et al. mDPO: Conditional preference optimization for multimodal large language models. EMNLP, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 332, + 301, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 332, + 301, + 359 + ], + "spans": [ + { + "bbox": [ + 47, + 332, + 301, + 359 + ], + "type": "text", + "content": "[90] Yueqin Yin et al. Relative preference optimization: Enhancing llm alignment through contrasting responses across identical and diverse prompts. arXiv, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 359, + 301, + 377 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 359, + 301, + 377 + ], + "spans": [ + { + "bbox": [ + 47, + 359, + 301, + 377 + ], + "type": "text", + "content": "[91] Yuxiang Guo et al. Todo: Enhancing llm alignment with ternary preferences. ICLR, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 377, + 301, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 377, + 301, + 395 + ], + "spans": [ + { + "bbox": [ + 47, + 377, + 301, + 395 + ], + "type": "text", + "content": "[92] Rafael Rafailov et al. From r to " + }, + { + "bbox": [ + 47, + 377, + 301, + 395 + ], + "type": "inline_equation", + "content": "q^*" + }, + { + "bbox": [ + 47, + 377, + 301, + 395 + ], + "type": "text", + "content": ": Your language model is secretly a q-function. COLM, 2024." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 395, + 301, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 395, + 301, + 413 + ], + "spans": [ + { + "bbox": [ + 47, + 395, + 301, + 413 + ], + "type": "text", + "content": "[93] Yongcheng Zeng et al. Token-level direct preference optimization. ICML, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 413, + 301, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 413, + 301, + 431 + ], + "spans": [ + { + "bbox": [ + 47, + 413, + 301, + 431 + ], + "type": "text", + "content": "[94] Aiwei Liu et al. Tis-dpo: Token-level importance sampling for direct preference optimization with estimated weights. ICLR, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 431, + 301, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 431, + 301, + 449 + ], + "spans": [ + { + "bbox": [ + 47, + 431, + 301, + 449 + ], + "type": "text", + "content": "[95] Fenia Christopoulou et al. Sparsepo: Controlling preference alignment of llms via sparse token masks. arXiv, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 47, + 449, + 301, + 467 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 449, + 301, + 467 + ], + "spans": [ + { + "bbox": [ + 47, + 449, + 301, + 467 + ], + "type": "text", + "content": "[96] Han Zhong et al. Dpo meets ppo: Reinforced token optimization for rlhf. arXiv, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 47, + 467, + 301, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 467, + 301, + 485 + ], + "spans": [ + { + "bbox": [ + 47, + 467, + 301, + 485 + ], + "type": "text", + "content": "[97] Kailai Yang et al. Selective preference optimization via token-level reward function estimation. arXiv, 2024." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 47, + 485, + 301, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 485, + 301, + 503 + ], + "spans": [ + { + "bbox": [ + 47, + 485, + 301, + 503 + ], + "type": "text", + "content": "[98] Qi Zhao et al. EPO: hierarchical LLM agents with environment preference optimization. EMNLP, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 47, + 503, + 301, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 503, + 301, + 529 + ], + "spans": [ + { + "bbox": [ + 47, + 503, + 301, + 529 + ], + "type": "text", + "content": "[99] Ruichen Shao et al. Earlier tokens contribute more: Learning direct preference optimization from temporal decay perspective. *ICLR*, 2025." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 47, + 529, + 301, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 529, + 301, + 548 + ], + "spans": [ + { + "bbox": [ + 47, + 529, + 301, + 548 + ], + "type": "text", + "content": "[100] Xin Lai et al. Step-dpo: Step-wise preference optimization for long-chain reasoning of llms. arXiv, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 47, + 548, + 301, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 548, + 301, + 567 + ], + "spans": [ + { + "bbox": [ + 47, + 548, + 301, + 567 + ], + "type": "text", + "content": "[101] Zimu Lu et al. Step-controlled dpo: Leveraging stepwise error for enhanced mathematical reasoning. arXiv, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 47, + 567, + 301, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 567, + 301, + 585 + ], + "spans": [ + { + "bbox": [ + 47, + 567, + 301, + 585 + ], + "type": "text", + "content": "[102] Xuan Zhang et al. Chain of preference optimization: Improving chain-of-thought reasoning in llms. NeurIPS, 2024." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 47, + 585, + 301, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 585, + 301, + 602 + ], + "spans": [ + { + "bbox": [ + 47, + 585, + 301, + 602 + ], + "type": "text", + "content": "[103] Yuxi Xie et al. Monte carlo tree search boosts reasoning via iterative preference learning. arXiv, 2024." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 47, + 602, + 301, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 602, + 301, + 620 + ], + "spans": [ + { + "bbox": [ + 47, + 602, + 301, + 620 + ], + "type": "text", + "content": "[104] Weibin Liao et al. Tpo: Aligning large language models with multi-branch & multi-step preference trees. arXiv, 2024." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 47, + 620, + 301, + 638 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 620, + 301, + 638 + ], + "spans": [ + { + "bbox": [ + 47, + 620, + 301, + 638 + ], + "type": "text", + "content": "[105] Hoang Anh Just et al. Data-centric human preference optimization with rationales. arXiv, 2024." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 47, + 638, + 301, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 638, + 301, + 665 + ], + "spans": [ + { + "bbox": [ + 47, + 638, + 301, + 665 + ], + "type": "text", + "content": "[106] Jiacai Liu et al. Improving multi-step reasoning abilities of large language models with direct advantage policy optimization. arXiv, 2024." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 47, + 665, + 301, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 665, + 301, + 692 + ], + "spans": [ + { + "bbox": [ + 47, + 665, + 301, + 692 + ], + "type": "text", + "content": "[107] Shuaijie She et al. MAPO: advancing multilingual reasoning through multilingual-alignment-as-preference optimization. ACL, 2024." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 47, + 692, + 301, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 692, + 301, + 711 + ], + "spans": [ + { + "bbox": [ + 47, + 692, + 301, + 711 + ], + "type": "text", + "content": "[108] Lifan Yuan et al. Advancing llm reasoning generalists with preference trees. arXiv, 2024." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 47, + 710, + 301, + 728 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 710, + 301, + 728 + ], + "spans": [ + { + "bbox": [ + 47, + 710, + 301, + 728 + ], + "type": "text", + "content": "[109] Richard Yuanzhe Pang et al. Iterative reasoning preference optimization. NeurIPS, 2024." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 47, + 728, + 301, + 738 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 728, + 301, + 738 + ], + "spans": [ + { + "bbox": [ + 47, + 728, + 301, + 738 + ], + "type": "text", + "content": "[110] Chao-Wei Huang and Yun-Nung Chen. Factalign: Long-form" + } + ] + } + ], + "index": 36 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 43, + 564, + 738 + ], + "type": "list", + "angle": 0, + "index": 75, + "blocks": [ + { + "bbox": [ + 332, + 43, + 543, + 53 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 43, + 543, + 53 + ], + "spans": [ + { + "bbox": [ + 332, + 43, + 543, + 53 + ], + "type": "text", + "content": "factuality alignment of large language models. arXiv, 2024." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 53, + 564, + 72 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 53, + 564, + 72 + ], + "spans": [ + { + "bbox": [ + 310, + 53, + 564, + 72 + ], + "type": "text", + "content": "[111] Wei Xiong et al. Building math agents with multi-turn iterative preference learning. *ICLR*, 2025." 
+ } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 72, + 564, + 89 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 72, + 564, + 89 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 564, + 89 + ], + "type": "text", + "content": "[112] Yifan Song et al. Trial and error: Exploration-based trajectory optimization for lIm agents. ACL, 2024." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 89, + 564, + 107 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 89, + 564, + 107 + ], + "spans": [ + { + "bbox": [ + 310, + 89, + 564, + 107 + ], + "type": "text", + "content": "[113] Aobo Kong et al. Sdpo: Segment-level direct preference optimization for social agents. arXiv, 2025." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 107, + 564, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 107, + 564, + 125 + ], + "spans": [ + { + "bbox": [ + 310, + 107, + 564, + 125 + ], + "type": "text", + "content": "[114] Pranav Putta et al. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv, 2024." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 125, + 564, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 125, + 564, + 143 + ], + "spans": [ + { + "bbox": [ + 310, + 125, + 564, + 143 + ], + "type": "text", + "content": "[115] Wentao Shi et al. Direct multi-turn preference optimization for language agents. EMNLP, 2024." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 310, + 143, + 564, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 143, + 564, + 169 + ], + "spans": [ + { + "bbox": [ + 310, + 143, + 564, + 169 + ], + "type": "text", + "content": "[116] Haoran Xu et al. Contrastive preference optimization: Pushing the boundaries of LLM performance in machine translation. ICML, 2024." 
+ } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 310, + 169, + 564, + 188 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 169, + 564, + 188 + ], + "spans": [ + { + "bbox": [ + 310, + 169, + 564, + 188 + ], + "type": "text", + "content": "[117] Jiwoo Hong et al. ORPO: Monolithic preference optimization without reference model. EMNLP, 2024." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 310, + 188, + 564, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 188, + 564, + 206 + ], + "spans": [ + { + "bbox": [ + 310, + 188, + 564, + 206 + ], + "type": "text", + "content": "[118] Shiva Kumar Pentyala et al. Paft: A parallel training paradigm for effective llm fine-tuning. arXiv, 2024." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 310, + 206, + 564, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 206, + 564, + 233 + ], + "spans": [ + { + "bbox": [ + 310, + 206, + 564, + 233 + ], + "type": "text", + "content": "[119] Songyang Gao et al. Linear alignment: A closed-form solution for aligning human preferences without tuning and feedback. ICML, 2024." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 310, + 233, + 564, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 233, + 564, + 259 + ], + "spans": [ + { + "bbox": [ + 310, + 233, + 564, + 259 + ], + "type": "text", + "content": "[120] Feifan Song et al. Icdpo: Effectively borrowing alignment capability of others via in-context direct preference optimization. arXiv, 2024." + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 310, + 259, + 564, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 259, + 564, + 278 + ], + "spans": [ + { + "bbox": [ + 310, + 259, + 564, + 278 + ], + "type": "text", + "content": "[121] Shangmin Guo et al. Direct language model alignment from online ai feedback. arXiv, 2024." 
+ } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 310, + 278, + 564, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 278, + 564, + 297 + ], + "spans": [ + { + "bbox": [ + 310, + 278, + 564, + 297 + ], + "type": "text", + "content": "[122] Biqing Qi et al. Online dpo: Online direct preference optimization with fast-slow chasing. arXiv, 2024." + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 310, + 296, + 564, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 296, + 564, + 306 + ], + "spans": [ + { + "bbox": [ + 310, + 296, + 564, + 306 + ], + "type": "text", + "content": "[123] Weizhe Yuan et al. Self-rewarding language models. ICML, 2024." + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 310, + 305, + 564, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 305, + 564, + 323 + ], + "spans": [ + { + "bbox": [ + 310, + 305, + 564, + 323 + ], + "type": "text", + "content": "[124] Wenda Xu et al. BPO: Staying close to the behavior LLM creates better online LLM alignment. EMNLP, 2024." + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 310, + 323, + 564, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 323, + 564, + 350 + ], + "spans": [ + { + "bbox": [ + 310, + 323, + 564, + 350 + ], + "type": "text", + "content": "[125] Saeed Khaki et al. RS-DPO: A hybrid rejection sampling and direct preference optimization method for alignment of large language models. NAACL, 2024." + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 310, + 350, + 564, + 368 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 350, + 564, + 368 + ], + "spans": [ + { + "bbox": [ + 310, + 350, + 564, + 368 + ], + "type": "text", + "content": "[126] Tianqi Liu et al. Statistical rejection sampling improves preference optimization. ICLR, 2024." 
+ } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 310, + 368, + 564, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 368, + 564, + 386 + ], + "spans": [ + { + "bbox": [ + 310, + 368, + 564, + 386 + ], + "type": "text", + "content": "[127] Ruizhe Shi et al. The crucial role of samplers in online direct preference optimization. *ICLR*, 2025." + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 310, + 386, + 564, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 386, + 564, + 404 + ], + "spans": [ + { + "bbox": [ + 310, + 386, + 564, + 404 + ], + "type": "text", + "content": "[128] Lichang Chen et al. Optune: Efficient online preference tuning. arXiv, 2024." + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 310, + 404, + 564, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 404, + 564, + 422 + ], + "spans": [ + { + "bbox": [ + 310, + 404, + 564, + 422 + ], + "type": "text", + "content": "[129] Tianduo Wang et al. Self-training with direct preference optimization improves chain-of-thought reasoning. ACL, 2024." + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 310, + 422, + 564, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 422, + 564, + 440 + ], + "spans": [ + { + "bbox": [ + 310, + 422, + 564, + 440 + ], + "type": "text", + "content": "[130] Jiafan He et al. Accelerated preference optimization for large language model alignment. arXiv, 2024." + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 310, + 440, + 564, + 467 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 440, + 564, + 467 + ], + "spans": [ + { + "bbox": [ + 310, + 440, + 564, + 467 + ], + "type": "text", + "content": "[131] Wei Xiong et al. Iterative preference learning from human feedback: Bridging theory and practice for RLHF under KL-constraint. ICML, 2024." 
+ } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 310, + 467, + 564, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 467, + 564, + 485 + ], + "spans": [ + { + "bbox": [ + 310, + 467, + 564, + 485 + ], + "type": "text", + "content": "[132] Yixin Liu et al. Comal: A convergent meta-algorithm for aligning llms with general preferences. arXiv, 2024." + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 310, + 485, + 564, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 485, + 564, + 503 + ], + "spans": [ + { + "bbox": [ + 310, + 485, + 564, + 503 + ], + "type": "text", + "content": "[133] Jing Xu et al. Some things are more cringe than others: Iterative preference optimization with the pairwise cringe loss. arXiv, 2024." + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 310, + 503, + 564, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 503, + 564, + 521 + ], + "spans": [ + { + "bbox": [ + 310, + 503, + 564, + 521 + ], + "type": "text", + "content": "[134] Jongwoo Ko et al. Sera: Self-reviewing and alignment of large language models using implicit reward margins. *ICLR*, 2025." + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 310, + 521, + 564, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 521, + 564, + 540 + ], + "spans": [ + { + "bbox": [ + 310, + 521, + 564, + 540 + ], + "type": "text", + "content": "[135] Zhaoyang Wang et al. Cream: Consistency regularized self-rewarding language models. *ICLR*, 2025." + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 310, + 540, + 564, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 540, + 564, + 557 + ], + "spans": [ + { + "bbox": [ + 310, + 540, + 564, + 557 + ], + "type": "text", + "content": "[136] Prasann Singhal et al. D2PO: Discriminator-guided DPO with response evaluation models. COLM, 2024." 
+ } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 310, + 557, + 564, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 557, + 564, + 575 + ], + "spans": [ + { + "bbox": [ + 310, + 557, + 564, + 575 + ], + "type": "text", + "content": "[137] Aiwei Liu et al. Direct large language model alignment through self-rewarding contrastive prompt distillation. ACL, 2024." + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 310, + 575, + 564, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 575, + 564, + 602 + ], + "spans": [ + { + "bbox": [ + 310, + 575, + 564, + 602 + ], + "type": "text", + "content": "[138] Tengyang Xie et al. Exploratory preference optimization: Provably sample-efficient exploration in rlhf with general function approximation. *ICLR*, 2025." + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 310, + 602, + 564, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 602, + 564, + 620 + ], + "spans": [ + { + "bbox": [ + 310, + 602, + 564, + 620 + ], + "type": "text", + "content": "[139] Shenao Zhang et al. Self-exploring language models: Active preference elicitation for online alignment. arXiv, 2024." + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 310, + 620, + 564, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 620, + 564, + 639 + ], + "spans": [ + { + "bbox": [ + 310, + 620, + 564, + 639 + ], + "type": "text", + "content": "[140] Shicong Cen et al. Value-incentivized preference optimization: A unified approach to online and offline rlhf. *ICLR*, 2025." + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 310, + 639, + 564, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 639, + 564, + 656 + ], + "spans": [ + { + "bbox": [ + 310, + 639, + 564, + 656 + ], + "type": "text", + "content": "[141] Chenjia Bai et al. 
Online preference alignment for language models via count-based exploration. *ICLR*, 2025." + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 310, + 656, + 564, + 674 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 656, + 564, + 674 + ], + "spans": [ + { + "bbox": [ + 310, + 656, + 564, + 674 + ], + "type": "text", + "content": "[142] Yuda Song et al. The importance of online data: Understanding preference fine-tuning via coverage. NeurIPS, 2024." + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 310, + 674, + 564, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 674, + 564, + 692 + ], + "spans": [ + { + "bbox": [ + 310, + 674, + 564, + 692 + ], + "type": "text", + "content": "[143] Yaojie Shen et al. Aipo: Improving training objective for iterative preference optimization. arXiv, 2024." + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 310, + 692, + 564, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 692, + 564, + 711 + ], + "spans": [ + { + "bbox": [ + 310, + 692, + 564, + 711 + ], + "type": "text", + "content": "[144] Yunhao Tang et al. Understanding the performance gap between online and offline alignment algorithms. arXiv, 2024." + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 310, + 710, + 564, + 728 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 710, + 564, + 728 + ], + "spans": [ + { + "bbox": [ + 310, + 710, + 564, + 728 + ], + "type": "text", + "content": "[145] Shusheng Xu et al. Is DPO superior to PPO for LLM alignment? A comprehensive study. ICML, 2024." + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 310, + 728, + 564, + 738 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 728, + 564, + 738 + ], + "spans": [ + { + "bbox": [ + 310, + 728, + 564, + 738 + ], + "type": "text", + "content": "[146] William Muldrew et al. 
Active preference learning for large" + } + ] + } + ], + "index": 74 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 44, + 301, + 738 + ], + "type": "list", + "angle": 0, + "index": 37, + "blocks": [ + { + "bbox": [ + 68, + 44, + 178, + 53 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 44, + 178, + 53 + ], + "spans": [ + { + "bbox": [ + 68, + 44, + 178, + 53 + ], + "type": "text", + "content": "language models. ICML, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 53, + 301, + 71 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 53, + 301, + 71 + ], + "spans": [ + { + "bbox": [ + 46, + 53, + 301, + 71 + ], + "type": "text", + "content": "[147] Seola Choi et al. Active preference optimization via maximizing learning capacity. OpenReview, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 72, + 301, + 89 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 301, + 89 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 301, + 89 + ], + "type": "text", + "content": "[148] Kaixuan Ji et al. Reinforcement learning from human feedback with active queries. arXiv, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 89, + 301, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 89, + 301, + 106 + ], + "spans": [ + { + "bbox": [ + 46, + 89, + 301, + 106 + ], + "type": "text", + "content": "[149] Nirjhar Das et al. Active preference optimization for sample efficient rlhf. arXiv, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 107, + 301, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 107, + 301, + 125 + ], + "spans": [ + { + "bbox": [ + 46, + 107, + 301, + 125 + ], + "type": "text", + "content": "[150] Zhanhui Zhou et al. Beyond one-preference-fits-all alignment: Multi-objective direct preference optimization. ACL Findings, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 125, + 301, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 125, + 301, + 143 + ], + "spans": [ + { + "bbox": [ + 46, + 125, + 301, + 143 + ], + "type": "text", + "content": "[151] Xingzhou Lou et al. Spo: Multi-dimensional preference sequential alignment with implicit reward modeling. arXiv, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 144, + 301, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 144, + 301, + 161 + ], + "spans": [ + { + "bbox": [ + 46, + 144, + 301, + 161 + ], + "type": "text", + "content": "[152] Yu Zhang et al. MOSLIM: Align with diverse preferences in prompts through reward classification. OpenReview, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 162, + 301, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 162, + 301, + 187 + ], + "spans": [ + { + "bbox": [ + 46, + 162, + 301, + 187 + ], + "type": "text", + "content": "[153] Anirudhan Badrinath et al. Hybrid preference optimization: Aug-mentation direct preference optimization with auxiliary objectives. arXiv, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 188, + 301, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 188, + 301, + 206 + ], + "spans": [ + { + "bbox": [ + 46, + 188, + 301, + 206 + ], + "type": "text", + "content": "[154] Yiju Guo et al. Controllable preference optimization: Toward controllable multi-objective alignment. 
EMNLP, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 207, + 301, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 207, + 301, + 233 + ], + "spans": [ + { + "bbox": [ + 46, + 207, + 301, + 233 + ], + "type": "text", + "content": "[155] Abhijnan Nath et al. Simultaneous reward distillation and preference learning: Get you a language model who can do both. arXiv, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 233, + 301, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 233, + 301, + 251 + ], + "spans": [ + { + "bbox": [ + 46, + 233, + 301, + 251 + ], + "type": "text", + "content": "[156] Zixiang Chen et al. Self-play fine-tuning converts weak language models to strong language models. ICML, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 251, + 301, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 251, + 301, + 269 + ], + "spans": [ + { + "bbox": [ + 46, + 251, + 301, + 269 + ], + "type": "text", + "content": "[157] Yue Wu et al. Self-play preference optimization for language model alignment. ICLR, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 270, + 301, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 270, + 301, + 287 + ], + "spans": [ + { + "bbox": [ + 46, + 270, + 301, + 287 + ], + "type": "text", + "content": "[158]Gokul Swamy et al. A minimaximalist approach to reinforcement learning from human feedback. ICML, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 288, + 301, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 288, + 301, + 305 + ], + "spans": [ + { + "bbox": [ + 46, + 288, + 301, + 305 + ], + "type": "text", + "content": "[159] Lin Gui et al. Bonbon alignment for large language models and the sweetness of best-of-n sampling. NeurIPS, 2024." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 305, + 301, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 305, + 301, + 323 + ], + "spans": [ + { + "bbox": [ + 46, + 305, + 301, + 323 + ], + "type": "text", + "content": "[160] Remi Munos et al. Nash learning from human feedback. ICML, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 46, + 323, + 301, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 323, + 301, + 342 + ], + "spans": [ + { + "bbox": [ + 46, + 323, + 301, + 342 + ], + "type": "text", + "content": "[161] Corby Rosset et al. Direct nash optimization: Teaching language models to self-improve with general preferences. arXiv, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 46, + 342, + 301, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 342, + 301, + 359 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 301, + 359 + ], + "type": "text", + "content": "[162] Daniele Calandriello et al. Human alignment of large language models through online preference optimisation. ICML, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 46, + 360, + 301, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 360, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 46, + 360, + 301, + 376 + ], + "type": "text", + "content": "[163] Eugene Choi et al. Self-improving robust preference optimization. *ICLR*, 2025." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 46, + 377, + 301, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 377, + 301, + 395 + ], + "spans": [ + { + "bbox": [ + 46, + 377, + 301, + 395 + ], + "type": "text", + "content": "[164] Haoyan Yang et al. Dynamic noise preference optimization for llm self-improvement via synthetic data. arXiv, 2025." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 46, + 396, + 301, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 396, + 301, + 413 + ], + "spans": [ + { + "bbox": [ + 46, + 396, + 301, + 413 + ], + "type": "text", + "content": "[165] Alexey Gorbatovski et al. Learn your reference model for real good alignment. arXiv, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 46, + 414, + 301, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 414, + 301, + 430 + ], + "spans": [ + { + "bbox": [ + 46, + 414, + 301, + 430 + ], + "type": "text", + "content": "[166] Yu Meng et al. Simpo: Simple preference optimization with a reference-free reward. NeurIPS, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 46, + 431, + 301, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 431, + 301, + 449 + ], + "spans": [ + { + "bbox": [ + 46, + 431, + 301, + 449 + ], + "type": "text", + "content": "[167] Teng Xiao et al. SimPER: A minimalist approach to preference alignment without hyperparameters. *ICLR*, 2025." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 46, + 449, + 301, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 449, + 301, + 468 + ], + "spans": [ + { + "bbox": [ + 46, + 449, + 301, + 468 + ], + "type": "text", + "content": "[168] Yixin Liu et al. Understanding reference policies in direct preference optimization. arXiv, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 46, + 468, + 301, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 468, + 301, + 493 + ], + "spans": [ + { + "bbox": [ + 46, + 468, + 301, + 493 + ], + "type": "text", + "content": "[169] Chaoqi Wang et al. Beyond reverse kl: Generalizing direct preference optimization with diverse divergence constraints. *ICLR*, 2023." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 46, + 495, + 301, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 495, + 301, + 512 + ], + "spans": [ + { + "bbox": [ + 46, + 495, + 301, + 512 + ], + "type": "text", + "content": "[170] Stewart Slocum et al. Diverse preference learning for capabilities and alignment. ICLR, 2025." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 46, + 513, + 301, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 513, + 301, + 538 + ], + "spans": [ + { + "bbox": [ + 46, + 513, + 301, + 538 + ], + "type": "text", + "content": "[171] Amitava Das et al. Dpo kernels: A semantically-aware, kernel-enhanced, and divergence-rich paradigm for direct preference optimization. arXiv, 2025." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 46, + 539, + 301, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 539, + 301, + 565 + ], + "spans": [ + { + "bbox": [ + 46, + 539, + 301, + 565 + ], + "type": "text", + "content": "[172] Mingye Zhu et al. FlipGuard: Defending preference alignment against update regression with constrained optimization. EMNLP, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 46, + 566, + 301, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 566, + 301, + 584 + ], + "spans": [ + { + "bbox": [ + 46, + 566, + 301, + 584 + ], + "type": "text", + "content": "[173] Qingyu Yin et al. Direct preference optimization using sparse feature-level constraints. arXiv, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 46, + 585, + 301, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 585, + 301, + 602 + ], + "spans": [ + { + "bbox": [ + 46, + 585, + 301, + 602 + ], + "type": "text", + "content": "[174] Yunhao Tang et al. Generalized preference optimization: A unified approach to offline alignment. ICML, 2024." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 46, + 602, + 301, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 602, + 301, + 620 + ], + "spans": [ + { + "bbox": [ + 46, + 602, + 301, + 620 + ], + "type": "text", + "content": "[175] Haozhe Ji et al. Towards efficient exact optimization of language model alignment. ICML, 2024." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 46, + 620, + 301, + 638 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 620, + 301, + 638 + ], + "spans": [ + { + "bbox": [ + 46, + 620, + 301, + 638 + ], + "type": "text", + "content": "[176] Arsalan Sharifnassab et al. Soft preference optimization: Aligning language models to expert distributions. arXiv, 2024." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 46, + 639, + 301, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 639, + 301, + 664 + ], + "spans": [ + { + "bbox": [ + 46, + 639, + 301, + 664 + ], + "type": "text", + "content": "[177] Janghwan Lee et al. Improving conversational abilities of quantized large language models via direct preference alignment. ACL, 2024." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 46, + 665, + 301, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 301, + 692 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 301, + 692 + ], + "type": "text", + "content": "[178] Audrey Huang et al. Correcting the mythos of kl-regularization: Direct alignment without overoptimization via chi-squared preference optimization. arXiv, 2025." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 46, + 693, + 301, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 693, + 301, + 711 + ], + "spans": [ + { + "bbox": [ + 46, + 693, + 301, + 711 + ], + "type": "text", + "content": "[179] Geon-Hyeong Kim et al. 
SafeDPO: A simple approach to direct preference optimization with enhanced safety. OpenReview, 2025." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 46, + 711, + 301, + 728 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 711, + 301, + 728 + ], + "spans": [ + { + "bbox": [ + 46, + 711, + 301, + 728 + ], + "type": "text", + "content": "[180] Akifumi Wachi et al. Stepwise alignment for constrained language model policy optimization. NeurIPS, 2024." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 46, + 728, + 301, + 738 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 728, + 301, + 738 + ], + "spans": [ + { + "bbox": [ + 46, + 728, + 301, + 738 + ], + "type": "text", + "content": "[181] Zixuan Liu et al. Enhancing llm safety via constrained direct" + } + ] + } + ], + "index": 36 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 44, + 564, + 738 + ], + "type": "list", + "angle": 0, + "index": 74, + "blocks": [ + { + "bbox": [ + 332, + 44, + 465, + 53 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 44, + 465, + 53 + ], + "spans": [ + { + "bbox": [ + 332, + 44, + 465, + 53 + ], + "type": "text", + "content": "preference optimization. arXiv, 2024." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 53, + 564, + 80 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 53, + 564, + 80 + ], + "spans": [ + { + "bbox": [ + 310, + 53, + 564, + 80 + ], + "type": "text", + "content": "[182] San Kim and Gary Geunbae Lee. Adversarial dpo: Harnessing harmful data for reducing toxicity with minimal impact on coherence and evasiveness in dialogue agents. arXiv, 2024." 
+ } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 80, + 564, + 98 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 80, + 564, + 98 + ], + "spans": [ + { + "bbox": [ + 310, + 80, + 564, + 98 + ], + "type": "text", + "content": "[183] Andrew Lee et al. A mechanistic understanding of alignment algorithms: a case study on dpo and toxicity. ICML, 2024." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 99, + 564, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 99, + 564, + 115 + ], + "spans": [ + { + "bbox": [ + 310, + 99, + 564, + 115 + ], + "type": "text", + "content": "[184] Yiming Zhang et al. Backtracking improves generation safety. ICLR, 2025." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 116, + 564, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 116, + 564, + 134 + ], + "spans": [ + { + "bbox": [ + 310, + 116, + 564, + 134 + ], + "type": "text", + "content": "[185] Seongho Son et al. Right now, wrong then: Non-stationary direct preference optimization under preference drift. arXiv, 2024." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 134, + 564, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 134, + 564, + 152 + ], + "spans": [ + { + "bbox": [ + 310, + 134, + 564, + 152 + ], + "type": "text", + "content": "[186] Eugene Choi et al. Self-improving robust preference optimization. *ICLR*, 2025." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 310, + 152, + 564, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 152, + 564, + 170 + ], + "spans": [ + { + "bbox": [ + 310, + 152, + 564, + 170 + ], + "type": "text", + "content": "[187] Adam Fisch et al. Robust preference optimization through reward model distillation. arXiv, 2024." 
+ } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 310, + 170, + 564, + 197 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 170, + 564, + 197 + ], + "spans": [ + { + "bbox": [ + 310, + 170, + 564, + 197 + ], + "type": "text", + "content": "[188] Yong Lin et al. On the limited generalization capability of the implicit reward model induced by direct preference optimization. EMNLP Findings, 2024." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 310, + 198, + 564, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 198, + 564, + 215 + ], + "spans": [ + { + "bbox": [ + 310, + 198, + 564, + 215 + ], + "type": "text", + "content": "[189] Fahim Tajwar et al. Preference fine-tuning of llms should leverage suboptimal, on-policy data. ICML, 2024." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 310, + 215, + 564, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 215, + 564, + 233 + ], + "spans": [ + { + "bbox": [ + 310, + 215, + 564, + 233 + ], + "type": "text", + "content": "[190] Hongyi Yuan et al. Rrrh: Rank responses to align language models with human feedback. NeurIPS, 2023." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 310, + 233, + 564, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 233, + 564, + 251 + ], + "spans": [ + { + "bbox": [ + 310, + 233, + 564, + 251 + ], + "type": "text", + "content": "[191] Ryan Park et al. Disentangling length from quality in direct preference optimization. ACL Findings, 2024." + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 310, + 251, + 564, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 251, + 564, + 277 + ], + "spans": [ + { + "bbox": [ + 310, + 251, + 564, + 277 + ], + "type": "text", + "content": "[192] Junru Lu et al. Eliminating biased length reliance of direct preference optimization via down-sampled KL divergence. EMNLP, 2024." 
+ } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 310, + 278, + 564, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 278, + 564, + 296 + ], + "spans": [ + { + "bbox": [ + 310, + 278, + 564, + 296 + ], + "type": "text", + "content": "[193] Weizhe Yuan et al. Following length constraints in instructions. arXiv, 2024." + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 310, + 296, + 564, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 296, + 564, + 314 + ], + "spans": [ + { + "bbox": [ + 310, + 296, + 564, + 314 + ], + "type": "text", + "content": "[194] Kian Ahrabian et al. The hitchhiker's guide to human alignment with* po. arXiv, 2024." + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 310, + 314, + 564, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 314, + 564, + 332 + ], + "spans": [ + { + "bbox": [ + 310, + 314, + 564, + 332 + ], + "type": "text", + "content": "[195] Wei Liu et al. Length desensitization in directed preference optimization. arXiv, 2024." + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 310, + 332, + 564, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 332, + 564, + 358 + ], + "spans": [ + { + "bbox": [ + 310, + 332, + 564, + 358 + ], + "type": "text", + "content": "[196] Guanzheng Chen et al. LongPO: Long context self-evolution of large language models through short-to-long preference optimization. ICLR, 2025." + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 310, + 359, + 564, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 359, + 564, + 376 + ], + "spans": [ + { + "bbox": [ + 310, + 359, + 564, + 376 + ], + "type": "text", + "content": "[197] Prasann Singhal et al. A long way to go: Investigating length correlations in RLHF. COLM, 2024." 
+ } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 310, + 376, + 564, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 376, + 564, + 395 + ], + "spans": [ + { + "bbox": [ + 310, + 376, + 564, + 395 + ], + "type": "text", + "content": "[198] Kyle Richardson et al. Understanding the logic of direct preference alignment through logic. arXiv, 2024." + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 310, + 395, + 564, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 395, + 564, + 422 + ], + "spans": [ + { + "bbox": [ + 310, + 395, + 564, + 422 + ], + "type": "text", + "content": "[199] Karel D'Oosterlinck et al. Anchored preference optimization and contrastive revisions: Addressing underspecification in alignment. arXiv, 2024." + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 310, + 422, + 564, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 422, + 564, + 440 + ], + "spans": [ + { + "bbox": [ + 310, + 422, + 564, + 440 + ], + "type": "text", + "content": "[200] Arka Pal et al. Smaug: Fixing failure modes of preference optimisation with dpo-positive. arXiv, 2024." + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 310, + 440, + 564, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 440, + 564, + 458 + ], + "spans": [ + { + "bbox": [ + 310, + 440, + 564, + 458 + ], + "type": "text", + "content": "[201] Yuzi Yan et al. 3d-properties: Identifying challenges in DPO and charting a path forward. ICLR, 2025." + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 310, + 458, + 564, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 458, + 564, + 476 + ], + "spans": [ + { + "bbox": [ + 310, + 458, + 564, + 476 + ], + "type": "text", + "content": "[202] Duanyu Feng et al. Towards analyzing and understanding the limitations of dpo: A theoretical perspective. arXiv, 2024." 
+ } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 310, + 476, + 564, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 476, + 564, + 494 + ], + "spans": [ + { + "bbox": [ + 310, + 476, + 564, + 494 + ], + "type": "text", + "content": "[203] Hui Yuan et al. A common pitfall of margin-based language model alignment: Gradient entanglement. *ICLR*, 2025." + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 310, + 495, + 564, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 495, + 564, + 512 + ], + "spans": [ + { + "bbox": [ + 310, + 495, + 564, + 512 + ], + "type": "text", + "content": "[204] Noam Razin et al. Unintentional unalignment: Likelihood displacement in direct preference optimization. arXiv, 2024." + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 310, + 512, + 564, + 530 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 512, + 564, + 530 + ], + "spans": [ + { + "bbox": [ + 310, + 512, + 564, + 530 + ], + "type": "text", + "content": "[205] Zhengyan Shi et al. Understanding likelihood over-optimisation in direct alignment algorithms. arXiv, 2024." + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 310, + 530, + 564, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 530, + 564, + 548 + ], + "spans": [ + { + "bbox": [ + 310, + 530, + 564, + 548 + ], + "type": "text", + "content": "[206] Yong Lin et al. Mitigating the alignment tax of RLHF. EMNLP, 2024." + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 310, + 548, + 564, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 548, + 564, + 566 + ], + "spans": [ + { + "bbox": [ + 310, + 548, + 564, + 566 + ], + "type": "text", + "content": "[207] Megh Thakkar et al. A deep dive into the trade-offs of parameter-efficient preference alignment techniques. ACL, 2024." 
+ } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 310, + 566, + 564, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 566, + 564, + 585 + ], + "spans": [ + { + "bbox": [ + 310, + 566, + 564, + 585 + ], + "type": "text", + "content": "[208] Keming Lu et al. Online merging optimizers for boosting rewards and mitigating tax in alignment. arXiv, 2024." + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 310, + 585, + 564, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 585, + 564, + 602 + ], + "spans": [ + { + "bbox": [ + 310, + 585, + 564, + 602 + ], + "type": "text", + "content": "[209] Angelica Chen et al. Preference learning algorithms do not learn preference rankings. NeurIPS, 2024." + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 310, + 602, + 564, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 602, + 564, + 628 + ], + "spans": [ + { + "bbox": [ + 310, + 602, + 564, + 628 + ], + "type": "text", + "content": "[210] Wenyi Xiao et al. A comprehensive survey of direct preference optimization: Datasets, theories, variants, and applications. arXiv, 2024." + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 310, + 629, + 564, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 629, + 564, + 647 + ], + "spans": [ + { + "bbox": [ + 310, + 629, + 564, + 647 + ], + "type": "text", + "content": "[211] Pierre Harvey Richemond et al. Offline regularised reinforcement learning for large language models alignment. arXiv, 2024." + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 310, + 647, + 564, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 647, + 564, + 665 + ], + "spans": [ + { + "bbox": [ + 310, + 647, + 564, + 665 + ], + "type": "text", + "content": "[212] Christian Wirth et al. A survey of preference-based reinforcement learning methods. JMLR, 2017." 
+ } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 310, + 665, + 564, + 682 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 665, + 564, + 682 + ], + "spans": [ + { + "bbox": [ + 310, + 665, + 564, + 682 + ], + "type": "text", + "content": "[213] Jiaming Ji et al. Ai alignment: A comprehensive survey. arXiv, 2023." + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 310, + 683, + 564, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 683, + 564, + 701 + ], + "spans": [ + { + "bbox": [ + 310, + 683, + 564, + 701 + ], + "type": "text", + "content": "[214] Xinpeng Wang et al. On the essence and prospect: An investigation of alignment approaches for big models. *IJCAI*, 2024." + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 310, + 701, + 564, + 728 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 701, + 564, + 728 + ], + "spans": [ + { + "bbox": [ + 310, + 701, + 564, + 728 + ], + "type": "text", + "content": "[215] Hannah Rose Kirk et al. The past, present and better future of feedback learning in large language models for subjective human preferences and values. EMNLP, 2023." + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 310, + 728, + 564, + 738 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 728, + 564, + 738 + ], + "spans": [ + { + "bbox": [ + 310, + 728, + 564, + 738 + ], + "type": "text", + "content": "[216] Patrick Fernandes et al. 
Bridging the gap: A survey on integrating" + } + ] + } + ], + "index": 73 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 44, + 301, + 738 + ], + "type": "list", + "angle": 0, + "index": 38, + "blocks": [ + { + "bbox": [ + 68, + 44, + 296, + 53 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 44, + 296, + 53 + ], + "spans": [ + { + "bbox": [ + 68, + 44, + 296, + 53 + ], + "type": "text", + "content": "(human) feedback for natural language generation. TACL, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 53, + 301, + 71 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 53, + 301, + 71 + ], + "spans": [ + { + "bbox": [ + 47, + 53, + 301, + 71 + ], + "type": "text", + "content": "[217] Timo Kaufmann et al. A survey of reinforcement learning from human feedback. arXiv, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 72, + 301, + 98 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 301, + 98 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 301, + 98 + ], + "type": "text", + "content": "[218] Ralph Allan Bradley and Milton E Terry. Rank analysis of incomplete block designs: I. the method of paired comparisons. Biometrika, 1952." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 99, + 301, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 99, + 301, + 116 + ], + "spans": [ + { + "bbox": [ + 47, + 99, + 301, + 116 + ], + "type": "text", + "content": "[219] John Schulman et al. 
Proximal policy optimization algorithms. arXiv, 2017." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 116, + 301, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 116, + 301, + 142 + ], + "spans": [ + { + "bbox": [ + 47, + 116, + 301, + 142 + ], + "type": "text", + "content": "[220] Arash Ahmadian et al. Back to basics: Revisiting reinforce style optimization for learning from human feedback in llms. ACL, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 143, + 301, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 143, + 301, + 169 + ], + "spans": [ + { + "bbox": [ + 47, + 143, + 301, + 169 + ], + "type": "text", + "content": "[221] Ziniu Li et al. ReMax: A simple, effective, and efficient reinforcement learning method for aligning large language models. ICML, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 170, + 301, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 170, + 301, + 189 + ], + "spans": [ + { + "bbox": [ + 47, + 170, + 301, + 189 + ], + "type": "text", + "content": "[222] Zhihong Shao et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 189, + 301, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 189, + 301, + 206 + ], + "spans": [ + { + "bbox": [ + 47, + 189, + 301, + 206 + ], + "type": "text", + "content": "[223] Jian Hu. Reinforce++: A simple and efficient approach for aligning large language models. arXiv, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 207, + 301, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 207, + 301, + 224 + ], + "spans": [ + { + "bbox": [ + 47, + 207, + 301, + 224 + ], + "type": "text", + "content": "[224] Chris Lu et al. 
Discovering preference optimization algorithms with and for large language models. NeurIPS, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 224, + 301, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 224, + 301, + 243 + ], + "spans": [ + { + "bbox": [ + 47, + 224, + 301, + 243 + ], + "type": "text", + "content": "[225] Hanyang Zhao et al. RainbowPO: A unified framework for combining improvements in preference optimization. ICLR, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 243, + 301, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 243, + 301, + 260 + ], + "spans": [ + { + "bbox": [ + 47, + 243, + 301, + 260 + ], + "type": "text", + "content": "[226] Hamish Ivison et al. Unpacking dpo and ppo: Disentangling best practices for learning from preference feedback. NeurIPS, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 261, + 301, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 261, + 301, + 278 + ], + "spans": [ + { + "bbox": [ + 47, + 261, + 301, + 278 + ], + "type": "text", + "content": "[227] Amir Saeidi et al. Insights into alignment: Evaluating dpo and its variants across multiple tasks. arXiv, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 278, + 301, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 278, + 301, + 304 + ], + "spans": [ + { + "bbox": [ + 47, + 278, + 301, + 304 + ], + "type": "text", + "content": "[228] Andi Nika et al. Reward model learning vs. direct policy optimization: a comparative analysis of learning from human preferences. ICML, 2024." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 305, + 301, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 305, + 301, + 323 + ], + "spans": [ + { + "bbox": [ + 47, + 305, + 301, + 323 + ], + "type": "text", + "content": "[229] Ziniu Li et al. When is rl better than dpo in rlhf? a representation and optimization perspective. *ICLR Tiny Papers*, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 323, + 301, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 323, + 301, + 341 + ], + "spans": [ + { + "bbox": [ + 47, + 323, + 301, + 341 + ], + "type": "text", + "content": "[230] Yao Zhao et al. Slic-hf: Sequence likelihood calibration with human feedback. arXiv, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 342, + 301, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 342, + 301, + 358 + ], + "spans": [ + { + "bbox": [ + 47, + 342, + 301, + 358 + ], + "type": "text", + "content": "[231] Feifan Song et al. Preference ranking optimization for human alignment. AAAI, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 359, + 301, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 359, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 359, + 301, + 376 + ], + "type": "text", + "content": "[232] Chaoqi Wang et al. Preference optimization with multi-sample comparisons. arXiv, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 377, + 301, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 377, + 301, + 395 + ], + "spans": [ + { + "bbox": [ + 47, + 377, + 301, + 395 + ], + "type": "text", + "content": "[233] Ziniu Li et al. Policy optimization in rlhf: The impact of out-of-preference data. arXiv, 2023." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 396, + 301, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 396, + 301, + 422 + ], + "spans": [ + { + "bbox": [ + 47, + 396, + 301, + 422 + ], + "type": "text", + "content": "[234] Lei Li et al. Improving reasoning ability of large language models via iterative uncertainty-based preference optimization. OpenReview, 2025." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 422, + 301, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 422, + 301, + 431 + ], + "spans": [ + { + "bbox": [ + 47, + 422, + 301, + 431 + ], + "type": "text", + "content": "[235] Abhimanyu Dubey et al. The llama 3 herd of models. arXiv, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 432, + 301, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 432, + 301, + 449 + ], + "spans": [ + { + "bbox": [ + 47, + 432, + 301, + 449 + ], + "type": "text", + "content": "[236] Lily H Zhang and Rajesh Ranganath. Win rate is all that can matter from preference data alone. OpenReview, 2025." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 47, + 449, + 301, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 449, + 301, + 468 + ], + "spans": [ + { + "bbox": [ + 47, + 449, + 301, + 468 + ], + "type": "text", + "content": "[237] Ganqu Cui et al. Ultrafeedback: Boosting language models with high-quality feedback. ICML, 2023." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 47, + 468, + 301, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 468, + 301, + 485 + ], + "spans": [ + { + "bbox": [ + 47, + 468, + 301, + 485 + ], + "type": "text", + "content": "[238] Jiaming Ji et al. Pku-saferlhf: Towards multi-level safety alignment for llms with human preference. arXiv, 2024." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 47, + 486, + 301, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 486, + 301, + 502 + ], + "spans": [ + { + "bbox": [ + 47, + 486, + 301, + 502 + ], + "type": "text", + "content": "[239] Zhilin Wang et al. Helpsteer: Multi-attribute helpfulness dataset for steerlm. arXiv, 2023." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 47, + 503, + 285, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 503, + 285, + 513 + ], + "spans": [ + { + "bbox": [ + 47, + 503, + 285, + 513 + ], + "type": "text", + "content": "[240] Hunter Lightman et al. Let's verify step by step. ICLR, 2023." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 47, + 513, + 301, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 513, + 301, + 529 + ], + "spans": [ + { + "bbox": [ + 47, + 513, + 301, + 529 + ], + "type": "text", + "content": "[241] Kawin Ethayarajh et al. Understanding dataset difficulty with v-usable information. ICML, 2022." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 47, + 530, + 301, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 530, + 301, + 548 + ], + "spans": [ + { + "bbox": [ + 47, + 530, + 301, + 548 + ], + "type": "text", + "content": "[242] Banghua Zhu et al. Starling-7b: Improving llm helpfulness & harmlessness with rlaif, 2023." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 47, + 548, + 301, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 548, + 301, + 566 + ], + "spans": [ + { + "bbox": [ + 47, + 548, + 301, + 566 + ], + "type": "text", + "content": "[243] Wing Lian et al. Openorca: An open dataset of gpt augmented flan reasoning traces, 2023." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 47, + 567, + 301, + 593 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 567, + 301, + 593 + ], + "spans": [ + { + "bbox": [ + 47, + 567, + 301, + 593 + ], + "type": "text", + "content": "[244] Luigi Daniele and Suphavadeeprasit. Amplify-instruct: Synthetically generated diverse multi-turn conversations for efficient llm training., 2023." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 47, + 593, + 301, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 593, + 301, + 611 + ], + "spans": [ + { + "bbox": [ + 47, + 593, + 301, + 611 + ], + "type": "text", + "content": "[245] Jiaming Ji et al. Beavertails: Towards improved safety alignment of llm via a human-preference dataset. NeurIPS, 2023." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 47, + 612, + 301, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 612, + 301, + 628 + ], + "spans": [ + { + "bbox": [ + 47, + 612, + 301, + 628 + ], + "type": "text", + "content": "[246] Andrew Maas et al. Learning word vectors for sentiment analysis. ACL, 2011." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 47, + 629, + 301, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 629, + 301, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 629, + 301, + 647 + ], + "type": "text", + "content": "[247] Michael Volske et al. Tl; dr: Mining reddit to learn automatic summarization. EMNLP Workshop, 2017." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 47, + 647, + 301, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 647, + 301, + 673 + ], + "spans": [ + { + "bbox": [ + 47, + 647, + 301, + 673 + ], + "type": "text", + "content": "[248] Deep Ganguli et al. Red teaming language models to reduce harms: Methods, scaling behaviors, and lessons learned. arXiv, 2022." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 47, + 674, + 301, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 674, + 301, + 692 + ], + "spans": [ + { + "bbox": [ + 47, + 674, + 301, + 692 + ], + "type": "text", + "content": "[249] Karl Cobbe et al. Training verifiers to solve math word problems. arXiv, 2021." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 47, + 693, + 301, + 710 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 693, + 301, + 710 + ], + "spans": [ + { + "bbox": [ + 47, + 693, + 301, + 710 + ], + "type": "text", + "content": "[250] Yann Dubois et al. Length-controlled alpacaeval: A simple way to debias automatic evaluators. arXiv, 2024." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 47, + 711, + 301, + 727 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 711, + 301, + 727 + ], + "spans": [ + { + "bbox": [ + 47, + 711, + 301, + 727 + ], + "type": "text", + "content": "[251] Lianmin Zheng et al. Judging llm-as-a-judge with mt-bench and chatbot arena. NeurIPS, 2023." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 47, + 728, + 301, + 738 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 728, + 301, + 738 + ], + "spans": [ + { + "bbox": [ + 47, + 728, + 301, + 738 + ], + "type": "text", + "content": "[252] Andy Zou et al. Universal and transferable adversarial attacks on" + } + ] + } + ], + "index": 37 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 44, + 564, + 736 + ], + "type": "list", + "angle": 0, + "index": 75, + "blocks": [ + { + "bbox": [ + 332, + 44, + 469, + 53 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 44, + 469, + 53 + ], + "spans": [ + { + "bbox": [ + 332, + 44, + 469, + 53 + ], + "type": "text", + "content": "aligned language models. arXiv, 2023." 
+ } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 53, + 564, + 71 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 53, + 564, + 71 + ], + "spans": [ + { + "bbox": [ + 310, + 53, + 564, + 71 + ], + "type": "text", + "content": "[253] Tianle Li et al. From live data to high-quality benchmarks: The arena-hard pipeline. 2024." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 72, + 564, + 88 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 72, + 564, + 88 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 564, + 88 + ], + "type": "text", + "content": "[254] Stephanie Lin et al. Truthfulqa: Measuring how models mimic human falsehoods. arXiv, 2021." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 89, + 564, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 89, + 564, + 106 + ], + "spans": [ + { + "bbox": [ + 310, + 89, + 564, + 106 + ], + "type": "text", + "content": "[255] Jeffrey Zhou et al. Instruction-following evaluation for large language models. arXiv, 2023." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 107, + 564, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 107, + 564, + 125 + ], + "spans": [ + { + "bbox": [ + 310, + 107, + 564, + 125 + ], + "type": "text", + "content": "[256] Mirac Suzgun et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv, 2022." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 310, + 125, + 564, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 125, + 564, + 143 + ], + "spans": [ + { + "bbox": [ + 310, + 125, + 564, + 143 + ], + "type": "text", + "content": "[257] Dan Hendrycks et al. Measuring mathematical problem solving with the math dataset. arXiv, 2021." 
+ } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 310, + 144, + 564, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 144, + 564, + 160 + ], + "spans": [ + { + "bbox": [ + 310, + 144, + 564, + 160 + ], + "type": "text", + "content": "[258] David Rein et al. Gpqa: A graduate-level google-proof q&a benchmark. COLM, 2024." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 310, + 161, + 564, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 161, + 564, + 178 + ], + "spans": [ + { + "bbox": [ + 310, + 161, + 564, + 178 + ], + "type": "text", + "content": "[259] Zayne Sprague et al. Musr: Testing the limits of chain-of-thought with multistep soft reasoning. arXiv, 2023." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 310, + 179, + 564, + 197 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 179, + 564, + 197 + ], + "spans": [ + { + "bbox": [ + 310, + 179, + 564, + 197 + ], + "type": "text", + "content": "[260] Yubo Wang et al. Mmlu-pro: A more robust and challenging multi-task language understanding benchmark. NeurIPS, 2024." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 310, + 198, + 564, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 198, + 564, + 215 + ], + "spans": [ + { + "bbox": [ + 310, + 198, + 564, + 215 + ], + "type": "text", + "content": "[261] Fengqing Jiang et al. Identifying and mitigating vulnerabilities in llm-integrated applications. arXiv, 2023." + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 310, + 216, + 564, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 216, + 564, + 233 + ], + "spans": [ + { + "bbox": [ + 310, + 216, + 564, + 233 + ], + "type": "text", + "content": "[262] Ning Ding et al. Enhancing chat language models by scaling high-quality instructional conversations. arXiv, 2023." 
+ } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 310, + 234, + 564, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 234, + 564, + 251 + ], + "spans": [ + { + "bbox": [ + 310, + 234, + 564, + 251 + ], + "type": "text", + "content": "[263] Qiyu Wu et al. Word alignment as preference for machine translation. EMNLP, 2024." + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 310, + 251, + 564, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 251, + 564, + 269 + ], + "spans": [ + { + "bbox": [ + 310, + 251, + 564, + 269 + ], + "type": "text", + "content": "[264] Yinghao Hu et al. Fine-tuning large language models for improving factuality in legal question answering. COLING, 2025." + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 310, + 270, + 564, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 270, + 564, + 287 + ], + "spans": [ + { + "bbox": [ + 310, + 270, + 564, + 287 + ], + "type": "text", + "content": "[265] Leonidas Gee et al. Code-optimise: Self-generated preference data for correctness and efficiency. arXiv, 2024." + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 310, + 288, + 564, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 288, + 564, + 304 + ], + "spans": [ + { + "bbox": [ + 310, + 288, + 564, + 304 + ], + "type": "text", + "content": "[266] Yibo Miao et al. Aligning codellms with direct preference optimization. arXiv, 2024." + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 310, + 305, + 564, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 305, + 564, + 323 + ], + "spans": [ + { + "bbox": [ + 310, + 305, + 564, + 323 + ], + "type": "text", + "content": "[267] Kechi Zhang et al. Codedpo: Aligning code models with self generated and verified source code. arXiv, 2024." 
+ } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 310, + 324, + 564, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 324, + 564, + 341 + ], + "spans": [ + { + "bbox": [ + 310, + 324, + 564, + 341 + ], + "type": "text", + "content": "[268] Guoxin Chen et al. Step-level value preference optimization for mathematical reasoning. EMNLP, 2024." + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 310, + 342, + 564, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 342, + 564, + 367 + ], + "spans": [ + { + "bbox": [ + 310, + 342, + 564, + 367 + ], + "type": "text", + "content": "[269] Wen Lai et al. LLMs beyond English: Scaling the multilingual capability of LLMs with cross-lingual feedback. ACL Findings, 2024." + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 310, + 369, + 564, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 369, + 564, + 386 + ], + "spans": [ + { + "bbox": [ + 310, + 369, + 564, + 386 + ], + "type": "text", + "content": "[270] Yuxin Chen et al. On softmax direct preference optimization for recommendation. NeurIPS, 2024." + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 310, + 387, + 564, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 387, + 564, + 404 + ], + "spans": [ + { + "bbox": [ + 310, + 387, + 564, + 404 + ], + "type": "text", + "content": "[271] Zhuoxi Bai et al. Finetuning large language model for personalized ranking. arXiv, 2024." + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 310, + 404, + 564, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 404, + 564, + 422 + ], + "spans": [ + { + "bbox": [ + 310, + 404, + 564, + 422 + ], + "type": "text", + "content": "[272] Yi Gu et al. Diffusion-rpo: Aligning diffusion models through relative preference optimization. arXiv, 2024." 
+ } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 310, + 422, + 564, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 422, + 564, + 440 + ], + "spans": [ + { + "bbox": [ + 310, + 422, + 564, + 440 + ], + "type": "text", + "content": "[273] Shivanshu Shekhar et al. See-dpo: Self entropy enhanced direct preference optimization. arXiv, 2024." + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 310, + 441, + 564, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 441, + 564, + 458 + ], + "spans": [ + { + "bbox": [ + 310, + 441, + 564, + 458 + ], + "type": "text", + "content": "[274] Shufan Li et al. Aligning diffusion models by optimizing human utility. NeurIPS, 2024." + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 310, + 459, + 564, + 484 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 459, + 564, + 484 + ], + "spans": [ + { + "bbox": [ + 310, + 459, + 564, + 484 + ], + "type": "text", + "content": "[275] Navonil Majumder et al. Tango 2: Aligning diffusion-based text-to-audio generations through direct preference optimization. ACM MM, 2024." + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 310, + 485, + 564, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 485, + 564, + 503 + ], + "spans": [ + { + "bbox": [ + 310, + 485, + 564, + 503 + ], + "type": "text", + "content": "[276] Bram Wallace et al. Diffusion model alignment using direct preference optimization. CVPR, 2024." + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 310, + 503, + 564, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 503, + 564, + 521 + ], + "spans": [ + { + "bbox": [ + 310, + 503, + 564, + 521 + ], + "type": "text", + "content": "[277] Shentao Yang et al. A dense reward view on aligning text-to-image diffusion with preference. ICML, 2024." 
+ } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 310, + 521, + 564, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 521, + 564, + 539 + ], + "spans": [ + { + "bbox": [ + 310, + 521, + 564, + 539 + ], + "type": "text", + "content": "[278] Kai Yang et al. Using human feedback to fine-tune diffusion models without any reward model. CVPR, 2024." + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 310, + 540, + 564, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 540, + 564, + 557 + ], + "spans": [ + { + "bbox": [ + 310, + 540, + 564, + 557 + ], + "type": "text", + "content": "[279] Buhua Liu et al. Alignment of diffusion models: Fundamentals, challenges, and future. arXiv, 2024." + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 310, + 558, + 564, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 558, + 564, + 583 + ], + "spans": [ + { + "bbox": [ + 310, + 558, + 564, + 583 + ], + "type": "text", + "content": "[280] Shengzhi Li et al. Multi-modal preference alignment remedies degradation of visual instruction tuning on language models. ACL, 2024." + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 310, + 584, + 564, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 584, + 564, + 601 + ], + "spans": [ + { + "bbox": [ + 310, + 584, + 564, + 601 + ], + "type": "text", + "content": "[281] Ziqi Liang et al. AlignCap: Aligning speech emotion captioning to human preferences. EMNLP, 2024." + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 310, + 602, + 564, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 602, + 564, + 620 + ], + "spans": [ + { + "bbox": [ + 310, + 602, + 564, + 620 + ], + "type": "text", + "content": "[282] Elmira Amirloo et al. Understanding alignment in multimodal llms: A comprehensive study. arXiv, 2024." 
+ } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 310, + 620, + 564, + 638 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 620, + 564, + 638 + ], + "spans": [ + { + "bbox": [ + 310, + 620, + 564, + 638 + ], + "type": "text", + "content": "[283] Jinlan Fu et al. Chip: Cross-modal hierarchical direct preference optimization for multimodal llms. arXiv, 2025." + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 310, + 639, + 564, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 639, + 564, + 664 + ], + "spans": [ + { + "bbox": [ + 310, + 639, + 564, + 664 + ], + "type": "text", + "content": "[284] Ruohong Zhang et al. Direct preference optimization of video large multimodal models from language model reward. arXiv, 2024." + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 310, + 665, + 564, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 665, + 564, + 692 + ], + "spans": [ + { + "bbox": [ + 310, + 665, + 564, + 692 + ], + "type": "text", + "content": "[285] Yuxi Xie et al. V-DPO: Mitigating hallucination in large vision language models via vision-guided direct preference optimization. EMNLP Findings, 2024." + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 310, + 693, + 564, + 710 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 693, + 564, + 710 + ], + "spans": [ + { + "bbox": [ + 310, + 693, + 564, + 710 + ], + "type": "text", + "content": "[286] Peng Xu et al. Lvlm-ehub: A comprehensive evaluation benchmark for large vision-language models. TPAMI, 2025." + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 310, + 711, + 564, + 736 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 711, + 564, + 736 + ], + "spans": [ + { + "bbox": [ + 310, + 711, + 564, + 736 + ], + "type": "text", + "content": "[287] Zhongzhan Huang et al. 
A causality-aware paradigm for evaluating creativity of multimodal large language models. TPAMI, 2025." + } + ] + } + ], + "index": 74 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13502/db12ade8-3943-4647-bb0d-ce8160710750_content_list.json b/data/2025/2503_13xxx/2503.13502/db12ade8-3943-4647-bb0d-ce8160710750_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f00095e3fe4e0cd45f3399a43be62b9335987e4a --- /dev/null +++ b/data/2025/2503_13xxx/2503.13502/db12ade8-3943-4647-bb0d-ce8160710750_content_list.json @@ -0,0 +1,1857 @@ +[ + { + "type": "text", + "text": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey", + "text_level": 1, + "bbox": [ + 155, + 101, + 846, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yuxuan Liang $^{1}$ , Haomin Wen $^{2,1}$ , Yutong Xia $^{3}$ , Ming Jin $^{4}$ , Bin Yang $^{5}$ , Flora Salim $^{6}$ , Qingsong Wen $^{7}$ , Shirui Pan $^{4}$ , Gao Cong $^{8}$", + "bbox": [ + 228, + 162, + 769, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1The Hong Kong University of Science and Technology (Guangzhou) 2Carnegie Mellon University 3National University of Singapore 4Griffith University 5East China Normal University 6University of New South Wales 7Squirrel AI Learning, USA 8Nanyang Technology University {yuxiang,yutong.x}@outlook.com,{wenhaomin.whm,mingjinedu,qingsongedu}@gmail.com flora.salim@unsw.edu.au,byang@dase.ecnu.edu.cn,s.pan@griffith.edu.au,gaocong@ntu.edu.sg", + "bbox": 
[ + 166, + 198, + 830, + 276 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 83, + 282, + 156, + 297 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Spatio-Temporal (ST) data science, which includes sensing, managing, and mining large-scale data across space and time, is fundamental to understanding complex systems in domains such as urban computing, climate science, and intelligent transportation. Traditional deep learning approaches have significantly advanced this field, particularly in the stage of ST data mining. However, these models remain task-specific and often require extensive labeled data. Inspired by the success of Foundation Models (FM), especially large language models, researchers have begun exploring the concept of Spatio-Temporal Foundation Models (STFMs) to enhance adaptability and generalization across diverse ST tasks. Unlike prior architectures, STFMs empower the entire workflow of ST data science, ranging from data sensing, management, to mining, thereby offering a more holistic and scalable approach. Despite rapid progress, a systematic study of STFMs for ST data science remains lacking. This survey aims to provide a comprehensive review of STFMs, categorizing existing methodologies and identifying key research directions to advance ST general intelligence.", + "bbox": [ + 81, + 301, + 483, + 551 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 83, + 555, + 218, + 569 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Humans live in a world shaped by the dynamic interplay of countless elements across space and time. Spatio-Temporal (ST) Data, which refer to data that encapsulate ST phenomena, track the evolution of objects or events across locations and time [5], such as meteorological records, traffic patterns, and human traces. 
These data are frequently sourced from a wide array of platforms, ranging from IoT devices, GPS sensors, social media, to remote sensing.", + "bbox": [ + 81, + 573, + 482, + 670 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Within this context, Spatio-Temporal Data Science focuses on sensing, managing, and mining these datasets to uncover patterns, understand complex systems, and predict future dynamics. Motivated by its transformative potential, this field addresses critical challenges across urban environments and even the entire planet, enabling decision-making and fostering innovations that contribute to building smarter, more sustainable, and resilient systems [178].", + "bbox": [ + 81, + 670, + 482, + 768 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.", + "bbox": [ + 81, + 779, + 482, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 83, + 853, + 303, + 863 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "© 2025 Copyright held by the owner/author(s). 
Publication rights licensed to ACM.", + "bbox": [ + 83, + 864, + 472, + 875 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM ISBN 978-x-xxxxx-xxxxx-x/YY/MM", + "bbox": [ + 84, + 875, + 269, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://doi.org/10.1145/nnnnnnn.nnnnnnn", + "bbox": [ + 84, + 883, + 284, + 895 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/585a84a4e1c2d564b53b7198e4a5ac28aaf09f4523682713f3c283074ed415a4.jpg", + "image_caption": [ + "Figure 1: ST Foundation Models (STFM), which include LLM and PFM, are pretrained with or applied to diverse ST data, with the abilities of perception, optimization, and reasoning. STFMs can, in turn, enhance each stage of ST data science." + ], + "image_footnote": [], + "bbox": [ + 519, + 282, + 906, + 425 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In the era of deep learning, the community has primarily concentrated on spatio-temporal representation learning, as a fundamental step of ST data mining [129]. Key advancements include the development of Spatio-Temporal Graph Neural Networks (STGNN) [51] and transformer-based architectures, which have shown remarkable success in tasks such as traffic forecasting [80, 146], air quality prediction [82], and human mobility analytics [132]. STGNNs integrate Graph Neural Networks (GNN) with temporal learning modules (e.g., GRU [6, 70], TCN [140, 141]) to model ST correlations, while transformer models leverage self-attention mechanisms [37, 78, 177] to process complex dependencies across space and time. 
Additionally, there has been significant research on self-supervised learning [46, 74, 92], where models are trained to extract powerful representations with minimal reliance on large annotated datasets.", + "bbox": [ + 511, + 493, + 915, + 686 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Driven by the success of Foundation Models (FM), especially Large Language Models (LLM), researchers have recently begun exploring the concept of Spatio-Temporal Foundation Models (STFM) [32, 81, 169]. By harnessing LLMs, it becomes possible to develop more generalized, adaptable solutions that can be fine-tuned for specific tasks with minimal data. Another prominent approach involves pretraining FMs (denoted as PFM) on cross-domain ST data and adapting them for particular domains. In contrast to previous architectures (e.g., STGNNs), STFMs integrates the capabilities of perception, reasoning and optimization, which not only promises to revolutionize ST data mining, but also empowers other stages of ST data science, such as ST data sensing and management (See Figure 1). This shift has the potential to enhance the scalability and efficiency of ST applications, offering a more holistic approach to addressing challenges in urban computing, climate science, etc.", + "bbox": [ + 511, + 688, + 915, + 896 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2503.13502v1 [cs.DB] 12 Mar 2025", + "bbox": [ + 22, + 260, + 57, + 705 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/08bcc66a52134813071d6c4eb50489b0a630570ee89c53dc5e3c14381549b4d2.jpg", + "table_caption": [ + "Table 1: Our survey vs. related surveys on FMs for learning ST data, such as locations (L), trajectories (T), events (E), ST rasters (R), and ST graphs (G). The applications (App.) include numerical (N) and inferential (I) problems." + ], + "table_footnote": [], + "table_body": "
SurveyYearVenueSensingManage.MiningApp.Data
Jin et al. [54]2023-NR,G
Jiang et al. [48]2024IJCAINR,G
Liang et al. [81]2024KDDNT,E,R,G
Zhang et al. [169]2024KDDN,IL,T,E,R,G
Goodge et al. [32]2025-NT,E,R,G
Ours2025-N,IL,T,E,R,G
", + "bbox": [ + 86, + 162, + 478, + 239 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Despite their rapid advancements, a systematic analysis of STFMs across the entire workflow of ST data science remains lacking. First, prior surveys have primarily focused on utilizing LLMs as the key tool for ST data mining [32, 54, 81, 169], leaving a significant gap in understanding how these models can be integrated throughout the entire process, i.e., with less focus placed on their role in the earlier stages of sensing and management. Second, they predominantly examine the applications of STFMs to numerical problems (e.g., forecasting, imputation) while overlooking their role in inferential problem-solving such as decision-making systems.", + "bbox": [ + 81, + 241, + 482, + 380 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To bridge these gaps, this paper aims to provide a more comprehensive survey of STFMs across all stages of ST data science, spanning data sensing, management, and mining (see Figure 1). For example, LLMs can enhance ST data sensing by actively processing citizen reports, optimizing participatory sensing strategies, and generating synthetic data at scale. In terms of data management, they can automate data cleaning tasks, construct meaningful knowledge graphs for data integration, and facilitate more efficient retrieval of cross-modal datasets. Beyond these stages, our survey also explores how STFMs support a broader range of downstream applications, including numerical and inferential problems. Through this endeavor, we seek to illuminate an overall vision of STFMs, thereby enhancing comprehension regarding their potential to optimize ST data science, fostering more integrated and adaptable solutions.", + "bbox": [ + 81, + 380, + 482, + 571 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Meanwhile, we systematically investigate the key methodologies of STFMs for modeling a variety of ST data. 
We begin by categorizing existing STFMs into two main classes: LLMs and Pretrained Foundation Models (PFMs). For LLMs, which are pretrained on linguistic data, we focus on their usage as a zero-shot [33] or few-shot learner [53, 73], where various prompting and fine-tuning strategies have been explored, respectively. For PFMs, which are trained from scratch based on cross-domain ST data [40, 158, 189], we examine their neural architectures, pretraining methods, and their adaptability to different types of ST data, including location data, trajectory data, events, ST raster data, and ST graph data.", + "bbox": [ + 81, + 573, + 482, + 726 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our major contributions lie in three aspects:", + "bbox": [ + 98, + 726, + 444, + 739 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Comprehensive and up-to-date survey: We provide the first comprehensive and modern survey of FMs across the entire workflow of ST data science, covering data sensing, management, and mining. We also explore a broader range of downstream tasks and data types compared to most existing surveys (See Table 1).", + "- Vision and Methodologies: We propose a vision for STFMs, identifying key capabilities essential for their success, and discuss current methodologies for implementing these abilities in detail.", + "- Future directions: We highlight promising directions for advancing ST data science with foundation models, encouraging further research and exploration in this emerging field." + ], + "bbox": [ + 83, + 739, + 482, + 891 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Paper Organization. The remainder of this paper is organized as follows: Sec. 2 provides essential background on FMs and ST data. Sec. 3 and 4 present a taxonomy of STFMs regarding the workflow and methodologies, respectively. Sec. 
5 offers concluding remarks, and Appendix A highlights promising avenues for future research.", + "bbox": [ + 511, + 106, + 915, + 176 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Background", + "text_level": 1, + "bbox": [ + 514, + 181, + 645, + 198 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Foundation models. FMs are deep neural networks trained on vast datasets, enabling them to acquire broad, cross-domain knowledge and exceptional adaptability [45]. Unlike earlier task-specific models, FMs can be efficiently fine-tuned with relatively small amounts of task-specific data, offering remarkable flexibility, effectiveness, and cost efficiency. Pioneering attempts like BERT [58] and GPT-3 [11] have reshaped natural language processing. More recent models, e.g., GPT-4o [45] and DeepSeek-R1 [36], further push the frontiers of generative capabilities, enabling more nuanced reasoning, robust domain adaptation, and improved context-awareness in diverse tasks. In ST domains, recent FMs like Time-MoE [119], Chronos [4], and UniST [158] have made remarkable strides in time series analysis and universal ST forecasting, while UniTraj [189] serves as a versatile foundation for various trajectory-related tasks. Inspired by these successes, this survey delves into the utilization of FMs in the entire workflow of ST data science, covering data sensing, management, and mining.", + "bbox": [ + 511, + 200, + 915, + 436 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Formulation of Spatio-Temporal Data. ST data refer to datasets that integrate spatial (location-based) and temporal (time-based) information, capturing dynamic patterns and relationships over space and time. Figure 2 depicts the basic ST data structures discussed in this survey, including locations, trajectories, events, ST rasters, and ST graphs. 
Their definitions are delineated as follows.", + "bbox": [ + 511, + 439, + 915, + 521 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Definition 1 (Location). A location refers to a fixed spatial point or object in a geographical space, represented by the geospatial coordinates $l \\in \\mathbb{R}^2$ , i.e., latitude and longitude. It is often profiled by the corresponding satellite image, street-view image, and descriptions.", + "bbox": [ + 511, + 521, + 913, + 578 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Definition 2 (Trajectory). A trajectory is a sequence of time-ordered locations that describe the movements of an object in the geographical space. It can be formulated as $\\mathcal{T} = p_1\\rightarrow p_2\\rightarrow \\dots \\rightarrow p_T$ where $p_i = (l_i,t_i)$ , and $l_{i}$ denotes the object's location at time $t_i$ .", + "bbox": [ + 511, + 578, + 915, + 633 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Definition 3 (Event). An event sequence is a series of timestamped events, denoted as $\\mathcal{E} = v_{1}\\rightarrow v_{2}\\rightarrow \\dots \\rightarrow v_{T}$ , describing the progress of actions or occurrences, where $v_{i} = (e_{i},t_{i})$ and $e_i\\in \\mathbb{R}^d$ is an event and $t_i$ denotes the time when $e_i$ occurs.", + "bbox": [ + 511, + 633, + 913, + 689 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/037cc7fecab764f3c9489d19c64e195a2e4237fe21b75687cf6cec155fb66656.jpg", + "image_caption": [ + "Figure 2: Illustration of various types of ST data." + ], + "image_footnote": [], + "bbox": [ + 531, + 691, + 890, + 875 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 83, + 75, + 313, + 87 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Yuxuan Liang et al.", + "bbox": [ + 818, + 75, + 911, + 87 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Definition 4 (Spatio-Temporal Raster). 
An ST raster can be denoted as $\\mathcal{X} = < \\mathbf{X}_1,\\mathbf{X}_2,\\dots ,\\mathbf{X}_T > \\in \\mathbb{R}^{H\\times W\\times T\\times D}$ , where $\\mathbf{X}_t\\in \\mathbb{R}^{H\\times W\\times D}$ denotes the signals collected from $N = HW$ evenly distributed locations at time $t$ , each characterized by $D$ feature attributes.", + "bbox": [ + 81, + 106, + 480, + 161 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Definition 5 (Spatio-Temporal Graph). An ST graph extends the ST raster to be $X = < \\mathbf{X}_1, \\mathbf{X}_2, \\ldots, \\mathbf{X}_T > \\in \\mathbb{R}^{N \\times T \\times D}$ by explicitly incorporating spatial correlations with a graph $G_t = (V, E_t, \\mathbf{A}_t)$ when $N$ locations are not uniformly distributed. Here $V$ is the set of nodes, $E_t$ is the set of edges, and $\\mathbf{A}_t \\in \\mathbb{R}^{N \\times N}$ is the adjacency matrix at time $t$ . The size of $V$ is usually static.", + "bbox": [ + 81, + 162, + 480, + 247 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 The Workflow Perspective", + "text_level": 1, + "bbox": [ + 83, + 258, + 330, + 273 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As shown in Figure 3, we examine STFMs from a holistic, bottom-up perspective, emphasizing their composition across four key aspects:", + "bbox": [ + 81, + 276, + 482, + 305 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- ST Data Sensing refers to the acquisition of data that varies over both space and time from diverse resources (e.g., sensors, satellites, social media), to capture dynamic environmental, geographic, or social phenomena. We also consider synthetic data generation for enhancing data diversity and quantity.", + "- ST Data Management focuses on storing, indexing, and organizing these large-scale, heterogeneous ST datasets, incorporating strategies like distributed architectures for efficient retrieval and integration. 
FMs can enhance this process by facilitating data cleaning, query & retrieval, and data integration.", + "- ST Data Mining involves learning and analyzing ST data that varies across both space and time to uncover patterns, trends, and relationships, using data mining (DM), deep learning (DL) techniques, or the newly-proposed STFMs with strong capabilities in perception, optimization, and reasoning.", + "- Downstream Applications: This stage harnesses the above insights from ST data to drive real-world applications, ranging from numerical problems to inferential problems, where informed actions and policies are formulated." + ], + "bbox": [ + 83, + 306, + 482, + 570 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "By examining these four aspects, we can better understand how STFMs advance from raw data acquisition to high-level service providing, ultimately enabling more intelligent, adaptable, and impactful solutions. We will detail each stage in the following sections.", + "bbox": [ + 81, + 573, + 482, + 628 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/7550fb89f980864400839d7d1a4cff676d111c15a655bac2fa282342c99fd317.jpg", + "image_caption": [ + "Figure 3: The framework of STFMs for ST data science." + ], + "image_footnote": [], + "bbox": [ + 86, + 637, + 478, + 875 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Spatio-Temporal Data Sensing", + "text_level": 1, + "bbox": [ + 514, + 104, + 803, + 121 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "FMs revolutionize ST data sensing from two complementary aspects: real-world data sensing, which involves collecting data from physical sources, and synthetic data generation, which creates synthetic ST data through foundation models.", + "bbox": [ + 511, + 125, + 915, + 179 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1.1 Real-World Data Sensing. 
Advances in sensing and data acquisition technologies have led to the generation of vast amounts of ST data. FMs are increasingly applied in human-centric active sensing, particularly in the context of citizen reporting for urban and environmental monitoring [41]. These models act as powerful agents for collecting and processing real-time data from citizens, enabling the efficient handling of ST data [19, 27, 101]. For example, citizens might constantly report incidents, environmental changes, or social events through text or voice [178]. By understanding these reports, LLMs can categorize, prioritize, and trigger appropriate responses for various urban issues, from traffic congestion to environmental hazards. This enhances the decision-making process by continuously updating their models with new data streams. Thus, LLMs are not just passive analytical tools but active participants that help make urban environments more responsive and adaptive to citizen inputs, transforming traditional citizen feedback into actionable knowledge, enabling more sustainable and resilient cities.", + "bbox": [ + 511, + 183, + 913, + 419 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "FMs can also function as intelligent schedulers or simulate multiagent systems to optimize the recruitment and coordination of participants for crowdsensing, particularly under budget constraints [41, 139, 191]. By analyzing ST data and understanding context, LLMs can identify regions and times where crowdsensing efforts will yield the most valuable information. They dynamically recruit participants based on proximity, availability, and past contributions, reducing redundant data collection. Additionally, LLMs simulate multiple agents interacting in real time, ensuring the efficient distribution of sensing tasks across a network of citizens or devices [190]. 
This strategic scheduling and agent-based coordination maximize coverage while minimizing costs, ensuring that crowdsensing delivers valuable, real-time insights under budgetary constraints.", + "bbox": [ + 511, + 419, + 913, + 599 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1.2 Synthetic Data Generation. FMs can also facilitate data generation, which enhances ST data by increasing its diversity, improving model robustness, and compensating for missing or sparse information [95]. This is crucial for ST tasks like mobility analytics, where collecting real-world data is often costly or raises privacy concerns. For instance, Trajectory-LLM [154] generates vehicle trajectories from brief textual descriptions of vehicle interactions, whereas Traj-LLM [56] generates human trajectories by leveraging personas, memory modules, and routine profiles. LLMob [126] advances mobility data generation, offering flexibility in modeling diverse urban activities and personal mobility patterns, thus improving transportation system modeling and analysis. LLMs have also been employed to construct synthetic environments that replicate real-world conditions across diverse domains, including intelligent transportation [1] and disaster management [31].", + "bbox": [ + 511, + 602, + 913, + 811 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 Spatio-Temporal Data Management", + "text_level": 1, + "bbox": [ + 513, + 821, + 846, + 837 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Upon the acquisition of ST data, the challenge of effective management emerges, particularly in addressing data quality issues (e.g., missing values/views) and facilitating data retrieval and integration. 
Within this context, FMs can be harnessed in the following ways.", + "bbox": [ + 511, + 840, + 913, + 896 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey", + "bbox": [ + 83, + 75, + 447, + 87 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 684, + 75, + 913, + 87 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2.1 Data Cleaning. Data cleaning is the process of improving data quality by addressing issues such as missing values, low sampling rates, and noise. For example, ST data often exhibit missing values due to various factors like sensor malfunctions and transmission disruptions [178]. Filling in these missing values[113] is crucial for ensuring the integrity of predictive models, optimizing strategies, and facilitating informed decision-making. Recent literature reveals that LLMs can serve as powerful zero-shot [164] or few-shot [17, 172] learners to data imputation by leveraging their ability to identify and learn complex ST patterns. PLMTrajRec [135], utilizing a pretrained language model to recover sparse trajectory data by unifying intervals and inferring road conditions, showing effective generalization across varied sampling intervals in tests. Moreover, scholars have investigated the potential of leveraging LLMs to augment missing views or information, such as urban region profiling [40, 150, 163] and traffic video captioning [25].", + "bbox": [ + 86, + 107, + 480, + 328 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.2 Query & Retrieval. Meanwhile, LLM can be applied to querying and retrieval to enhance information retrieval accuracy under the ST context. 
By leveraging their advanced natural language understanding capabilities, LLMs can process user queries in a more contextual and semantically rich manner, enabling precise retrieval of relevant information from structured and unstructured data sources. For instance, UrbanLLM [47] finetunes LLMs for urban activity planning and management, which serves as a problem solver that decodes urban-related queries into several sub-tasks, with each one solved by suitable spatio-temporal AI models. Alamsyah et al. [2] propose an automated smart city planning system that utilizes a personalized LLM with Retrieval Augmented Generation (RAG) [30] to generate tailored urban planning recommendations while ensuring data privacy, where RAG is used to retrieve relevant urban planning documents for context-aware responses. Another line of work [67, 75, 170, 179] utilizes Multimodal LLM for cross-modal information retrieval to enhance urban computing tasks.", + "bbox": [ + 86, + 334, + 480, + 569 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.3 Data Integration. Data integration seeks to combine information from disparate sources, often necessitating the understanding and mapping of relationships between entities in heterogeneous datasets. LLMs are increasingly being employed in this domain, particularly for knowledge graph construction [24], where they automate and enhance the extraction, integration, and reasoning of related data. In the context of ST data, LLMs facilitate data integration by leveraging heterogeneous urban data sources, performing relational triplet extraction, and completing knowledge graphs through geospatial reasoning [94, 106]. 
A pioneering study UrbanKGent [105] proposes an LLM-based Agent framework to automate the process of urban knowledge graph construction.", + "bbox": [ + 86, + 571, + 480, + 738 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 Spatio-Temporal Data Mining", + "text_level": 1, + "bbox": [ + 86, + 752, + 367, + 768 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Unlike traditional data mining, which primarily focuses on structured datasets, ST data mining captures intricate spatial and temporal dependencies within ST data using machine learning or deep learning techniques [51, 129, 167]. With the emergence of FMs and LLMs, Spatio-Temporal Foundation Models (STFMs) offer new possibilities by integrating perception, optimization, and reasoning capabilities to enhance ST data mining. In this section, we explore these key capabilities, while their specific applications across different domains are detailed in Sec. 3.4.", + "bbox": [ + 86, + 771, + 480, + 893 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3.1 Perception. In STFMs, perception encompasses the ability to effectively model, interpret, and generalize complex spatial and temporal patterns, enabling a deeper understanding of dynamic environments. This capability can be categorized into two key perspectives. The first view pertains to an agent's ability to perceive and understand its surrounding environment, capturing visual or contextual interactions within real-world scenarios such as smart cities [151], indoor activities [152, 153], and mobile Apps [127].", + "bbox": [ + 517, + 107, + 911, + 217 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The second aspect involves interpreting and extracting ST patterns from sensor data, ensuring accurate predictions across diverse domains. Domain-agnostic approaches, such as STEP [117] and GPT-ST [74], have employed pretraining strategies that leverage historical observations to enhance forecasting performance. 
In urban computing, models like TFM [130] and OpenCity [72] utilize graph-based FMs to analyze behaviors and interactions within transportation systems, yielding promising results in traffic prediction. In climate science, Pangu [9], trained on 39 years of global climate data, delivers superior deterministic forecasting outcomes across all evaluated variables when compared to leading numerical weather prediction systems. Additional notable examples in this area include the works [60, 76, 104, 108]. Despite these advances, achieving robust generalization remains a critical challenge, as most existing research has been confined to in-domain applications. While models like UniST [158] are designed as one-for-all solutions for diverse ST scenarios, their training datasets and evaluation testbeds are predominantly limited to transportation. Nevertheless, their underlying technique stacks show promise for broader cross-domain and cross-modality generalization. Other significant contributions in this realm include UniFlow [159] and UrbanDiT [160].", + "bbox": [ + 517, + 218, + 911, + 507 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3.2 Optimization. Building upon the perceptual foundations, the optimization ability focuses on refining and adapting models to achieve specific, task-oriented objectives. In other words, models are not only expected to capture rich ST patterns but also to drive actionable decision-making in dynamic, real-world scenarios. This involves integrating advanced optimization strategies that tailor model behavior to the unique demands of applications.", + "bbox": [ + 517, + 508, + 911, + 604 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A prominent approach involves agent-based frameworks. For example, in traffic signal control, traditional methods (e.g., RL) are now augmented by frameworks that use LLMs as decision-making agents [61]. 
These systems leverage real-time traffic data and expert prompts to enable human-like planning, resulting in more adaptive and interpretable control strategies. Similarly, CityGPT [35] decomposes ST analysis into specialized sub-tasks, handled by temporal, spatial, and fusion agents, to efficiently process IoT data and generate insightful visualizations. AgentMove [28] addresses human mobility prediction by breaking down the task into modules for individual pattern mining, urban structure analysis, and collective behavior extraction. In geo-science, systems like Geode [38] integrate explicit optimization modules with ST data retrieval and machine learning inference to tackle zero-shot geospatial QA with enhanced precision. In urban planning, an innovative work [185] simulates planners and residents by LLM agents and enables their interactions to optimize inclusive land-use plans efficiently. Despite these promising developments, significant challenges remain. Seamlessly integrating perceptual capabilities with targeted optimization strategies is crucial for next-generation ST models that are both versatile and effective across diverse operational contexts.", + "bbox": [ + 517, + 604, + 911, + 893 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 75, + 313, + 87 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Yuxuan Liang et al.", + "bbox": [ + 818, + 75, + 911, + 85 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/7bf729db0134e8592812df53324e7a0bdf9e7208c0a94f185406b5acb97be084.jpg", + "table_caption": [ + "Table 2: Summary of representative FMs tailored for ST data science." + ], + "table_footnote": [], + "table_body": "
StageTask & CapabilityExampleMethodCategoryVenueYear
SensingReal-World Data SensingIdentifying Citizen-Related Issues from Social Mediados Santos et al. [27]LLMCAiSE2024
Real-World Data SensingIntelligent Crowdsensing CoordinationAutoWebCrowds [190]LLMICWE2024
Synthetic Data GenerationTrajectories GenerationTrajectory-LLM [154]LLMICLR2025
Synthetic Data GenerationHuman Activity Data GenerationLLMob [126]LLMNeurIPS2024
ManagementData CleaningFew-Shot Learner for Filling Missing ValuesNuwaTS [17]PFMPreprint2024
Data CleaningTrajectory RecoveryPLMTrajRec [135]LLMPreprint2024
Data CleaningAugment Additional Views of DataUrbanCLIP [150]LLMWWW2024
Query & RetrievalAutonomous Query Processor for Urban ManagementUrbanLLM [47]LLMEMNLP2024
Data IntegrationUrban Knowledge Graph ConstructionUrbanKGent [105]LLMNeurIPS2024
MiningPerceptionUnderstand the EnvironmentMagma [152]PFMCVPR2025
PerceptionInterpret and Extract ST PatternsSTEP [117]PFMKDD2022
OptimizationDrive Actionable Decision-Making in Dynamic ScenariosAgentMove [28]LLMPreprint2024
OptimizationOptimize Land-Use Plans by LLM AgentsZhou et al. [185]LLMPreprint2024
ReasoningCommon-sense ReasoningCausal-VidQA [66]PFMCVPR2022
ReasoningNumerical ReasoningUrbanGPT [73]LLMKDD2024
ReasoningCausal ReasoningNuwaDynamics [128]PFMICLR2024
ApplicationForecastingGlobal Weather ForecastingPangu [9]PFMNature2023
ImputationGenerative Adversarial Network for Traffic Data ImputationSTGAN [162]PFMIEEE TBD2022
Anomaly DetectionTransformer-based Anomaly DetectorXu et al. [145]PFMICLR2022
Event AnalysisDetecting and Interpreting EventsLAMP [120]LLMNeurIPS2023
Physical GroundingGeo-localizationGeoGPT [174]LLMJAG2023
Decision MakingTransportation Analytics and ControlTrafficGPT [168]LLMTransport Policy2024
Scenario SimulationSimulation of Human BehaviorPark et al. [107]LLMUIST2023
", + "bbox": [ + 84, + 119, + 911, + 371 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.3 Reasoning. While current ST models have demonstrated notable success in recognition and agent-based tasks, their reasoning and cognitive capabilities remain underdeveloped compared to advanced systems like DeepSeek-R1 [36]. To progress toward ST general intelligence, we identify three key aspects of reasoning:", + "bbox": [ + 81, + 378, + 480, + 449 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Common-sense Reasoning harnesses everyday knowledge and contextual cues to draw implicit inferences from complex data. For instance, Causal-VidQA [66] enables models to infer explanations, predict future states, and generate counterfactual scenarios in video question-answering, while SituatedGen [173] integrates geographical and temporal contexts to generate coherent and contextually plausible statements.", + "- Numerical Reasoning involves interpreting and manipulating quantitative information to perform arithmetic operations, assess uncertainties, and discern relationships within ST data; for instance, STBench [69] evaluates these abilities in LLMs, while UrbanGPT [73] enhances ST forecasting with instruction tuning.", + "- Causal Reasoning seeks to uncover cause-effect relations within ST data, crucial for robust and interpretable predictions. For example, NuwaDynamics [128] identifies causal regions and applies interventions to improve generalization, and GCIM [176] learns latent causal structures to disentangle spurious correlations." 
+ ], + "bbox": [ + 83, + 450, + 482, + 686 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Collectively, these dimensions offer a promising yet underexplored pathway toward achieving ST general intelligence, bridging the gap between pattern recognition and true cognitive understanding.", + "bbox": [ + 81, + 688, + 482, + 731 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4 Downstream Applications", + "text_level": 1, + "bbox": [ + 83, + 739, + 339, + 753 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4.1 STFMs for Numerical Problems. ST data is predominately numeric in many real-world scenarios. Addressing these numeric challenges is critical for tasks like forecasting, imputation, and anomaly detection [52], which demand an accurate understanding of the physical world. STFMs excel in these areas by uncovering intricate patterns and dependencies, ultimately enabling more reliable data-driven decision-making.", + "bbox": [ + 81, + 757, + 482, + 853 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "- Forecasting. Early forecasting approaches often relied on task-specific neural networks like STGNNs [51, 52, 110, 116], whereas recent developments have shifted toward universal forecasting [91,", + "bbox": [ + 81, + 854, + 482, + 896 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "137, 167]. For instance, GPT-ST [74] leverages pretraining on historical observations to boost predictive performance, while UniST [158] unifies multiple traffic prediction tasks within a single model by coupling sequence modeling with attention-based mechanisms. Building on this progress, ST-LLM [86] and STG-LLM [90] enhance traffic predictions by combining ST inputs with partially frozen large language models, and UrbanGPT [73] extends this paradigm further by employing ST instruction tuning to better align textual and ST data. 
Similar approaches have also been widely used in other domains, such as ClimaX [104], Geo-Bench [60], and Orca [76].", + "bbox": [ + 511, + 378, + 913, + 517 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "- Imputation. This has likewise benefited from techniques that capture ST dependencies to accurately restore missing or corrupted data. For instance, NuwaTS [17] repurposes pretrained language models with contrastive learning and specialized patch embeddings (capturing missing patterns/statistics) to enable cross-domain time series imputation through a unified framework. STD-LLM [44] employs LLMs with spatial-temporal tokenizers and hypergraph learning modules to handle missing values in spatio-temporal data while capturing non-pairwise correlations through topology-aware node embeddings. DrIM [83] combines LLM-derived text representations (from masked tabular data conversions) with contrastive learning to measure similarities for nearest-neighbor imputation in heterogeneous datasets.", + "bbox": [ + 511, + 517, + 913, + 696 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "- Anomaly Detection. Anomaly detection in ST data has advanced by leveraging models that learn the normal dynamics of ST systems to identify deviations indicative of abnormal events. Whereas prior methods relied on statistical thresholding and clustering to flag outliers, recent FMs learn robust ST representations to detect even subtle anomalies. For example, early attempts [26, 89, 186] investigate the feasibility of using LLMs for anomaly detection in time series data. SigLLM [3] employs GPT-series with signal-to-text conversion techniques, offering dual pipelines (anomaly prompting and deviation detection) for time series analysis through textual or visual representations of numerical data. 
AD-LLM [156] introduces a benchmark framework combining GPT-4's zero-shot reasoning with contrastive learning for anomaly context enrichment and automated model selection through chain-of-thought prompting.", + "bbox": [ + 511, + 698, + 913, + 892 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey", + "bbox": [ + 83, + 75, + 446, + 87 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 684, + 75, + 913, + 87 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "- Others. Furthermore, FMs have demonstrated great potential in other numerical problems such as time series classification [18], geospatial prediction [39, 100], traffic speed inference [7], and socioeconomic indicator prediction [40, 142, 150].", + "bbox": [ + 81, + 106, + 483, + 162 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4.2 STFMs for Inferential Problems. Inferential problems in ST data require the integration of both reasoning and understanding of environments. These problems involve high-level cognitive tasks where accurate representation of locations, movements, and environmental context is essential. Addressing such problems goes beyond numerical predictions — it necessitates answering critical inferential questions: What happened? Where is it? What to do? What if? FMs have shown their potential to enhance solutions for these challenges by leveraging their capacity to handle ST knowledge and interpret complex, unstructured data.", + "bbox": [ + 81, + 174, + 482, + 313 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "\"What happened?\" - Event Analysis. Detecting events aims to recognize and explain significant events in time and space. Traditional models struggle with scalability, interpretability, and incorporating external knowledge. 
To this end, LAMP [120] integrates LLMs with event models, using abductive reasoning to suggest plausible causes for predicted events, retrieve supporting evidence, and rank predictions for improved accuracy. Meanwhile, LEAP [165] replaces GNNs and RNNs with LLMs by framing event detection as a question-answering task, predicting missing event components and forecasting future relations through self-attention mechanisms.", + "bbox": [ + 81, + 320, + 482, + 460 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "\"Where is it?\"- Physical Grounding. Grounding ST models in real-world geographical contexts is essential for various applications such as geo-localization, map reconstruction, intelligent routing and navigation. Geo-localization aims to determine an object's location based on multimodal inputs, including images, text, and sensor data. By processing these cues in conjunction with map data, LLMs such as GPT-4o, DeepSeek [36], and GeoGPT [174] can infer geographic coordinates or identify specific locations described in natural language. Map reconstruction, on the other hand, involves creating or updating digital maps by synthesizing information from satellite imagery, sensor readings, and textual reports. LLMs contribute by interpreting and generating map content, correcting inaccuracies, and filling in missing details. For instance, MapGPT [14] employs language-guided updates, incorporating textual descriptions of environmental changes into existing map structures. In personalized routing, ItiNera [123] combines LLMs with spatial optimization to generate personalized \"Citywalk\" itineraries, providing user-specific and spatially coherent urban exploration; ChinaTravel [115] provides a benchmark for real-world Chinese travel planning, enabling scalable evaluation of constraint satisfaction and preference optimization while highlighting the strengths of neuro-symbolic agents. 
Navigation systems further benefit from LLMs' ability to understand contextual instructions, interpret user queries, and reason about dynamic environments. For example, NavGPT [182] and NavGPT-v2 [181] integrate natural language with real-time traffic and indoor video data to generate personalized and optimized routing solutions. By incorporating STFMs across these domains, physical grounding models facilitate more precise localization, efficient navigation, and adaptive urban mobility solutions, ultimately bridging the gap between digital intelligence and real-world spatial reasoning.", + "bbox": [ + 81, + 465, + 482, + 896 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/0dd90631ab1ad6054f2db1a2e36db5801f8436c980eebbece0578f2d29976011.jpg", + "image_caption": [ + "Figure 4: STFMs for addressing inferential problems." + ], + "image_footnote": [], + "bbox": [ + 524, + 106, + 906, + 256 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "\"What to do?\" - Decision Making. Optimizing policies and real-time decision-making in dynamic environments based on inferential insights plays a crucial role in a wide range of applications, including traffic control, autonomous vehicles, and disaster response. In traffic control and management, LLMs improve adaptability and interpretability compared to traditional reinforcement learning approaches [61]. Additionally, they facilitate sim-to-real transfer by modeling real-world traffic dynamics, improving the reliability of traffic signal optimization [22]. Beyond signal control, models like TrafficGPT [168] integrate multimodal traffic data with structured reasoning to analyze, predict, and optimize traffic efficiency and safety in real time. In autonomous vehicles, STFMs contribute to decision-making through both direct and indirect mechanisms. 
Directly, models such as DDM-Lag [88] employ diffusion-based frameworks with Lagrangian safety enhancements and hybrid policy updates to refine policy articulation and ensure safety. Indirectly, STFMs enhance autonomous driving by predicting realistic driving behaviors [55, 114] and leveraging multi-modal perception to integrate sensor data, bird's eye view maps, and traffic contexts [20, 184], improving situational awareness and vehicle control. Beyond transportation, STFMs play a critical role in disaster management and emergency response by integrating diverse spatio-temporal data sources, such as weather forecasts, remote sensing, and social media signals, to predict disaster impacts and optimize evacuation strategies [16, 31, 65].", + "bbox": [ + 511, + 284, + 915, + 631 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "\"What if?\"- Scenario Simulation. STFMs, with their advanced perception and reasoning capabilities, enable the development of STFM-based agents that integrate into Multi-Agent Systems (MAS) to model complex interactions across diverse domains [29]. In urban planning and social simulation, MAS facilitates participatory urban design by simulating interactions between planners and residents. For example, LLM-driven MAS has been used to collaboratively refine land-use plans, leading to improved accessibility and ecological outcomes that surpass human expert solutions [185]. Beyond urban planning, MAS contributes to social science research by modeling human-like behaviors in AI-driven networks. Studies such as [23, 107, 109] demonstrate that LLM-based agents can naturally develop social structures, providing valuable insights into emergent social dynamics. Beyond urban applications, MAS significantly advances game AI and strategic decision-making. 
Recent studies [112, 133, 187] highlight how MAS-powered reinforcement learning enables strategic gameplay, real-time opponent modeling, and interactive storytelling, fostering the development of more adaptive, intelligent, and realistic virtual agents.", + "bbox": [ + 511, + 632, + 915, + 896 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 75, + 313, + 85 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Yuxuan Liang et al.", + "bbox": [ + 818, + 75, + 911, + 87 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 The Methodology Perspective", + "text_level": 1, + "bbox": [ + 81, + 104, + 354, + 121 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Figure 5, we delve into STFMs from a methodology perspective, focusing on $i)$ LLM-based models, which are widely applied across the entire workflow of $ST$ data science by zero-shot utilization or fine-tuning and $ii)$ PFM-based models, i.e., pretraining FMs from scratch, which is mainly utilized for $ST$ data mining. The comparison between them can be found in Appendix C.", + "bbox": [ + 81, + 125, + 480, + 207 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/60d69dbcc8c85b62a9fac9cb5b695647836692fdc243ddfde0484cf9e89d35d3.jpg", + "image_caption": [ + "Figure 5: A method-centric taxonomy. Full version: Fig. 7." + ], + "image_footnote": [], + "bbox": [ + 84, + 215, + 475, + 484 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1 Large Language Models (LLM)", + "text_level": 1, + "bbox": [ + 81, + 515, + 372, + 531 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1.1 Zero-shot Learner. LLMs exhibit strong reasoning and contextual understanding capabilities, making them highly effective across various ST tasks, including data sensing, management, and mining. As shown in Appendix B, they can function as augmenters, predictors, or agents. 
To ease the presentation, we adopt a broad definition of LLMs, encompassing standard LLMs, Vision-Language Models (VLM), and Multimodal LLMs (MLLM). The zero-shot utilization of LLMs can be categorized into two primary classes.", + "bbox": [ + 81, + 534, + 482, + 645 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "- Prompt Engineering. When taking LLMs as zero-shot predictors [33, 53, 125] or data augmenters [150] for various tasks, prompt engineering plays an essential role in shaping model outputs. Below, we summarize key aspects for prompt engineering in current research: a) Prompt Construction: A well-designed prompt typically contains key elements like Task Instruction, Tokenization, and Few-shot Examples. Task instruction [53, 147, 149] aims to explicitly guide LLMs to execute specific operations, incorporating domain knowledge [157] if applicable. Tokenization [33, 53] is crucial to aligning ST data formats with LLM input structures. Additionally, presenting a small number of annotated examples [175] facilitates in-context learning, enabling LLMs to better generalize to complex tasks while ensuring output consistency and adherence to the expected format. b) Prompt Learning: [73, 148] Also known as instruction-tuning, this method learns prompts dynamically rather than relying on manually crafted ones. By optimizing prompt structures during training, it provides a flexible and efficient way to adapt LLMs to new tasks without altering their underlying model weights.", + "bbox": [ + 81, + 646, + 482, + 896 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "c) Chain-of-Thought (CoT) Prompting: CoT [87, 175] enhances LLMs' reasoning capabilities by guiding them through step-by-step logical progression. 
This method improves their ability to tackle complex spatio-temporal tasks, ensuring more interpretable, structured, and accurate outputs in decision-making processes.", + "bbox": [ + 511, + 106, + 913, + 176 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "- Agentic Engineering. The emergence of LLM-based agents [49, 144, 168, 174, 185] with reasoning, memory and tool-calling capabilities is transforming ST data science, enabling more adaptive and autonomous decision-making. When designing agent-based solutions, existing works primarily consider the following key aspects: a) Role Assignment. [50, 144, 174] clearly specify the responsibilities and functional boundaries of each agent within the system. b) Memorization [64, 174] refers to the agent's capability to store, recall, and leverage past information and context during task execution. A basic approach involves embedding past interactions into prompts, while more advanced techniques like Retrieval-Augmented Generation (RAG) [143, 155] dynamically retrieve relevant information from external knowledge bases, incorporating only the most pertinent content into the prompt. c) Tool Definition [168, 174], which identify and integrate various tools and functionalities that an agent can call upon to solve complex tasks. In ST data science, various expert models like STGNNs [51] can be wrapped as a tool and added into the agent in a plug-and-play manner. d) Multi-Agent System. Deploying multiple specialized agents to work collaboratively (each with distinct roles) enhances the efficiency and robustness of solutions for intricate ST challenges [49, 63, 185].", + "bbox": [ + 511, + 178, + 913, + 469 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1.2 Supervised Fine-Tuning for LLMs. Fine-tuning adapts LLMs to ST tasks by adjusting their parameters based on domain-specific datasets, sometimes incorporating additional modalities such as texts [79, 150] and vision [180]. 
We categorize fine-tuning methods into three approaches based on the extent of parameter updates:", + "bbox": [ + 511, + 477, + 913, + 546 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Full Parameter Fine-Tuning [68, 98, 100, 104, 108] updates all model parameters based on downstream ST datasets, achieving maximal adaptation to specific tasks. However, it requires substantial labeled data and high computational resources, making it impractical for many real-world applications.", + "- Partial Parameter Fine-tuning. To reduce computational overhead, this method [13, 183] freezes most parameters, such as attention weights, while fine-tuning only a small subset (e.g., position encodings and layer normalization). However, modifying a subset of parameters can disrupt the LLM's learned representations, leading to catastrophic forgetting of general knowledge.", + "- Add-on Parameter Fine-Tuning. To mitigate forgetting while maintaining efficiency, this technique [61] introduces trainable low-rank matrices (e.g., LoRA [42]), while keeping the original LLM weights frozen. This strategy preserves pretrained knowledge while enabling efficient adaptation to ST tasks. Besides fine-tuning LLMs' weights, another way is training additional layers for input tokenization or task adaption. For instance, TimeLLM [53] trains a self-attention layer that aligns patched time series representations with pretrained text prototype embeddings. Similarly, Time-VLM [180] trains a memory-enhanced attention to capture both short- and long-term dependencies. For task adaption, existing methods typically train an additional prediction head (e.g., linear layers) to project the LLM's output embeddings into a domain-specific space [53, 180]." 
+ ], + "bbox": [ + 514, + 549, + 913, + 895 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey", + "bbox": [ + 83, + 75, + 446, + 87 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 684, + 75, + 913, + 87 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Pretrained Foundation Models (PFM)", + "text_level": 1, + "bbox": [ + 81, + 104, + 426, + 119 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Unlike LLMs, which build STFMs by directly using or fine-tuning LLMs, PFMs are developed from scratch, independent of existing LLMs. This approach enables domain-specific optimization, allowing models to better capture ST dependencies from cross-domain ST data without constraints imposed by linguistic priors. Following this, we examine PFMs through three key dimensions:", + "bbox": [ + 81, + 123, + 482, + 205 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2.1 Neural Architecture. The architecture of PFMs is a fundamental design choice that directly influences their capabilities, efficiency, and adaptability in ST tasks, which can be categorized into:", + "bbox": [ + 81, + 210, + 482, + 253 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Transformer-based PFMs. Transformers have been the predominant architecture choice for building PFMs thanks to its powerful sequential modeling ability introduced by the self-attention mechanism [7, 72, 77, 85, 158].", + "- Diffusion-based PFMs. Diffusion-based models have recently emerged as a powerful approach for ST representation learning [12, 21, 136, 160, 161, 188], particularly in generative and predictive modeling. 
These models iteratively learn to reverse a stochastic noise process, enabling them to generate high-fidelity spatio-temporal sequences with strong generalization properties.", + "- Graph-based PFMs. Unlike sequential models, GNNs excel at representing spatially structured data such as road networks. [62, 130] build FMs based on graph neural networks to learn the complex correlation between different entities in ST applications.", + "- Others. Another emerging class of PFMs is State Space Model (SSM)-based models [8, 43, 97], which construct PFMs using structured state-space representations. Meanwhile, several studies utilize CNNs [118] as backbones for developing PFMs." + ], + "bbox": [ + 83, + 257, + 482, + 506 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2.2 Pretraining Scheme. To enhance generalization ability, PFMs are usually pretrained based on cross-domain datasets [91, 137, 158], enabling them to learn diverse ST patterns across multiple domains. Existing pretraining schemes of PFMs can be classified into three types based on the training objectives: a) Generative Pretraining [85, 98, 130, 138, 189] focuses on reconstructing input data by learning its underlying distribution, enabling the model to generate realistic time series or ST data, while b) Contrastive Pretraining [7, 84, 171] emphasize distinguishing between similar and dissimilar data pairs to learn robust representations by maximizing agreement between augmented views of the same sample. It is particularly effective in multimodal ST learning, aligning heterogeneous data sources such as satellite imagery and its text description. c) Hybrid Pretraining [77] integrates both generative and contrastive objectives, leveraging their complementary strengths.", + "bbox": [ + 81, + 513, + 482, + 722 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2.3 Data Modality. 
ST data manifests in various modalities, each characterized by unique properties (see Section 2), necessitating the development of modality-specific STFMs:", + "bbox": [ + 81, + 726, + 482, + 768 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "- Location. PFMs for location data [7, 40, 71, 124, 138, 150, 171] aim to learn general embedding for geographical entities. For instance, GeoVectors [124] and SpaBERT [71] learn location embeddings based on open-source data such OpenStreetMap, while G2PTL [138] learns from massive logistics delivery data. Notably, there is a noticeable trend that leverages multi-modalities (such as satellite image and text) for comprehensive location embeddings. For example, both UrbanCLIP [150], UrbanVLP [40], and ReFound [142] utilize satellite images for urban region profiling.", + "bbox": [ + 83, + 770, + 482, + 896 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/04153b45ab02f3e79c0ab49c30b7605ee00c77f9060519cff8dcabcf84cc5fc2.jpg", + "image_caption": [ + "Figure 6: Representative PFMs for different types of ST data." + ], + "image_footnote": [], + "bbox": [ + 517, + 102, + 903, + 273 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Trajectory & Event. PFMs for trajectory/event data [21, 84, 85, 103, 121, 189] are designed to learn general sequential patterns from inputs. A pioneering effort in this direction is TrajFM [85], which introduces a trajectory FM capable of supporting both regional and task transferability. Pretrained on vehicle trajectories from multiple cities, TrajFM employs a trajectory-masking and autoregressive recovery mechanism to enhance its learning capabilities. To tackle the limited resources of cross-domain trajectories, UniTraj [189] curates a billion-scale mobility dataset spanning diverse geographic regions to facilitate the advancement of trajectory-based FMs. 
For event data, MOTOR [121] proposes a time-to-event FM for structured medical records.", + "- ST Raster. PFMs for ST raster data [10, 15, 98, 104, 108, 117, 160] organize spatial information in a grid-like format, with a typical applied domain being weather/climate forecasting. For instance, W-MAE [98] trains a mask autoencoder for ST grid forecasting. CimaX [104] develops a general-purpose climate foundation model, pretrained on diverse datasets spanning various variables, ST scales, and physical contexts. Pangu [10] is trained on 39 years of global climate data, which achieves superior forecasting performance compared to leading numerical weather prediction systems. UniST [158] first pretrains the model in various ST raster data via masked pretraining, and then proposes a learnable ST prompt to enhance the model's generalization ability.", + "- ST Graph. PFMs for ST graph data [62, 72, 93, 111, 117, 134] learn the ST dependencies from ST graphs that generalize effectively in unseen spatial and temporal contexts. Unlike ST Raster PFMs, there are limited works in this area, which is more challenging due to the complex graph correlation. One representative is OpenCity [72] for ST graph forecasting, which integrates Transformer and GNN to model the ST dependencies in traffic data." + ], + "bbox": [ + 514, + 294, + 915, + 723 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 Conclusion and Future Directions", + "text_level": 1, + "bbox": [ + 514, + 727, + 818, + 739 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The rapid advancement of FMs has transformed ST data science, impacting sensing, management, and mining. This survey provides a comprehensive review of FMs for ST data science, identifying key capabilities such as perception, reasoning, and optimization while exploring diverse downstream tasks and datasets. We also establish a systematic taxonomy of methodologies, enhancing understanding of how STFMs model ST data. 
Despite progress, challenges remain in generalization, interpretability, and efficiency. By consolidating recent advances and outlining future directions (see Appendix A), this survey aims to inspire further innovations, driving the development of scalable and adaptive STFMs for real practice.", + "bbox": [ + 511, + 743, + 915, + 896 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 83, + 75, + 313, + 87 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Yuxuan Liang et al.", + "bbox": [ + 818, + 75, + 911, + 87 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 84, + 104, + 176, + 119 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Oluwanifemi Adebayo Moses Adekanye. 2024. LIm-powered synthetic environments for self-driving scenarios. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 23721-23723.", + "[2] Nurwahyu Alamsyah, Muhamad Amirul Haq, and Chayadi Oktomy Noto Susanto. 2024. Automated Smart City Planning through Personalized Large Language Model with Retrieval Augmented Generation. In 2024 International Conference on Information Technology and Computing (ICITCOM). IEEE, 306-311.", + "[3] Sarah Alnegheimish, Linh Nguyen, Laure Berti-Equille, and Kalyan Veeramacheneni. 2024. Large language models can be zero-shot anomaly detectors for time series? arXiv preprint arXiv:2405.14755 (2024).", + "[4] Abdul Fatir Ansari, Lorenzo Stella, Caner Turkmen, Xiyuan Zhang, Pedro Mercado, Huibin Shen, Oleksandr Shchur, Syama Sundar Rangapuram, Sebastian Pineda Arango, Shubham Kapoor, et al. 2024. Chronos: Learning the language of time series. arXiv preprint arXiv:2403.07815 (2024).", + "[5] Growtham Atluri, Anuj Karpatne, and Vipin Kumar. 2018. Spatio-temporal data mining: A survey of problems and methods. 
ACM Computing Surveys (CSUR) 51, 4 (2018), 1-41.", + "[6] Lei Bai, Lina Yao, Can Li, Xianzhi Wang, and Can Wang. 2020. Adaptive graph convolutional recurrent network for traffic forecasting. In NeurIPS, Vol. 33. 17804-17815.", + "[7] Pasquale Balsebre, Weiming Huang, Gao Cong, and Yi Li. 2024. City foundation models for learning general purpose representations from openstreetmap. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 87-97.", + "[8] Sathya Kamesh Bhethanabhotla, Omar Swelam, Julien Siems, David Salinas, and Frank Hutter. 2024. Mamba4Cast: Efficient Zero-Shot Time Series Forecasting with State Space Models. arXiv preprint arXiv:2410.09385 (2024).", + "[9] Kaifeng Bi, Lingxi Xie, Hengheng Zhang, Xin Chen, Xiaotao Gu, and Qi Tian. 2023. Accurate medium-range global weather forecasting with 3D neural networks. Nature 619, 7970 (2023), 533-538.", + "[10] Kaifeng Bi, Lingxi Xie, Hengheng Zhang, Xin Chen, Xiaotao Gu, and Qi Tian. 2023. Accurate medium-range global weather forecasting with 3D neural networks. Nature 619, 7970 (2023), 533-538.", + "[11] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in neural information processing systems 33 (2020), 1877-1901.", + "[12] Defu Cao, Wen Ye, and Yan Liu. [n.d.]. TimeDiT: General-purpose Diffusion Transformers for Time Series Foundation Model. In ICML 2024 Workshop on Foundation Models in the Wild.", + "[13] Ching Chang, Wen-Chih Peng, and Tien-Fu Chen. 2023. LLM4TS: Two-Stage Fine-Tuning for Time-Series Forecasting with Pre-Trained LLMs. arXiv preprint arXiv:2308.08469 (2023).", + "[14] Jiaqi Chen, Bingqian Lin, Ran Xu, Zhenhua Chai, Xiaodan Liang, and KwanYee K Wong. 2024. Mapppt: Map-guided prompting with adaptive path planning for vision-and-language navigation. 
arXiv preprint arXiv:2401.07314 (2024).", + "[15] Kang Chen, Tao Han, Junchao Gong, Lei Bai, Fenghua Ling, Jing-Jia Luo, Xi Chen, Leiming Ma, Tianning Zhang, Rui Su, et al. 2023. FengWu: Pushing the Skillful Global Medium-range Weather Forecast beyond 10 Days Lead. arXiv preprint arXiv:2304.02948 (2023).", + "[16] Minze Chen, Zhenxiang Tao, Weitong Tang, Tingxin Qin, Rui Yang, and Chunli Zhu. 2024. Enhancing emergency decision-making with knowledge graphs and large language models. International Journal of Disaster Risk Reduction 113 (2024), 104804.", + "[17] Jinguo Cheng, Chunwei Yang, Wanlin Cai, Yuxuan Liang, Qingsong Wen, and Yuankai Wu. 2024. NuwaTS: a Foundation Model Mending Every Incomplete Time Series. arXiv preprint arXiv:2405.15317 (2024).", + "[18] Mingyue Cheng, Yiheng Chen, Qi Liu, Zhiding Liu, and Yucong Luo. 2024. Advancing time series classification with multimodal language modeling. arXiv preprint arXiv:2403.12371 (2024).", + "[19] Garima Chhikara, Anurag Sharma, V Gurucharan, Kripabandhu Ghosh, and Abhijnan Chakraborty. 2024. LaMSUM: Amplifying Voices Against Harassment through LLM Guided Extractive Summarization of User Incident Reports. arXiv preprint arXiv:2406.15809 (2024).", + "[20] Tushar Choudhary, Vikrant Dewangan, Shivam Chandhok, Shubham Priyadarshan, Anushka Jain, Arun K Singh, Siddharth Srivastava, Krishna Murthy Jatavalabhula, and K Madhava Krishna. 2024. Talk2BEV: Language-enhanced Bird's-eye view maps for autonomous driving. In 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 16345-16352.", + "[21] Chen Chu, Hengcai Zhang, and Feng Lu. 2023. TrajGDM: A New Trajectory Foundation Model for Simulating Human Mobility. In Proceedings of the 31st ACM International Conference on Advances in Geographic Information Systems. 1-2.", + "[22] Longchao Da, Minchiuan Gao, Hao Mei, and Hua Wei. 2023. Lm powered sim-to-real transfer for traffic signal control. arXiv preprint arXiv:2308.14284 (2023)." 
+ ], + "bbox": [ + 89, + 123, + 482, + 887 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[23] Gordon Dai, Weijia Zhang, Jinhan Li, Siqi Yang, Srihas Rao, Arthur Caetano, Misha Sra, et al. 2024. Artificial leviathan: Exploring social evolution of lIm agents through the lens of hobbesian social contract theory. arXiv preprint arXiv:2406.14373 (2024).", + "[24] Zifeng Ding, Heling Cai, Jingpei Wu, Yunpu Ma, Ruotong Liao, Bo Xiong, and Volker Tresp. 2024. zrLLM: Zero-Shot Relational Learning on Temporal Knowledge Graphs with Large Language Models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 1877-1895.", + "[25] Quang Minh Dinh, Minh Khoi Ho, Anh Quan Dang, and Hung Phong Tran. 2024. Trafficvlm: A controllable visual language model for traffic video captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshop. 7134-7143.", + "[26] Manqing Dong, Hao Huang, and Longbing Cao. 2024. Can LLMs Serve As Time Series Anomaly Detectors? arXiv preprint arXiv:2408.03475 (2024).", + "[27] Vitor Gaboardi dos Santos, Guto Leoni Santos, Theo Lynn, and Boualem Benatallah. 2024. Identifying Citizen-Related Issues from Social Media Using LLM-Based Data Augmentation. In International Conference on Advanced Information Systems Engineering. Springer, 531-546.", + "[28] Jie Feng, Yuwei Du, Jie Zhao, and Yong Li. 2024. Agentmove: Predicting human mobility anywhere using large language model based agentic framework. arXiv preprint arXiv:2408.13986 (2024).", + "[29] Chen Gao, Xiaochong Lan, Nian Li, Yuan Yuan, Jingtao Ding, Zhilun Zhou, Fengli Xu, and Yong Li. 2024. Large language models empowered agent-based modeling and simulation: A survey and perspectives. 
*Humanities and Social Sciences Communications* 11, 1 (2024), 1-24.", + "[30] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. 2023. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 (2023).", + "[31] Vinicius G Goecks and Nicholas R Waytowich. 2023. Disasterresponsept: Large language models for accelerated plan of action development in disaster response scenarios. arXiv preprint arXiv:2306.17271 (2023).", + "[32] Adam Goodge, Wee Siong Ng, Bryan Hooi, and See Kiong Ng. 2025. Spatio-Temporal Foundation Models: Vision, Challenges, and Opportunities. arXiv preprint arXiv:2501.09045 (2025).", + "[33] Nate Gruver, Marc Finzi, Shikai Qiu, and Andrew Gordon Wilson. 2023. Large language models are zero-shot time series forecasters. Advances in neural information processing systems (2023).", + "[34] Nate Gruver, Marc Finzi, Shikai Qiu, and Andrew G Wilson. 2024. Large language models are zero-shot time series forecasters. Advances in Neural Information Processing Systems 36 (2024).", + "[35] Qinghua Guan, Jinhui Ouyang, Di Wu, and Weiren Yu. 2024. CityGPT: Towards Urban IoT Learning, Analysis and Interaction with Multi-Agent System. arXiv preprint arXiv:2405.14691 (2024).", + "[36] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025).", + "[37] Shengnan Guo, Youfang Lin, Ning Feng, Chao Song, and Huaiyu Wan. 2019. Attention based spatial-temporal graph convolutional networks for traffic flow forecasting. In AAAI, Vol. 33: 922-929.", + "[38] Devashish Vikas Gupta, Azeez Syed Ali Ishaqui, and Divya Kiran Kadiyala. 2024. Geode: A Zero-shot Geospatial Question-Answering Agent with Explicit Reasoning and Precise Spatio-Temporal Retrieval. 
arXiv preprint arXiv:2407.11014 (2024).", + "[39] Wes Gurnee and Max Tegmark. 2023. Language models represent space and time. arXiv preprint arXiv:2310.02207 (2023).", + "[40] Xixuan Hao, Wei Chen, Yibo Yan, Siru Zhong, Kun Wang, Qingsong Wen, and Yuxuan Liang. 2024. UrbanVLP: A Multi-Granularity Vision-Language Pre-Trained Foundation Model for Urban Indicator Prediction. arXiv preprint arXiv:2403.16831 (2024).", + "[41] Ce Hou, Fan Zhang, Yong Li, Haifeng Li, Gengchen Mai, Yuhao Kang, Ling Yao, Wenhao Yu, Yao Yao, Song Gao, et al. 2025. Urban sensing in the era of large language models. The Innovation 6, 1 (2025).", + "[42] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685 (2021).", + "[43] Jiaxi Hu, Disen Lan, Ziyu Zhou, Qingsong Wen, and Yuxuan Liang. 2024. TimeSSM: Simplifying and Unifying State Space Models for Time Series Forecasting. arXiv preprint arXiv:2405.16312 (2024).", + "[44] Yiheng Huang, Xiaowei Mao, Shengnan Guo, Yubin Chen, Junfeng Shen, Tiankuo Li, Youfang Lin, and Huaiyu Wan. 2024. STD-PLM: Understanding Both Spatial and Temporal Properties of Spatial-Temporal Data with PLM. arXiv preprint arXiv:2407.09096 (2024).", + "[45] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. 2024. Gpt-40 system card. arXiv preprint arXiv:2410.21276 (2024).", + "[46] Junzhong Ji, Fan Yu, and Minglong Lei. 2022. Self-Supervised Spatiotemporal Graph Neural Networks With Self-Distillation for Traffic Prediction. 
IEEE TITS" + ], + "bbox": [ + 522, + 109, + 913, + 904 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey", + "bbox": [ + 84, + 75, + 446, + 87 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 684, + 75, + 913, + 87 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "(2022).", + "[47] Yue Jiang, Qin Chao, Yile Chen, Xiucheng Li, Shuai Liu, and Gao Cong. 2024. UrbanLLM: Autonomous Urban Activity Planning and Management with Large Language Models. arXiv preprint arXiv:2406.12360 (2024).", + "[48] Yushan Jiang, Zijie Pan, Xikun Zhang, Sahil Garg, Anderson Schneider, Yuriy Nevmyvaka, and Dongjin Song. 2024. Empowering Time Series Analysis with Large Language Models: A Survey. In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, IfCAI-24, Kate Larson (Ed.). International Joint Conferences on Artificial Intelligence Organization, 8095-8103. https://doi.org/10.24963/ijcai.2024/895 Survey Track.", + "[49] Yushan Jiang, Wenzhao Yu, Geon Lee, Dongjin Song, Kijung Shin, Wei Cheng, Yanchi Liu, and Haifeng Chen. 2026. Explanable Multi-modal Time Series Prediction with LLM-in-the-Loop. arXiv preprint arXiv:2503.01013 (2026).", + "[50] WANG JIAWEI, Renhe Jiang, Chuang Yang, Zengqing Wu, Ryosuke Shibasaki, Noboru Koshizuka, Chuan Xiao, et al. 2024. Large language models as urban residents: An llm agent framework for personal mobility generation. Advances in Neural Information Processing Systems 37 (2024), 124547-124574.", + "[51] Guangyin Jin, Yuxuan Liang, Yuchen Fang, Zezhi Shao, Jincai Huang, Junbo Zhang, and Yu Zheng. 2023. Spatio-temporal graph neural networks for predictive learning in urban computing: A survey. 
IEEE Transactions on Knowledge and Data Engineering (2023).", + "[52] Ming Jin, Huan Yee Koh, Qingsong Wen, Daniele Zambon, Cesare Alippi, Geoffrey I Webb, Irwin King, and Shirui Pan. 2024. A survey on graph neural networks for time series: Forecasting, classification, imputation, and anomaly detection. IEEE Transactions on Pattern Analysis and Machine Intelligence (2024).", + "[53] Ming Jin, Shiyu Wang, Lintao Ma, Zhixuan Chu, James Y Zhang, Xiaoming Shi, Pin-Yu Chen, Yuxuan Liang, Yuan-Fang Li, Shirui Pan, et al. 2023. Time-LLM: Time series forecasting by reprogramming large language models. arXiv preprint arXiv:2310.01728 (2023).", + "[54] Ming Jin, Qingsong Wen, Yuxuan Liang, Chaoli Zhang, Siqiao Xue, Xue Wang, James Zhang, Yi Wang, Haifeng Chen, Xiaoli Li, et al. 2023. Large models for time series and spatio-temporal data: A survey and outlook. arXiv preprint arXiv:2310.10196 (2023).", + "[55] Ye Jin, Xiaoxi Shen, Huiling Peng, Xiaohan Liu, Jingli Qin, Jiayang Li, Jintao Xie, Peizhong Gao, Guyue Zhou, and Jiangtao Gong. 2023. Surrealdriver: Designing generative driver agent simulation framework in urban contexts based on large language model. arXiv preprint arXiv:2309.13193 (2023).", + "[56] Chenlu Ju, Jiaxin Liu, Shobhit Sinha, Hao Xue, and Flora Salim. 2025. TrajLLM: A Modular LLM-Enhanced Agent-Based Framework for Realistic Human Trajectory Simulation. (2025).", + "[57] Subbarao Kambhampati, Karthik Valmeekam, Lin Guan, Mudit Verma, Kaya Stechly, Siddhant Bhambri, Lucas Saldyt, and Anil Murthy. 2024. LLMs can't plan, but can help planning in LLM-modulo frameworks. arXiv preprint arXiv:2402.01817 (2024).", + "[58] Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of naacL-HLT, Vol. 1. 
Minneapolis, Minnesota.", + "[59] Dmitrii Kochkov, Janni Yuval, Ian Langmore, Peter Norgaard, Jamie Smith, Griffin Mooers, Milan Klower, James Lottes, Stephan Rasp, Peter Duben, et al. 2024. Neural general circulation models for weather and climate. Nature 632, 8027 (2024), 1060–1066.", + "[60] Alexandre Lacoste, Nils Lehmann, Pau Rodriguez, Evan Sherwin, Hannah Kerner, Björn Lütjens, Jeremy Irvin, David Dao, Hamed Alemohammad, Alexandre Drouin, et al. 2024. Geo-bench: Toward foundation models for earth monitoring. Advances in Neural Information Processing Systems 36 (2024).", + "[61] Siqi Lai, Zhao Xu, Weijia Zhang, Hao Liu, and Hui Xiong. 2025. Large language models as traffic signal control agents: Capacity and opportunity. In Proceedings of the 31st ACM SIGKDD conference on knowledge discovery and data mining.", + "[62] Remi Lam, Alvaro Sanchez-Gonzalez, Matthew Willson, Peter Wirsnsberger, Meire Fortunato, Ferran Alet, Suman Ravuri, Timo Ewalds, Zach Eaton-Rosen, Weihua Hu, et al. 2023. GraphCast: Learning skillful medium-range global weather forecasting. Science 382, 6677 (2023), 1416-1421.", + "[63] Geon Lee, Wenchao Yu, Kijung Shin, Wei Cheng, and Haifeng Chen. 2025. TimeCAP: Learning to Contextualize, Augment, and Predict Time Series Events with Large Language Model Agents. arXiv preprint arXiv:2502.11418 (2025).", + "[64] Mingcong Lei, Yiming Zhao, Ge Wang, Zhixin Mai, Shuguang Cui, Yatong Han, and Jinke Ren. 2025. STMA: A Spatio-Temporal Memory Agent for Long-Horizon Embodied Task Planning. arXiv preprint arXiv:2502.10177 (2025).", + "[65] Zhenyu Lei, Yushun Dong, Weiyu Li, Rong Ding, Qi Wang, and Jundong Li. 2025. Harnessing Large Language Models for Disaster Management: A Survey. arXiv preprint arXiv:2501.06932 (2025).", + "[66] Jiangtong Li, Li Niu, and Liqing Zhang. 2022. From representation to reasoning: Towards both evidence and commonsense reasoning for video question-answering. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 21273–21282.", + "[67] Jinpeng Li, Haiping Wang, Yuan Liu, Zhiyang Dou, Yuexin Ma, Sibei Yang, Yuan Li, Wenping Wang, Zhen Dong, Bisheng Yang, et al. [n.d.]. CityAnchor: City-scale 3D Visual Grounding with Multi-modality LLMs. In The Thirteenth" + ], + "bbox": [ + 91, + 109, + 480, + 895 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "International Conference on Learning Representations.", + "[68] Peibo Li, Maarten de Rijke, Hao Xue, Shuang Ao, Yang Song, and Flora D Salim. 2024. Large language models for next point-of-interest recommendation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1463-1472.", + "[69] Wenbin Li, Di Yao, Ruibo Zhao, Wenjie Chen, Zijie Xu, Chengxue Luo, Chang Gong, Quanliang Jing, Haining Tan, and Jingping Bi. 2024. STBench: Assessing the ability of large language models in spatio-temporal analysis. arXiv preprint arXiv:2406.19065 (2024).", + "[70] Yaguang Li, Rose Yu, Cyrus Shahabi, and Yan Liu. 2017. Diffusion convolutional recurrent neural network: Data-driven traffic forecasting. arXiv preprint arXiv:1707.01926 (2017).", + "[71] Zekun Li, Jina Kim, Yao-Yi Chiang, and Muhao Chen. 2022. SpaBERT: A pretrained language model from geographic data for geo-entity representation. arXiv preprint arXiv:2210.12213 (2022).", + "[72] Zhonghang Li, Long Xia, Lei Shi, Yong Xu, Dawei Yin, and Chao Huang. 2024. Opencity: Open spatio-temporal foundation models for traffic prediction. arXiv preprint arXiv:2408.10269 (2024).", + "[73] Zhonghang Li, Lianghao Xia, Jiabin Tang, Yong Xu, Lei Shi, Long Xia, Dawei Yin, and Chao Huang. 2024. Urbangpt: Spatio-temporal large language models. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 5351-5362.", + "[74] Zhonghang Li, Lianghao Xia, Yong Xu, and Chao Huang. 2024. 
GPT-ST: generative pre-training of spatio-temporal graph neural networks. Advances in Neural Information Processing Systems 36 (2024).", + "[75] Zongrong Li, Junhao Xu, Siqin Wang, Yifan Wu, and Haiyang Li. 2024. StreetviewLLM: Extracting Geographic Information Using a Chain-of-Thought Multimodal Large Language Model. arXiv preprint arXiv:2411.14476 (2024).", + "[76] Zhe Li, Ronghui Xu, Jilin Hu, Zhong Peng, Xi Lu, Chenjuan Guo, and Bin Yang. 2024. Ocean Significant Wave Height Estimation with Spatio-temporally Aware Large Language Models. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 3892-3896.", + "[77] Zekun Li, Wenxuan Zhou, Yao-Yi Chiang, and Muhao Chen. 2023. Geolm: Empowering language models for geospatially grounded language understanding. arXiv preprint arXiv:2310.14478 (2023).", + "[78] Yuxuan Liang, Songyu Ke, Junbo Zhang, Xiwen Yi, and Yu Zheng. 2018. Geom: Multi-level attention networks for geo-sensory time series prediction.. In ICAI, Vol. 2018. 3428-3434.", + "[79] Yuebing Liang, Yichao Liu, Xiaohan Wang, and Zhan Zhao. 2023. Exploring large language models for human mobility prediction under public events. arXiv preprint arXiv:2311.17351 (2023).", + "[80] Yuxuan Liang, Kun Ouyang, Yiwei Wang, Zheyi Pan, Yifang Yin, Hongyang Chen, Junbo Zhang, Yu Zheng, David S Rosenblum, and Roger Zimmermann. 2022. Mixed-Order Relation-Aware Recurrent Neural Networks for Spatio-Temporal Forecasting. IEEE TKDE (2022).", + "[81] Yuxuan Liang, Haomin Wen, Yuqi Nie, Yushan Jiang, Ming Jin, Dongjin Song, Shirui Pan, and Qingsong Wen. 2024. Foundation models for time series analysis: A tutorial and survey. In Proceedings of the 30th ACM SIGKDD conference on knowledge discovery and data mining. 6555-6565.", + "[82] Yuxuan Liang, Yutong Xia, Songyu Ke, Yiwei Wang, Qingsong Wen, Junbo Zhang, Yu Zheng, and Roger Zimmermann. 2023. Airformer: Predicting nationwide air quality in china with transformers. 
In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 37. 14329-14337.", + "[83] Jaesung Lim, Seunghwan An, Gyeongdong Woo, ChangHyun Kim, and Jong-June Jeon. [n.d.]. Context-Driven Missing Data Imputation via Large Language Model. ([n.d.]).", + "[84] Yan Lin, Yichen Liu, Zeyu Zhou, Haomin Wen, Erwen Zheng, Shengnan Guo, Youfang Lin, and Huaiyu Wan. 2024. PTraJM: Efficient and Semantic-rich Trajectory Learning with Pretrained Trajectory-Mamba. arXiv preprint arXiv:2408.04916 (2024).", + "[85] Yan Lin, Tonglong Wei, Zeyu Zhou, Haomin Wen, Jilin Hu, Shengnan Guo, Youfang Lin, and Huaiyu Wan. 2024. TrajFM: A Vehicle Trajectory Foundation Model for Region and Task Transferability. arXiv preprint arXiv:2408.15251 (2024).", + "[86] Chenxi Liu, Sun Yang, Qianxiong Xu, Zhishuai Li, Cheng Long, Ziyue Li, and Rui Zhao. 2024. Spatial-temporal large language model for traffic prediction. arXiv preprint arXiv:2401.10134 (2024).", + "[87] Haoxin Liu, Zhiyuan Zhao, Jindong Wang, Harshavardhan Kamarthi, and B Aditya Prakash. 2024. Lstprompt: Large language models as zero-shot time series forecasters by long-short-term prompting. arXiv preprint arXiv:2402.16132 (2024).", + "[88] Jiaqi Liu, Peng Hang, Xiaocong Zhao, Jianqiang Wang, and Jian Sun. 2024. DDM-lag: A diffusion-based decision-making model for autonomous vehicles with lagrangian safety enhancement. IEEE Transactions on Artificial Intelligence (2024).", + "[89] Jun Liu, Chaoyun Zhang, Jiaxu Qian, Minghua Ma, Si Qin, Chetan Bansal, Qingwei Lin, Saravanan Rajmohan, and Dongmei Zhang. 2024. Large language models can deliver accurate and interpretable time series anomaly detection." 
+ ], + "bbox": [ + 522, + 109, + 913, + 895 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 75, + 313, + 87 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Yuxuan Liang et al.", + "bbox": [ + 818, + 75, + 911, + 87 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "arXiv preprint arXiv:2405.15370 (2024).", + "[90] Lei Liu, Shuo Yu, Runze Wang, Zhenxun Ma, and Yanming Shen. 2024. How can large language models understand spatial-temporal data? arXiv preprint arXiv:2401.14192 (2024).", + "[91] Xu Liu, Junfeng Hu, Yuan Li, Shizhe Diao, Yuxuan Liang, Bryan Hooi, and Roger Zimmermann. 2024. Unitime: A language-empowered unified model for cross-domain time series forecasting. In Proceedings of the ACM Web Conference 2024. 4095-4106.", + "[92] Xu Liu, Yuxuan Liang, Chao Huang, Yu Zheng, Bryan Hooi, and Roger Zimmermann. 2022. When do contrastive learning signals help spatio-temporal graph forecasting? In SIGSPATIAL. 1-12.", + "[93] Xu Liu, Juncheng Liu, Gerald Woo, Taha Aksu, Yuxuan Liang, Roger Zimmermann, Chenghao Liu, Silvio Savarese, Caiming Xiong, and Doyen Sahoo. 2024. Moirai-MoE: Empowering Time Series Foundation Models with Sparse Mixture of Experts. arXiv preprint arXiv:2410.10469 (2024).", + "[94] Yu Liu, Jingtao Ding, Yanjie Fu, and Yong Li. 2023. Urban knowledge graph system. ACM Transactions on Intelligent Systems and Technology 14, 4 (2023), 1-25.", + "[95] Lin Long, Rui Wang, Ruixuan Xiao, Junbo Zhao, Xiao Ding, Gang Chen, and Haobo Wang. 2024. On llms-driven synthetic data generation, curation, and evaluation: A survey. arXiv preprint arXiv:2406.15126 (2024).", + "[96] Qingyue Long, Yuan Yuan, and Yong Li. 2024. A Universal Model for Human Mobility Prediction. 
arXiv preprint arXiv:2412.15294 (2024).", + "[97] Haoyu Ma, Yushu Chen, Wenlai Zhao, Jinzhe Yang, Yingsheng Ji, Xinghua Xu, Xiaozhu Liu, Hao Jing, Shengzhuo Liu, and Guangwen Yang. 2024. A Mamba Foundation Model for Time Series Forecasting. arXiv preprint arXiv:2411.02941 (2024).", + "[98] Xin Man, Chenghong Zhang, Changyu Li, and Jie Shao. 2023. W-MAE: Pretrained weather model with masked autoencoder for multi-variable weather forecasting. arXiv preprint arXiv:2304.08754 (2023).", + "[99] Rohin Manvi, Samar Khanna, Gengchen Mai, Marshall Burke, David Lobell, and Stefano Ermon. 2023. Geolm: Extracting geospatial knowledge from large language models. arXiv preprint arXiv:2310.06213 (2023).", + "[100] Rohin Manvi, Samar Khanna, Gengchen Mai, Marshall Burke, David B Lobell, and Stefano Ermon. 2024. GeoLLM: Extracting Geospatial Knowledge from Large Language Models. In The Twelfth International Conference on Learning Representations.", + "[101] Justin M Mittelstädt, Julia Maier, Panja Goerke, Frank Zinn, and Michael Hermes. 2024. Large language models can outperform humans in social situational judgments. Scientific Reports 14, 1 (2024), 27449.", + "[102] Seungwhan Moon, Andrea Madotto, Zhaojiang Lin, Aparajita Saraf, Amy Bearman, and Babak Damavandi. 2023. IMU2CLIP: Language-grounded Motion Sensor Translation with Multimodal Contrastive Learning. In Findings of the Association for Computational Linguistics: EMNLP 2023. 13246-13253.", + "[103] Alameen Najjar. 2023. Towards A Foundation Model For Trajectory Intelligence. In IEEE ICDMW. IEEE, 832-835.", + "[104] Tung Nguyen, Johannes Brandstetter, Ashish Kapoor, Jayesh K Gupta, and Aditya Grover. 2023. Climax: A foundation model for weather and climate. International Conference on Machine Learning (2023).", + "[105] Yansong Ning and Hao Liu. 2024. UrbanKGent: A Unified Large Language Model Agent Framework for Urban Knowledge Graph Construction. 
arXiv preprint arXiv:2402.06861 (2024).", + "[106] Yansong Ning, Hao Liu, Hao Wang, Zhenyu Zeng, and Hui Xiong. 2024. UUKG: unified urban knowledge graph dataset for urban spatiotemporal prediction. Advances in Neural Information Processing Systems 36 (2024).", + "[107] Joon Sung Park, Joseph O'Brien, Carrie Jun Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. 2023. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology. 1-22.", + "[108] Jaideep Pathak, Shashank Subramanian, Peter Harrington, Sanjeev Raja, Ashesh Chattopadhyay, Morteza Mardani, Thorsten Kurth, David Hall, Zongyi Li, Kamyar Azizzadenesheli, et al. 2022. Fourcastnet: A global data-driven high-resolution weather model using adaptive fourier neural operators. arXiv preprint arXiv:2202.11214 (2022).", + "[109] Jinghua Piao, Zhihong Lu, Chen Gao, Fengli Xu, Fernando P Santos, Yong Li, and James Evans. 2025. Emergence of human-like polarization among large language model agents. arXiv preprint arXiv:2501.05171 (2025).", + "[110] Arian Prabowo, Wei Shao, Hao Xue, Piotr Koniusz, and Flora D Salim. 2023. Because every sensor is unique, so is every pair: Handling dynamicity in traffic forecasting. In Proceedings of the 8th ACM/IEEE Conference on Internet of Things Design and Implementation. 93-104.", + "[111] Arian Prabowo, Hao Xue, Wei Shao, Piotr Koniusz, and Flora D Salim. 2024. Traffic forecasting on new roads using spatial contrastive pre-training (SCPT). Data Mining and Knowledge Discovery 38, 3 (2024), 913-937.", + "[112] Siyuan Qi, Shuo Chen, Yexin Li, Xiangyu Kong, Junqi Wang, Bangcheng Yang, Pring Wong, Yifan Zhong, Xiaoyuan Zhang, Zhaowei Zhang, et al. 2024. CivRealm: A Learning and Reasoning Odyssey in Civilization for Decision-Making Agents. In The Twelfth International Conference on Learning Representations." 
+ ], + "bbox": [ + 86, + 109, + 480, + 902 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[113] Kyle K Qin, Yongli Ren, Wei Shao, Brennan Lake, Filippo Privitera, and Flora D Salim. 2023. Multiple-level point embedding for solving human trajectory imputation with prediction. ACM Transactions on Spatial Algorithms and Systems 9, 2 (2023), 1-22.", + "[114] Hao Sha, Yao Mu, Yuxuan Jiang, Li Chen, Chenfeng Xu, Ping Luo, Shengbo Eben Li, Masayoshi Tomizuka, Wei Zhan, and Mingyu Ding. 2023. *Languagempc: Large language models as decision makers for autonomous driving.* arXiv preprint arXiv:2310.03026 (2023).", + "[115] Jie-Jing Shao, Xiao-Wen Yang, Bo-Wen Zhang, Baizhi Chen, Wen-Da Wei, Lan-Zhe Guo, and Yu-feng Li. 2024. ChinaTravel: A Real-World Benchmark for Language Agents in Chinese Travel Planning. arXiv preprint arXiv:2412.13682 (2024).", + "[116] Wei Shao, Zhiling Jin, Shuo Wang, Yufan Kang, Xiao Xiao, Hamid Menouar, Zhaofeng Zhang, Junshan Zhang, and Flora Salim. 2022. Long-term spatiotemporal forecasting via dynamic multiple-graph attention. In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, JFCAI-22.", + "[117] Zezhi Shao, Zhao Zhang, Fei Wang, and Yongjun Xu. 2022. Pre-training enhanced spatial-temporal graph neural network for multivariate time series forecasting. In Proceedings of the 28th ACM SIGKDD conference on knowledge discovery and data mining. 1567-1577.", + "[118] Qichao Shentu, Beibu Li, Kai Zhao, Yang Shu, Zhongwen Rao, Lujia Pan, Bin Yang, and Chenjuan Guo. 2024. Towards a General Time Series Anomaly Detector with Adaptive Bottlenecks and Dual Adversarial Decoders. arXiv preprint arXiv:2405.15273 (2024).", + "[119] Xiaoming Shi, Shiyu Wang, Yuqi Nie, Dianqi Li, Zhou Ye, Qingsong Wen, and Ming Jin. 2025. Time-MoE: Billion-Scale Time Series Foundation Models with Mixture of Experts. 
In The Thirteenth International Conference on Learning Representations (ICLR).", + "[120] Xiaoming Shi, Sqiao Xue, Kangrui Wang, Fan Zhou, James Zhang, Jun Zhou, Chenhao Tan, and Hongyuan Mei. 2023. Language models can improve event prediction by few-shot abductive reasoning. Advances in Neural Information Processing Systems 36 (2023), 29532-29557.", + "[121] Ethan Steinberg, Jason Fries, Yizhe Xu, and Nigam Shah. 2023. MOTOR: A Time-To-Event Foundation Model For Structured Medical Records. arXiv preprint arXiv:2301.03150 (2023).", + "[122] Mingtian Tan, Mike A Merrill, Vinayak Gupta, Tim Althoff, and Thomas Hartvigsen. 2024. Are language models actually useful for time series forecasting?. In The Thirty-eighth Annual Conference on Neural Information Processing Systems.", + "[123] Yihong Tang, Zhaokai Wang, Ao Qu, Yihao Yan, Zhaofeng Wu, Dingyi Zhuang, Jushi Kai, Kebing Hou, Xiaotong Guo, Jinhua Zhao, et al. 2024. ITINERA: Integrating Spatial Optimization with Large Language Models for Open-domain Urban Itinerary Planning. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track. 1413-1432.", + "[124] Nicolas Tempelmeier, Simon Gottschalk, and Elena Demidova. 2021. GeoVectors: a linked open corpus of OpenStreetMap Embeddings on world scale. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management. 4604-4612.", + "[125] Saeid Ashraf Vaghefi, Dominik Stammbach, Veruska Muccione, Julia Bingler, Jingwei Ni, Mathias Kraus, Simon Allen, Chiara Colesanti-Senni, Tobias Wekhof, Tobias Schimanski, et al. 2023. ChatClimate: Grounding conversational AI in climate science. Communications Earth & Environment 4, 1 (2023), 480.", + "[126] Jiawei Wang, Renhe Jiang, Chuang Yang, Zengqing Wu, Makoto Onizuka, Ryosuke Shibasaki, Noboru Koshizuka, and Chuan Xiao. 2024. Large language models as urban residents: An llm agent framework for personal mobility generation. 
Advances in Neural Information Processing Systems (2024).", + "[127] Junyang Wang, Haiyang Xu, Jiabo Ye, Ming Yan, Weizhou Shen, Ji Zhang, Fei Huang, and Jitao Sang. 2024. Mobile-agent: Autonomous multi-modal mobile device agent with visual perception. arXiv preprint arXiv:2401.16158 (2024).", + "[128] Kun Wang, Hao Wu, Yifan Duan, Guibin Zhang, Kai Wang, Xiaojiang Peng, Yu Zheng, Yuxuan Liang, and Yang Wang. 2024. NuwaDynamics: Discovering and Updating in Causal Spatio-Temporal Modeling. In The Twelfth International Conference on Learning Representations.", + "[129] Senzhang Wang, Jiannong Cao, and Philip Yu. 2020. Deep learning for spatiotemporal data mining: A survey. IEEE TKDE (2020).", + "[130] Xuhong Wang, Ding Wang, Liang Chen, Fei-Yue Wang, and Yilun Lin. 2023. Building transportation foundation model via generative graph transformer. In 2023 IEEE 26th International Conference on Intelligent Transportation Systems (ITSC). IEEE, 6042-6047.", + "[131] Yihang Wang, Yuying Qiu, Peng Chen, Kai Zhao, Yang Shu, Zhongwen Rao, Lujia Pan, Bin Yang, and Chenjuan Guo. 2024. ROSE: Register Assisted General Time Series Forecasting with Decomposed Frequency Learning. CoRR abs/2405.17478 (2024).", + "[132] Yu Wang, Tongya Zheng, Shunyu Liu, Zunlei Feng, Kaixuan Chen, Yunzhi Hao, and Mingli Song. 2024. Spatiotemporal-Augmented Graph Neural Networks for Human Mobility Simulation. IEEE Transactions on Knowledge and Data Engineering (2024)." 
+ ], + "bbox": [ + 517, + 109, + 911, + 885 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey", + "bbox": [ + 84, + 75, + 446, + 87 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 684, + 75, + 913, + 87 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[133] Zihao Wang, Shaofei Cai, Guanzhou Chen, Anji Liu, Xiaojian Ma, Yitao Liang, and Team CraftJarvis. 2023. Describe, explain, plan and select: interactive planning with large language models enables open-world multi-task agents. In Proceedings of the 37th International Conference on Neural Information Processing Systems. 34153-34189.", + "[134] Zhaonan Wang, Renhe Jiang, Hao Xue, Flora D Salim, Xuan Song, and Ryosuke Shibasaki. 2022. Event-aware multimodal mobility nowcasting. In AAAI, Vol. 36. 4228-4236.", + "[135] Tonglong Wei, Yan Lin, Youfang Lin, Shengnan Guo, Jilin Hu, Gao Cong, and Huaiyu Wan. 2024. PTR: A Pre-trained Language Model for Trajectory Recovery. arXiv preprint arXiv:2410.14281 (2024).", + "[136] Haomin Wen, Youfang Lin, Yutong Xia, Huaiyu Wan, Qingsong Wen, Roger Zimmermann, and Yuxuan Liang. 2023. Diffstg: Probabilistic spatio-temporal graph forecasting with denoising diffusion models. In ACM SIGSPATIAL. 1-12.", + "[137] Gerald Woo, Chenghao Liu, Akshit Kumar, Caiming Xiong, Silvio Savarese, and Doyen Sahoo. 2024. Unified training of universal time series forecasting transformers. (2024).", + "[138] Lixia Wu, Jianlin Liu, Junhong Lou, Minhui Deng, Jianbin Zheng, Haomin Wen, Chao Song, and Shu He. 2024. G2PTL: A Geography-Graph Pre-trained Model. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 4991-4999.", + "[139] Wansen Wu, Weiyi Yang, Juanjuan Li, Yong Zhao, Zhengqiu Zhu, Bin Chen, Sihang Qiu, Yong Peng, and Fei-Yue Wang. 2024. 
Autonomous crowdsensing: operating and organizing crowdsensing for sensing automation. IEEE Transactions on Intelligent Vehicles (2024).", + "[140] Zonghan Wu, Shirui Pan, Guodong Long, Jing Jiang, Xiaojun Chang, and Chengqi Zhang. 2020. Connecting the dots: Multivariate time series forecasting with graph neural networks. In SIGKDD. 753-763.", + "[141] Zonghan Wu, Shirui Pan, Guodong Long, Jing Jiang, and Chengqi Zhang. 2019. Graph wavenet for deep spatial-temporal graph modeling. arXiv preprint arXiv:1906.00121 (2019).", + "[142] Congxi Xiao, Jingbo Zhou, Yixiong Xiao, Jizhou Huang, and Hui Xiong. 2024. ReFound: Crafting a Foundation Model for Urban Region Understanding upon Language and Visual Foundations. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 3527-3538.", + "[143] Mengxi Xiao, Zihao Jiang, Lingfei Qian, Zhengyu Chen, Yueru He, Yijing Xu, Yuecheng Jiang, Dong Li, Ruey-Ling Weng, Min Peng, et al. 2025. Retrievalaugmented Large Language Models for Financial Time Series Forecasting. arXiv preprint arXiv:2502.05878 (2025).", + "[144] Fengli Xu, Jun Zhang, Chen Gao, Jie Feng, and Yong Li. 2023. Urban generative intelligence (ugi): A foundational platform for agents in embodied city environment. arXiv preprint arXiv:2312.11813 (2023).", + "[145] Jiehui Xu, Haixu Wu, Jianmin Wang, and Mingsheng Long. 2022. Anomaly Transformer: Time Series Anomaly Detection with Association Discrepancy. In International Conference on Learning Representations.", + "[146] Mingxing Xu, Wenrui Dai, Chunmiao Liu, Xing Gao, Weiyao Lin, Guo-Jun Qi, and Hongkai Xiong. 2020. Spatial-temporal transformer networks for traffic flow forecasting. arXiv preprint arXiv:2001.02908 (2020).", + "[147] Hao Xue and Flora D Salim. 2023. Promptcast: A new prompt-based learning paradigm for time series forecasting. IEEE Transactions on Knowledge and Data Engineering 36, 11 (2023), 6851-6864.", + "[148] Hao Xue, Tianye Tang, Ali Payani, and Flora D Salim. 2024. 
Prompt Mining for Language Models-based Mobility Flow Forecasting. In Proceedings of the 32nd ACM International Conference on Advances in Geographic Information Systems.", + "[149] Hao Xue, Bhanu Prakash Voutharoja, and Flora D Salim. 2022. Leveraging language foundation models for human mobility forecasting. In Proceedings of the 30th International Conference on Advances in Geographic Information Systems.", + "[150] Yibo Yan, Haomin Wen, Siru Zhong, Wei Chen, Haodong Chen, Qingsong Wen, Roger Zimmermann, and Yuxuan Liang. 2024. Urbanclip: Learning text-enhanced urban region profiling with contrastive language-image pretraining from the web. In Proceedings of the ACM on Web Conference 2024. 4006-4017.", + "[151] Yuwei Yan, Qingbin Zeng, Zhiheng Zheng, Jingzhe Yuan, Jie Feng, Jun Zhang, Fengli Xu, and Yong Li. 2024. OpenCity: A Scalable Platform to Simulate Urban Activities with Massive LLM Agents. arXiv preprint arXiv:2410.21286 (2024).", + "[152] Jianwei Yang, Reuben Tan, Qianhui Wu, Ruijie Zheng, Baolin Peng, Yongyuan Liang, Yu Gu, Mu Cai, Seonghyeon Ye, Joel Jang, et al. 2025. Magma: A Foundation Model for Multimodal AI Agents. arXiv preprint arXiv:2502.13130 (2025).", + "[153] Jihan Yang, Shusheng Yang, Anjali W Gupta, Rilyn Han, Li Fei-Fei, and Saining Xie. 2024. Thinking in space: How multimodal large language models see, remember, and recall spaces. arXiv preprint arXiv:2412.14171 (2024).", + "[154] Kairui Yang, Zihao Guo, Gengjie Lin, Haotian Dong, Zhao Huang, Yipeng Wu, Die Zuo, Jibin Peng, Ziyuan Zhong, Xin WANG, Qing Guo, Xiaosong Jia, Junchi Yan, and Di Lin. 2025. Trajectory-LLM: A Language-based Data Generator for Trajectory Prediction in Autonomous Driving. In ICLR.", + "[155] Silin Yang, Dong Wang, Haoqi Zheng, and Ruochun Jin. 2024. TimeRAG: BOOSTING LLM Time Series Forecasting via Retrieval-Augmented Generation. 
arXiv preprint arXiv:2412.16643 (2024).", + "[156] Tiankai Yang, Yi Nian, Shawn Li, Ruiyao Xu, Yuangang Li, Jiaqi Li, Zhuo Xiao, Xiyang Hu, Ryan Rossi, Kaize Ding, et al. 2024. Ad-llm: Benchmarking large" + ], + "bbox": [ + 84, + 108, + 480, + 902 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "language models for anomaly detection. arXiv preprint arXiv:2412.11142 (2024).", + "[157] Xinli Yu, Zheng Chen, Yuan Ling, Shujing Dong, Zongyi Liu, and Yanbin Lu. 2023. Temporal data meets LLM-explainable financial time series forecasting. arXiv preprint arXiv:2306.11025 (2023).", + "[158] Yuan Yuan, Jingtao Ding, Jie Feng, Depeng Jin, and Yong Li. 2024. Unist: A prompt-empowered universal model for urban spatio-temporal prediction. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4095-4106.", + "[159] Yuan Yuan, Jingtao Ding, Chonghua Han, Depeng Jin, and Yong Li. 2024. A Foundation Model for Unified Urban Spatio-Temporal Flow Prediction. arXiv preprint arXiv:2411.12972 (2024).", + "[160] Yuan Yuan, Chonghua Han, Jingtao Ding, Depeng Jin, and Yong Li. 2024. Urbanfit: A foundation model for open-world urban spatio-temporal learning. arXiv preprint arXiv:2411.12164 (2024).", + "[161] Yuan Yuan, Chenyang Shao, Jingtao Ding, Depeng Jin, and Yong Li. 2024. Spatiotemporal few-shot learning via diffusive neural network generation. In The Twelfth International Conference on Learning Representations.", + "[162] Ye Yuan, Yong Zhang, Boyue Wang, Yuan Peng, Yongli Hu, and Baocai Yin. 2022. STGAN: Spatio-temporal generative adversarial network for traffic data imputation. IEEE Transactions on Big Data 9, 1 (2022), 200-211.", + "[163] Zhenghang Yuan, Zhitong Xiong, Lichao Mou, and Xiao Xiang Zhu. 2024. Chatearthnet: A global-scale, high-quality image-text dataset for remote sensing. arXiv preprint arXiv:2402.11325 (2024).", + "[164] Kunpeng Zhang, Feng Zhou, Lan Wu, Na Xie, and Zhengbing He. 2024. 
Semantic understanding and prompt engineering for large-scale traffic data imputation. Information Fusion 102 (2024), 102038.", + "[165] Libo Zhang and Yue Ning. 2024. Large Language Models as Event Forecasters. arXiv preprint arXiv:2406.10492 (2024).", + "[166] Qianru Zhang, Xubin Ren, Lianghao Xia, Siu Ming Yiu, and Chao Huang. 2024. Spatio-Temporal Graph Learning With Large Language Model. https://openreview.net/forum?id=QUKcfq6GX", + "[167] Qianru Zhang, Haixin Wang, Cheng Long, Liangcai Su, Xingwei He, Jianlong Chang, Tailin Wu, Hongzhi Yin, Siu-Ming Yiu, Qi Tian, et al. 2024. A Survey of Generative Techniques for Spatial-Temporal Data Mining. arXiv preprint arXiv:2405.09592 (2024).", + "[168] Siyao Zhang, Daocheng Fu, Wenzhe Liang, Zhao Zhang, Bin Yu, Pinlong Cai, and Baozhen Yao. 2024. Trafficcpt: Viewing, processing and interacting with traffic foundation models. Transport Policy 150 (2024), 95-105.", + "[169] Weijia Zhang, Jindong Han, Zhao Xu, Hang Ni, Hao Liu, and Hui Xiong. 2024. Urban Foundation Models: A Survey. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 6633-6643.", + "[170] Xin Zhang, Tianjian Ouyang, Yu Shang, Qingmin Liao, and Yong Li. [n.d.]. UrbanMLLM: Joint Learning of Cross-view Imagery for Urban Understanding. ([n.d.]).", + "[171] Yu Zhang, Weiming Huang, Yao Yao, Song Gao, Lizhen Cui, and Zhongmin Yan. 2024. Urban region representation learning with human trajectories: a multiview approach incorporating transition, spatial, and temporal perspectives. GIScience & Remote Sensing 61, 1 (2024), 2387392.", + "[172] Yimei Zhang, Xiangjie Kong, Wenfeng Zhou, Jin Liu, Yanjie Fu, and Guojiang Shen. 2024. A comprehensive survey on traffic missing data imputation. IEEE Transactions on Intelligent Transportation Systems (2024).", + "[173] Yunxiang Zhang and Xiaojun Wan. 2024. SITUATEDGEN: incorporating geographical and temporal contexts into generative commonsense reasoning. 
Advances in Neural Information Processing Systems 36 (2024).", + "[174] Yifan Zhang, Cheng Wei, Shangyou Wu, Zhengting He, and Wenhao Yu. 2023. GeoGPT: understanding and processing geospatial tasks through an autonomous GPT. arXiv preprint arXiv:2307.07930 (2023).", + "[175] Zeyang Zhang, Xin Wang, Ziwei Zhang, Haoyang Li, Yijian Qin, and Wenwu Zhu. 2024. LLM4DyG: can large language models solve spatial-temporal problems on dynamic graphs? In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4350-4361.", + "[176] Yu Zhao, Pan Deng, Junting Liu, Xiaofeng Jia, and Jianwei Zhang. 2023. Generative Causal Interpretation Model for Spatio-Temporal Representation Learning. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 3537-3548.", + "[177] Chuanpan Zheng, Xiaoliang Fan, Cheng Wang, and Jianzhong Qi. 2020. Gman: A graph multi-attention network for traffic prediction. In AAAI, Vol. 34. 1234–1241.", + "[178] Yu Zheng, Licia Capra, Ouri Wolfson, and Hai Yang. 2014. Urban computing: concepts, methodologies, and applications. ACM TIST 5, 3 (2014), 1-55.", + "[179] Siru Zhong, Xixuan Hao, Yibo Yan, Ying Zhang, Yangqiu Song, and Yuxuan Liang. 2024. Urbancross: Enhancing satellite image-text retrieval with cross-domain adaptation. In Proceedings of the 32nd ACM International Conference on Multimedia. 6307-6315.", + "[180] Siru Zhong, Weilin Ruan, Ming Jin, Huan Li, Qingsong Wen, and Yuxuan Liang. 2025. Time-VLM: Exploring Multimodal Vision-Language Models for Augmented Time Series Forecasting. arXiv preprint arXiv:2502.04395 (2025)." 
+ ], + "bbox": [ + 517, + 108, + 911, + 885 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 75, + 313, + 85 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Yuxuan Liang et al.", + "bbox": [ + 818, + 75, + 911, + 87 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[181] Gengze Zhou, Yicong Hong, Zun Wang, Xin Eric Wang, and Qi Wu. 2024. Navigpt-2: Unleashing navigational reasoning capability for large vision-language models. In European Conference on Computer Vision. Springer, 260-278.", + "[182] Gengze Zhou, Yicong Hong, and Qi Wu. 2024. Navigpt: Explicit reasoning in vision-and-language navigation with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 7641-7649.", + "[183] Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, and Rong Jin. 2023. One Fits All: Power General Time Series Analysis by Pretrained LM. Advances in Neural Information Processing Systems (2023).", + "[184] Xingcheng Zhou, Mingyu Liu, Bare Luka Zagar, Ekim Yurtsever, and Alois C Knoll. 2023. Vision language models in autonomous driving and intelligent transportation systems. arXiv preprint arXiv:2310.14414 (2023).", + "[185] Zhilun Zhou, Yuming Lin, Depeng Jin, and Yong Li. 2024. Large language model for participatory urban planning. arXiv preprint arXiv:2402.17161 (2024).", + "[186] Zihao Zhou and Rose Yu. 2024. Can LLMs Understand Time Series Anomalies? arXiv preprint arXiv:2410.05440 (2024).", + "[187] Xizhou Zhu, Yuntao Chen, Hao Tian, Chenxin Tao, Weijie Su, Chenyu Yang, Gao Huang, Bin Li, Lewei Lu, Xiaogang Wang, et al. 2023. Ghost in the minecraft: Generally capable agents for open-world environments via large language models with text-based knowledge and memory. 
arXiv preprint arXiv:2305.17144 (2023).", + "[188] Yuanshao Zhu, James Jianqiao Yu, Xiangyu Zhao, Qidong Liu, Yongchao Ye, Wei Chen, Zijian Zhang, Xuetao Wei, and Yuxuan Liang. 2024. Controllraj: Controllable trajectory generation with topology-constrained diffusion model. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4676-4687.", + "[189] Yuanshao Zhu, James Jianqiao Yu, Xiangyu Zhao, Xuetao Wei, and Yuxuan Liang. 2024. UniTraj: Universal Human Trajectory Modeling from Billion-Scale Worldwide Traces. arXiv preprint arXiv:2411.03859 (2024).", + "[190] Zhengqiu Zhu, Yatai Ji, Sihang Qiu, Yong Zhao, Kai Xu, Rusheng Ju, and Bin Chen. 2024. A Prototype Design of LLM-Based Autonomous Web Crowdsensing. In International Conference on Web Engineering. Springer, 406-409.", + "[191] Zhengqiu Zhu, Yong Zhao, Bin Chen, Sihang Qiu, Kai Xu, Quanjun Yin, Jincai Huang, Zhong Liu, and Fei-Yue Wang. 2024. Conversational Crowdsensing: A Parallel Intelligence Powered Novel Sensing Approach. arXiv preprint arXiv:2402.06654 (2024)." + ], + "bbox": [ + 84, + 108, + 483, + 470 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A Limitations and Future Opportunities", + "text_level": 1, + "bbox": [ + 83, + 484, + 423, + 501 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We further discuss the potential limitations of current research and identify several key future directions aimed at advancing the development of more powerful, transparent, and reliable STFMs:", + "bbox": [ + 81, + 503, + 480, + 545 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The curse of accuracy against interpretability. We have identified a significant challenge in developing FMs for addressing numerical problems in ST data science. Directly leveraging LLMs for numerical tasks such as forecasting proves to be non-trivial [34]. 
Meanwhile, fine-tuning LLMs or training STFMs from scratch using large-scale, cross-domain ST data often comes at the cost of interactive capabilities, thereby hindering interpretability in the prediction outcomes. These limitations motivate us to explore a novel paradigm that not only retains strong numerical reasoning abilities but also enhances interpretability, bridging the gap between predictive accuracy and explanatory insight.", + "- Large foundation models are all we need? While the extensive parameterization of FMs enables impressive generalization capabilities, particularly in zero/few-shot settings, their superiority over smaller expert models remains context-dependent. In ST domains such as time series analysis [122] and urban planning [57], smaller expert models often outperform FMs when provided with sufficient domain-specific training data. This raises fundamental questions about the trade-offs between model scalability, efficiency, and task-specific optimization. Future research should delve into hybrid approaches that combine the adaptability of large models with the precision of expert models.", + "- One-fit-all FMs across the full workflow. While current FMs are typically designed to support only specific stages of ST data science, we envision a more unified FM capable of seamlessly" + ], + "bbox": [ + 83, + 547, + 482, + 893 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "spanning the entire workflow, from initial data sensing and management to mining and supporting downstream applications. Achieving this goal will likely require the development of advanced LLM agents that can function as full-stack engineers (i.e., strongly benefiting all stages) for ST data science.", + "bbox": [ + 526, + 106, + 915, + 176 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "- Integrating STFMs with multimodal understanding. 
While current STFMs excel in processing structured ST data, their ability to integrate and reason over multimodal information, including text, images, video, and sensor data, remains underdeveloped. Many tasks require models to jointly interpret geospatial context, temporal dynamics, and text descriptions. Future research can focus on designing multimodal STFMs that effectively align, fuse, and reason over heterogeneous data sources, enabling more context-aware and human-interpretable decision-making.", + "bbox": [ + 514, + 176, + 915, + 301 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B Zero-shot Utilization of LLMs", + "text_level": 1, + "bbox": [ + 514, + 311, + 790, + 325 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "There are three ways of directly using LLMs for various ST tasks:", + "bbox": [ + 514, + 330, + 911, + 344 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- LLM-as-Augmenter. Pretrained LLMs can enhance both data understanding and model performance. On the one hand, it can serve as the input augmenter, which enhances data interoperability or provides external information [40, 79] (e.g., textual or visual). On the other hand, LLMs can serve as a parameter-frozen model component [102, 150, 166], thus augmenting domain models by injecting the pretrained external knowledge in LLMs.", + "- LLM-as-Predictor. LLMs can be directly employed as predictors [33, 53, 73, 125] for various tasks. Due to the modality gap between text and ST data, preprocessing is required to fit the input spaces of LLMs. Such step typically contains prompt engineering [73, 125, 147-149] or patch & tokenization [53].", + "- LLM-as-Agent. LLM-based agents are typically equipped with the ability to memorize and call various tools. When applied to ST data science, various domain-expert models can be wrapped as a tool and added into the agent in a plug-and-play manner [144, 168, 174]. 
As such, LLM serves as a router to access different models with both flexibility and performance guarantees. Furthermore, multi-agent systems [185] can be built to solve more complex tasks in the ST domain." + ], + "bbox": [ + 514, + 347, + 913, + 623 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C Comparison between LLMs and PFMs", + "text_level": 1, + "bbox": [ + 514, + 635, + 854, + 651 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 3 demonstrates the comparison between LLMs and PFMs on their capabilities, including perception, optimization, and reasoning. For example, PFMs excel in exceptional numerical reasoning abilities, yet they often struggle with common-sense understanding. There is still no free lunch, and the user can choose either LLMs or PFMs according to the downstream applications.", + "bbox": [ + 511, + 654, + 913, + 737 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/146f81da04a218bcff0caa24a2b9f78a939ebf9ec6c05eb8643c1f4789377d85.jpg", + "table_caption": [ + "Table 3: A capability comparison between LLMs and PFMs for ST data science." + ], + "table_footnote": [], + "table_body": "
CapabilitiesLarge Language Models (LLMs)Pretrained Foundation Models (PFMs)
Perception▲ Limited native ST perception; can be enhanced via fine-tuning✓ Strong ST perception, integrating sensor data and domain-specific learning
Optimization✓ Agent-based reasoning for decision-making; relies on prompting and heuristics▲ Limited; lacks decision-making ability for control and planning
Common-sense Reasoning✓ Strong via pretraining on vast textual data; can be enhanced with fine-tuning▲ Limited; relies on structured ST data rather than broad world knowledge
Numerical Reasoning▲ Handles arithmetic but struggles with structured ST computations✓ Designed for numerical problems, e.g., forecasting, anomaly detection
Causal Reasoning▲ Can infer causal relations from text but lacks structured ST modeling✓ Built-in graph-based and ST causal modeling
", + "bbox": [ + 517, + 792, + 906, + 890 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey", + "bbox": [ + 83, + 75, + 446, + 87 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 684, + 75, + 913, + 87 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/0e2c71ca92a3b1948076a1507053343e497304cd0c184fa18d2ff53d67b1e756.jpg", + "image_caption": [ + "Figure 7: Taxonomy from the methodology perspective." + ], + "image_footnote": [], + "bbox": [ + 84, + 103, + 911, + 478 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 75, + 313, + 87 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Yuxuan Liang et al.", + "bbox": [ + 818, + 75, + 911, + 87 + ], + "page_idx": 13 + } +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13502/db12ade8-3943-4647-bb0d-ce8160710750_model.json b/data/2025/2503_13xxx/2503.13502/db12ade8-3943-4647-bb0d-ce8160710750_model.json new file mode 100644 index 0000000000000000000000000000000000000000..45d9aeee5904ae1665c548daac6e8894fbb18ea3 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13502/db12ade8-3943-4647-bb0d-ce8160710750_model.json @@ -0,0 +1,4111 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.261, + 0.058, + 0.707 + ], + "angle": 270, + "content": "arXiv:2503.13502v1 [cs.DB] 12 Mar 2025" + }, + { + "type": "title", + "bbox": [ + 0.156, + 0.102, + 0.848, + 0.152 + ], + "angle": 0, + "content": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.163, + 0.77, + 0.199 + ], + "angle": 0, + "content": "Yuxuan Liang\\(^{1}\\), Haomin Wen\\(^{2,1}\\), Yutong Xia\\(^{3}\\), Ming Jin\\(^{4}\\), Bin Yang\\(^{5}\\), Flora Salim\\(^{6}\\), Qingsong Wen\\(^{7}\\), Shirui 
Pan\\(^{4}\\), Gao Cong\\(^{8}\\)" + }, + { + "type": "text", + "bbox": [ + 0.168, + 0.199, + 0.831, + 0.277 + ], + "angle": 0, + "content": "1The Hong Kong University of Science and Technology (Guangzhou) 2Carnegie Mellon University 3National University of Singapore 4Griffith University 5East China Normal University 6University of New South Wales 7Squirrel AI Learning, USA 8Nanyang Technology University {yuxiang,yutong.x}@outlook.com,{wenhaomin.whm,mingjinedu,qingsongedu}@gmail.com flora.salim@unsw.edu.au,byang@dase.ecnu.edu.cn,s.pan@griffith.edu.au,gaocong@ntu.edu.sg" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.284, + 0.158, + 0.298 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.303, + 0.484, + 0.552 + ], + "angle": 0, + "content": "Spatio-Temporal (ST) data science, which includes sensing, managing, and mining large-scale data across space and time, is fundamental to understanding complex systems in domains such as urban computing, climate science, and intelligent transportation. Traditional deep learning approaches have significantly advanced this field, particularly in the stage of ST data mining. However, these models remain task-specific and often require extensive labeled data. Inspired by the success of Foundation Models (FM), especially large language models, researchers have begun exploring the concept of Spatio-Temporal Foundation Models (STFMs) to enhance adaptability and generalization across diverse ST tasks. Unlike prior architectures, STFMs empower the entire workflow of ST data science, ranging from data sensing, management, to mining, thereby offering a more holistic and scalable approach. Despite rapid progress, a systematic study of STFMs for ST data science remains lacking. This survey aims to provide a comprehensive review of STFMs, categorizing existing methodologies and identifying key research directions to advance ST general intelligence." 
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.556, + 0.22, + 0.57 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.574, + 0.483, + 0.671 + ], + "angle": 0, + "content": "Humans live in a world shaped by the dynamic interplay of countless elements across space and time. Spatio-Temporal (ST) Data, which refer to data that encapsulate ST phenomena, track the evolution of objects or events across locations and time [5], such as meteorological records, traffic patterns, and human traces. These data are frequently sourced from a wide array of platforms, ranging from IoT devices, GPS sensors, social media, to remote sensing." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.671, + 0.483, + 0.769 + ], + "angle": 0, + "content": "Within this context, Spatio-Temporal Data Science focuses on sensing, managing, and mining these datasets to uncover patterns, understand complex systems, and predict future dynamics. Motivated by its transformative potential, this field addresses critical challenges across urban environments and even the entire planet, enabling decision-making and fostering innovations that contribute to building smarter, more sustainable, and resilient systems [178]." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.78, + 0.483, + 0.853 + ], + "angle": 0, + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." 
+ }, + { + "type": "text", + "bbox": [ + 0.084, + 0.854, + 0.305, + 0.864 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.865, + 0.473, + 0.875 + ], + "angle": 0, + "content": "© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.875, + 0.27, + 0.884 + ], + "angle": 0, + "content": "ACM ISBN 978-x-xxxxx-xxxxx-x/YY/MM" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.885, + 0.285, + 0.896 + ], + "angle": 0, + "content": "https://doi.org/10.1145/nnnnnnn.nnnnnnn" + }, + { + "type": "image", + "bbox": [ + 0.521, + 0.283, + 0.907, + 0.426 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.433, + 0.916, + 0.49 + ], + "angle": 0, + "content": "Figure 1: ST Foundation Models (STFM), which include LLM and PFM, are pretrained with or applied to diverse ST data, with the abilities of perception, optimization, and reasoning. STFMs can, in turn, enhance each stage of ST data science." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.494, + 0.916, + 0.688 + ], + "angle": 0, + "content": "In the era of deep learning, the community has primarily concentrated on spatio-temporal representation learning, as a fundamental step of ST data mining [129]. Key advancements include the development of Spatio-Temporal Graph Neural Networks (STGNN) [51] and transformer-based architectures, which have shown remarkable success in tasks such as traffic forecasting [80, 146], air quality prediction [82], and human mobility analytics [132]. STGNNs integrate Graph Neural Networks (GNN) with temporal learning modules (e.g., GRU [6, 70], TCN [140, 141]) to model ST correlations, while transformer models leverage self-attention mechanisms [37, 78, 177] to process complex dependencies across space and time. 
Additionally, there has been significant research on self-supervised learning [46, 74, 92], where models are trained to extract powerful representations with minimal reliance on large annotated datasets." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.689, + 0.916, + 0.897 + ], + "angle": 0, + "content": "Driven by the success of Foundation Models (FM), especially Large Language Models (LLM), researchers have recently begun exploring the concept of Spatio-Temporal Foundation Models (STFM) [32, 81, 169]. By harnessing LLMs, it becomes possible to develop more generalized, adaptable solutions that can be fine-tuned for specific tasks with minimal data. Another prominent approach involves pretraining FMs (denoted as PFM) on cross-domain ST data and adapting them for particular domains. In contrast to previous architectures (e.g., STGNNs), STFMs integrates the capabilities of perception, reasoning and optimization, which not only promises to revolutionize ST data mining, but also empowers other stages of ST data science, such as ST data sensing and management (See Figure 1). This shift has the potential to enhance the scalability and efficiency of ST applications, offering a more holistic approach to addressing challenges in urban computing, climate science, etc." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.314, + 0.088 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.819, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Yuxuan Liang et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.105, + 0.483, + 0.161 + ], + "angle": 0, + "content": "Table 1: Our survey vs. related surveys on FMs for learning ST data, such as locations (L), trajectories (T), events (E), ST rasters (R), and ST graphs (G). The applications (App.) include numerical (N) and inferential (I) problems." 
+ }, + { + "type": "table", + "bbox": [ + 0.088, + 0.163, + 0.48, + 0.24 + ], + "angle": 0, + "content": "
SurveyYearVenueSensingManage.MiningApp.Data
Jin et al. [54]2023-NR,G
Jiang et al. [48]2024IJCAINR,G
Liang et al. [81]2024KDDNT,E,R,G
Zhang et al. [169]2024KDDN,IL,T,E,R,G
Goodge et al. [32]2025-NT,E,R,G
Ours2025-N,IL,T,E,R,G
" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.242, + 0.483, + 0.381 + ], + "angle": 0, + "content": "Despite their rapid advancements, a systematic analysis of STFMs across the entire workflow of ST data science remains lacking. First, prior surveys have primarily focused on utilizing LLMs as the key tool for ST data mining [32, 54, 81, 169], leaving a significant gap in understanding how these models can be integrated throughout the entire process, i.e., with less focus placed on their role in the earlier stages of sensing and management. Second, they predominantly examine the applications of STFMs to numerical problems (e.g., forecasting, imputation) while overlooking their role in inferential problem-solving such as decision-making systems." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.381, + 0.483, + 0.573 + ], + "angle": 0, + "content": "To bridge these gaps, this paper aims to provide a more comprehensive survey of STFMs across all stages of ST data science, spanning data sensing, management, and mining (see Figure 1). For example, LLMs can enhance ST data sensing by actively processing citizen reports, optimizing participatory sensing strategies, and generating synthetic data at scale. In terms of data management, they can automate data cleaning tasks, construct meaningful knowledge graphs for data integration, and facilitate more efficient retrieval of cross-modal datasets. Beyond these stages, our survey also explores how STFMs support a broader range of downstream applications, including numerical and inferential problems. Through this endeavor, we seek to illuminate an overall vision of STFMs, thereby enhancing comprehension regarding their potential to optimize ST data science, fostering more integrated and adaptable solutions." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.574, + 0.483, + 0.727 + ], + "angle": 0, + "content": "Meanwhile, we systematically investigate the key methodologies of STFMs for modeling a variety of ST data. 
We begin by categorizing existing STFMs into two main classes: LLMs and Pretrained Foundation Models (PFMs). For LLMs, which are pretrained on linguistic data, we focus on their usage as a zero-shot [33] or few-shot learner [53, 73], where various prompting and fine-tuning strategies have been explored, respectively. For PFMs, which are trained from scratch based on cross-domain ST data [40, 158, 189], we examine their neural architectures, pretraining methods, and their adaptability to different types of ST data, including location data, trajectory data, events, ST raster data, and ST graph data." + }, + { + "type": "text", + "bbox": [ + 0.099, + 0.727, + 0.446, + 0.74 + ], + "angle": 0, + "content": "In summary, our major contributions lie in three aspects:" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.74, + 0.483, + 0.809 + ], + "angle": 0, + "content": "- Comprehensive and up-to-date survey: We provide the first comprehensive and modern survey of FMs across the entire workflow of ST data science, covering data sensing, management, and mining. We also explore a broader range of downstream tasks and data types compared to most existing surveys (See Table 1)." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.81, + 0.483, + 0.851 + ], + "angle": 0, + "content": "- Vision and Methodologies: We propose a vision for STFMs, identifying key capabilities essential for their success, and discuss current methodologies for implementing these abilities in detail." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.851, + 0.483, + 0.892 + ], + "angle": 0, + "content": "- Future directions: We highlight promising directions for advancing ST data science with foundation models, encouraging further research and exploration in this emerging field." 
+ }, + { + "type": "list", + "bbox": [ + 0.084, + 0.74, + 0.483, + 0.892 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.107, + 0.916, + 0.178 + ], + "angle": 0, + "content": "Paper Organization. The remainder of this paper is organized as follows: Sec. 2 provides essential background on FMs and ST data. Sec. 3 and 4 present a taxonomy of STFMs regarding the workflow and methodologies, respectively. Sec. 5 offers concluding remarks, and Appendix A highlights promising avenues for future research." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.183, + 0.646, + 0.199 + ], + "angle": 0, + "content": "2 Background" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.201, + 0.916, + 0.437 + ], + "angle": 0, + "content": "Foundation models. FMs are deep neural networks trained on vast datasets, enabling them to acquire broad, cross-domain knowledge and exceptional adaptability [45]. Unlike earlier task-specific models, FMs can be efficiently fine-tuned with relatively small amounts of task-specific data, offering remarkable flexibility, effectiveness, and cost efficiency. Pioneering attempts like BERT [58] and GPT-3 [11] have reshaped natural language processing. More recent models, e.g., GPT-4o [45] and DeepSeek-R1 [36], further push the frontiers of generative capabilities, enabling more nuanced reasoning, robust domain adaptation, and improved context-awareness in diverse tasks. In ST domains, recent FMs like Time-MoE [119], Chronos [4], and UniST [158] have made remarkable strides in time series analysis and universal ST forecasting, while UniTraj [189] serves as a versatile foundation for various trajectory-related tasks. Inspired by these successes, this survey delves into the utilization of FMs in the entire workflow of ST data science, covering data sensing, management, and mining." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.44, + 0.916, + 0.522 + ], + "angle": 0, + "content": "Formulation of Spatio-Temporal Data. ST data refer to datasets that integrate spatial (location-based) and temporal (time-based) information, capturing dynamic patterns and relationships over space and time. Figure 2 depicts the basic ST data structures discussed in this survey, including locations, trajectories, events, ST rasters, and ST graphs. Their definitions are delineated as follows." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.522, + 0.915, + 0.579 + ], + "angle": 0, + "content": "Definition 1 (Location). A location refers to a fixed spatial point or object in a geographical space, represented by the geospatial coordinates \\( l \\in \\mathbb{R}^2 \\), i.e., latitude and longitude. It is often profiled by the corresponding satellite image, street-view image, and descriptions." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.579, + 0.916, + 0.634 + ], + "angle": 0, + "content": "Definition 2 (Trajectory). A trajectory is a sequence of time-ordered locations that describe the movements of an object in the geographical space. It can be formulated as \\(\\mathcal{T} = p_1\\rightarrow p_2\\rightarrow \\dots \\rightarrow p_T\\) where \\(p_i = (l_i,t_i)\\), and \\(l_{i}\\) denotes the object's location at time \\(t_i\\)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.634, + 0.915, + 0.69 + ], + "angle": 0, + "content": "Definition 3 (Event). An event sequence is a series of timestamped events, denoted as \\(\\mathcal{E} = v_{1}\\rightarrow v_{2}\\rightarrow \\dots \\rightarrow v_{T}\\), describing the progress of actions or occurrences, where \\(v_{i} = (e_{i},t_{i})\\) and \\(e_i\\in \\mathbb{R}^d\\) is an event and \\(t_i\\) denotes the time when \\(e_i\\) occurs." 
+ }, + { + "type": "image", + "bbox": [ + 0.532, + 0.692, + 0.892, + 0.876 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.551, + 0.878, + 0.878, + 0.894 + ], + "angle": 0, + "content": "Figure 2: Illustration of various types of ST data." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.448, + 0.088 + ], + "angle": 0, + "content": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey" + }, + { + "type": "header", + "bbox": [ + 0.685, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.482, + 0.162 + ], + "angle": 0, + "content": "Definition 4 (Spatio-Temporal Raster). An ST raster can be denoted as \\(\\mathcal{X} = < \\mathbf{X}_1,\\mathbf{X}_2,\\dots ,\\mathbf{X}_T > \\in \\mathbb{R}^{H\\times W\\times T\\times D}\\), where \\(\\mathbf{X}_t\\in \\mathbb{R}^{H\\times W\\times D}\\) denotes the signals collected from \\(N = HW\\) evenly distributed locations at time \\(t\\), each characterized by \\(D\\) feature attributes." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.163, + 0.482, + 0.248 + ], + "angle": 0, + "content": "Definition 5 (Spatio-Temporal Graph). An ST graph extends the ST raster to be \\( X = < \\mathbf{X}_1, \\mathbf{X}_2, \\ldots, \\mathbf{X}_T > \\in \\mathbb{R}^{N \\times T \\times D} \\) by explicitly incorporating spatial correlations with a graph \\( G_t = (V, E_t, \\mathbf{A}_t) \\) when \\( N \\) locations are not uniformly distributed. Here \\( V \\) is the set of nodes, \\( E_t \\) is the set of edges, and \\( \\mathbf{A}_t \\in \\mathbb{R}^{N \\times N} \\) is the adjacency matrix at time \\( t \\). The size of \\( V \\) is usually static." 
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.259, + 0.331, + 0.274 + ], + "angle": 0, + "content": "3 The Workflow Perspective" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.277, + 0.483, + 0.306 + ], + "angle": 0, + "content": "As shown in Figure 3, we examine STFMs from a holistic, bottom-up perspective, emphasizing their composition across four key aspects:" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.308, + 0.483, + 0.377 + ], + "angle": 0, + "content": "- ST Data Sensing refers to the acquisition of data that varies over both space and time from diverse resources (e.g., sensors, satellites, social media), to capture dynamic environmental, geographic, or social phenomena. We also consider synthetic data generation for enhancing data diversity and quantity." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.378, + 0.483, + 0.446 + ], + "angle": 0, + "content": "- ST Data Management focuses on storing, indexing, and organizing these large-scale, heterogeneous ST datasets, incorporating strategies like distributed architectures for efficient retrieval and integration. FMs can enhance this process by facilitating data cleaning, query & retrieval, and data integration." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.447, + 0.483, + 0.516 + ], + "angle": 0, + "content": "- ST Data Mining involves learning and analyzing ST data that varies across both space and time to uncover patterns, trends, and relationships, using data mining (DM), deep learning (DL) techniques, or the newly-proposed STFMs with strong capabilities in perception, optimization, and reasoning." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.516, + 0.483, + 0.571 + ], + "angle": 0, + "content": "- Downstream Applications: This stage harnesses the above insights from ST data to drive real-world applications, ranging from numerical problems to inferential problems, where informed actions and policies are formulated." 
+ }, + { + "type": "list", + "bbox": [ + 0.084, + 0.308, + 0.483, + 0.571 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.574, + 0.483, + 0.63 + ], + "angle": 0, + "content": "By examining these four aspects, we can better understand how STFMs advance from raw data acquisition to high-level service providing, ultimately enabling more intelligent, adaptable, and impactful solutions. We will detail each stage in the following sections." + }, + { + "type": "image", + "bbox": [ + 0.087, + 0.638, + 0.479, + 0.876 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.099, + 0.879, + 0.466, + 0.892 + ], + "angle": 0, + "content": "Figure 3: The framework of STFMs for ST data science." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.106, + 0.805, + 0.122 + ], + "angle": 0, + "content": "3.1 Spatio-Temporal Data Sensing" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.125, + 0.916, + 0.18 + ], + "angle": 0, + "content": "FMs revolutionize ST data sensing from two complementary aspects: real-world data sensing, which involves collecting data from physical sources, and synthetic data generation, which creates synthetic ST data through foundation models." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.184, + 0.915, + 0.42 + ], + "angle": 0, + "content": "3.1.1 Real-World Data Sensing. Advances in sensing and data acquisition technologies have led to the generation of vast amounts of ST data. FMs are increasingly applied in human-centric active sensing, particularly in the context of citizen reporting for urban and environmental monitoring [41]. These models act as powerful agents for collecting and processing real-time data from citizens, enabling the efficient handling of ST data [19, 27, 101]. For example, citizens might constantly report incidents, environmental changes, or social events through text or voice [178]. 
By understanding these reports, LLMs can categorize, prioritize, and trigger appropriate responses for various urban issues, from traffic congestion to environmental hazards. This enhances the decision-making process by continuously updating their models with new data streams. Thus, LLMs are not just passive analytical tools but active participants that help make urban environments more responsive and adaptive to citizen inputs, transforming traditional citizen feedback into actionable knowledge, enabling more sustainable and resilient cities." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.42, + 0.915, + 0.6 + ], + "angle": 0, + "content": "FMs can also function as intelligent schedulers or simulate multiagent systems to optimize the recruitment and coordination of participants for crowdsensing, particularly under budget constraints [41, 139, 191]. By analyzing ST data and understanding context, LLMs can identify regions and times where crowdsensing efforts will yield the most valuable information. They dynamically recruit participants based on proximity, availability, and past contributions, reducing redundant data collection. Additionally, LLMs simulate multiple agents interacting in real time, ensuring the efficient distribution of sensing tasks across a network of citizens or devices [190]. This strategic scheduling and agent-based coordination maximize coverage while minimizing costs, ensuring that crowdsensing delivers valuable, real-time insights under budgetary constraints." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.603, + 0.915, + 0.812 + ], + "angle": 0, + "content": "3.1.2 Synthetic Data Generation. FMs can also facilitate data generation, which enhances ST data by increasing its diversity, improving model robustness, and compensating for missing or sparse information [95]. This is crucial for ST tasks like mobility analytics, where collecting real-world data is often costly or raises privacy concerns. 
For instance, Trajectory-LLM [154] generates vehicle trajectories from brief textual descriptions of vehicle interactions, whereas Traj-LLM [56] generates human trajectories by leveraging personas, memory modules, and routine profiles. LLMob [126] advances mobility data generation, offering flexibility in modeling diverse urban activities and personal mobility patterns, thus improving transportation system modeling and analysis. LLMs have also been employed to construct synthetic environments that replicate real-world conditions across diverse domains, including intelligent transportation [1] and disaster management [31]." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.822, + 0.848, + 0.838 + ], + "angle": 0, + "content": "3.2 Spatio-Temporal Data Management" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.841, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Upon the acquisition of ST data, the challenge of effective management emerges, particularly in addressing data quality issues (e.g., missing values/views) and facilitating data retrieval and integration. Within this context, FMs can be harnessed in the following ways." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.314, + 0.088 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.82, + 0.076, + 0.912, + 0.087 + ], + "angle": 0, + "content": "Yuxuan Liang et al." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.108, + 0.482, + 0.329 + ], + "angle": 0, + "content": "3.2.1 Data Cleaning. Data cleaning is the process of improving data quality by addressing issues such as missing values, low sampling rates, and noise. For example, ST data often exhibit missing values due to various factors like sensor malfunctions and transmission disruptions [178]. 
Filling in these missing values[113] is crucial for ensuring the integrity of predictive models, optimizing strategies, and facilitating informed decision-making. Recent literature reveals that LLMs can serve as powerful zero-shot [164] or few-shot [17, 172] learners to data imputation by leveraging their ability to identify and learn complex ST patterns. PLMTrajRec [135], utilizing a pretrained language model to recover sparse trajectory data by unifying intervals and inferring road conditions, showing effective generalization across varied sampling intervals in tests. Moreover, scholars have investigated the potential of leveraging LLMs to augment missing views or information, such as urban region profiling [40, 150, 163] and traffic video captioning [25]." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.335, + 0.482, + 0.57 + ], + "angle": 0, + "content": "3.2.2 Query & Retrieval. Meanwhile, LLM can be applied to querying and retrieval to enhance information retrieval accuracy under the ST context. By leveraging their advanced natural language understanding capabilities, LLMs can process user queries in a more contextual and semantically rich manner, enabling precise retrieval of relevant information from structured and unstructured data sources. For instance, UrbanLLM [47] finetunes LLMs for urban activity planning and management, which serves as a problem solver that decodes urban-related queries into several sub-tasks, with each one solved by suitable spatio-temporal AI models. Alamsyah et al. [2] propose an automated smart city planning system that utilizes a personalized LLM with Retrieval Augmented Generation (RAG) [30] to generate tailored urban planning recommendations while ensuring data privacy, where RAG is used to retrieve relevant urban planning documents for context-aware responses. Another line of work [67, 75, 170, 179] utilizes Multimodal LLM for cross-modal information retrieval to enhance urban computing tasks." 
+ }, + { + "type": "text", + "bbox": [ + 0.087, + 0.573, + 0.482, + 0.739 + ], + "angle": 0, + "content": "3.2.3 Data Integration. Data integration seeks to combine information from disparate sources, often necessitating the understanding and mapping of relationships between entities in heterogeneous datasets. LLMs are increasingly being employed in this domain, particularly for knowledge graph construction [24], where they automate and enhance the extraction, integration, and reasoning of related data. In the context of ST data, LLMs facilitate data integration by leveraging heterogeneous urban data sources, performing relational triplet extraction, and completing knowledge graphs through geospatial reasoning [94, 106]. A pioneering study UrbanKGent [105] proposes an LLM-based Agent framework to automate the process of urban knowledge graph construction." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.753, + 0.368, + 0.769 + ], + "angle": 0, + "content": "3.3 Spatio-Temporal Data Mining" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.772, + 0.482, + 0.895 + ], + "angle": 0, + "content": "Unlike traditional data mining, which primarily focuses on structured datasets, ST data mining captures intricate spatial and temporal dependencies within ST data using machine learning or deep learning techniques [51, 129, 167]. With the emergence of FMs and LLMs, Spatio-Temporal Foundation Models (STFMs) offer new possibilities by integrating perception, optimization, and reasoning capabilities to enhance ST data mining. In this section, we explore these key capabilities, while their specific applications across different domains are detailed in Sec. 3.4." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.108, + 0.913, + 0.218 + ], + "angle": 0, + "content": "3.3.1 Perception. In STFMs, perception encompasses the ability to effectively model, interpret, and generalize complex spatial and temporal patterns, enabling a deeper understanding of dynamic environments. 
This capability can be categorized into two key perspectives. The first view pertains to an agent's ability to perceive and understand its surrounding environment, capturing visual or contextual interactions within real-world scenarios such as smart cities [151], indoor activities [152, 153], and mobile Apps [127]." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.219, + 0.913, + 0.508 + ], + "angle": 0, + "content": "The second aspect involves interpreting and extracting ST patterns from sensor data, ensuring accurate predictions across diverse domains. Domain-agnostic approaches, such as STEP [117] and GPT-ST [74], have employed pretraining strategies that leverage historical observations to enhance forecasting performance. In urban computing, models like TFM [130] and OpenCity [72] utilize graph-based FMs to analyze behaviors and interactions within transportation systems, yielding promising results in traffic prediction. In climate science, Pangu [9], trained on 39 years of global climate data, delivers superior deterministic forecasting outcomes across all evaluated variables when compared to leading numerical weather prediction systems. Additional notable examples in this area include the works [60, 76, 104, 108]. Despite these advances, achieving robust generalization remains a critical challenge, as most existing research has been confined to in-domain applications. While models like UniST [158] are designed as one-for-all solutions for diverse ST scenarios, their training datasets and evaluation testbeds are predominantly limited to transportation. Nevertheless, their underlying technique stacks show promise for broader cross-domain and cross-modality generalization. Other significant contributions in this realm include UniFlow [159] and UrbanDiT [160]." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.509, + 0.913, + 0.605 + ], + "angle": 0, + "content": "3.3.2 Optimization. 
Building upon the perceptual foundations, the optimization ability focuses on refining and adapting models to achieve specific, task-oriented objectives. In other words, models are not only expected to capture rich ST patterns but also to drive actionable decision-making in dynamic, real-world scenarios. This involves integrating advanced optimization strategies that tailor model behavior to the unique demands of applications." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.606, + 0.913, + 0.895 + ], + "angle": 0, + "content": "A prominent approach involves agent-based frameworks. For example, in traffic signal control, traditional methods (e.g., RL) are now augmented by frameworks that use LLMs as decision-making agents [61]. These systems leverage real-time traffic data and expert prompts to enable human-like planning, resulting in more adaptive and interpretable control strategies. Similarly, CityGPT [35] decomposes ST analysis into specialized sub-tasks, handled by temporal, spatial, and fusion agents, to efficiently process IoT data and generate insightful visualizations. AgentMove [28] addresses human mobility prediction by breaking down the task into modules for individual pattern mining, urban structure analysis, and collective behavior extraction. In geo-science, systems like Geode [38] integrate explicit optimization modules with ST data retrieval and machine learning inference to tackle zero-shot geospatial QA with enhanced precision. In urban planning, an innovative work [185] simulates planners and residents by LLM agents and enables their interactions to optimize inclusive land-use plans efficiently. Despite these promising developments, significant challenges remain. Seamlessly integrating perceptual capabilities with targeted optimization strategies is crucial for next-generation ST models that are both versatile and effective across diverse operational contexts." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.447, + 0.088 + ], + "angle": 0, + "content": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey" + }, + { + "type": "header", + "bbox": [ + 0.685, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "table_caption", + "bbox": [ + 0.269, + 0.105, + 0.726, + 0.118 + ], + "angle": 0, + "content": "Table 2: Summary of representative FMs tailored for ST data science." + }, + { + "type": "table", + "bbox": [ + 0.085, + 0.121, + 0.913, + 0.372 + ], + "angle": 0, + "content": "
StageTask & CapabilityExampleMethodCategoryVenueYear
SensingReal-World Data SensingIdentifying Citizen-Related Issues from Social Mediados Santos et al. [27]LLMCAiSE2024
Real-World Data SensingIntelligent Crowdsensing CoordinationAutoWebCrowds [190]LLMICWE2024
Synthetic Data GenerationTrajectories GenerationTrajectory-LLM [154]LLMICLR2025
Synthetic Data GenerationHuman Activity Data GenerationLLMob [126]LLMNeurIPS2024
ManagementData CleaningFew-Shot Learner for Filling Missing ValuesNuwaTS [17]PFMPreprint2024
Data CleaningTrajectory RecoveryPLMTrajRec [135]LLMPreprint2024
Data CleaningAugment Additional Views of DataUrbanCLIP [150]LLMWWW2024
Query & RetrievalAutonomous Query Processor for Urban ManagementUrbanLLM [47]LLMEMNLP2024
Data IntegrationUrban Knowledge Graph ConstructionUrbanKGent [105]LLMNeurIPS2024
MiningPerceptionUnderstand the EnvironmentMagma [152]PFMCVPR2025
PerceptionInterpret and Extract ST PatternsSTEP [117]PFMKDD2022
OptimizationDrive Actionable Decision-Making in Dynamic ScenariosAgentMove [28]LLMPreprint2024
OptimizationOptimize Land-Use Plans by LLM AgentsZhou et al. [185]LLMPreprint2024
ReasoningCommon-sense ReasoningCausal-VidQA [66]PFMCVPR2022
ReasoningNumerical ReasoningUrbanGPT [73]LLMKDD2024
ReasoningCausal ReasoningNuwaDynamics [128]PFMICLR2024
ApplicationForecastingGlobal Weather ForecastingPangu [9]PFMNature2023
ImputationGenerative Adversarial Network for Traffic Data ImputationSTGAN [162]PFMIEEE TBD2022
Anomaly DetectionTransformer-based Anomaly DetectorXu et al. [145]PFMICLR2022
Event AnalysisDetecting and Interpreting EventsLAMP [120]LLMNeurIPS2023
Physical GroundingGeo-localizationGeoGPT [174]LLMJAG2023
Decision MakingTransportation Analytics and ControlTrafficGPT [168]LLMTransport Policy2024
Scenario SimulationSimulation of Human BehaviorPark et al. [107]LLMUIST2023
" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.38, + 0.482, + 0.45 + ], + "angle": 0, + "content": "3.3.3 Reasoning. While current ST models have demonstrated notable success in recognition and agent-based tasks, their reasoning and cognitive capabilities remain underdeveloped compared to advanced systems like DeepSeek-R1 [36]. To progress toward ST general intelligence, we identify three key aspects of reasoning:" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.452, + 0.483, + 0.547 + ], + "angle": 0, + "content": "- Common-sense Reasoning harnesses everyday knowledge and contextual cues to draw implicit inferences from complex data. For instance, Causal-VidQA [66] enables models to infer explanations, predict future states, and generate counterfactual scenarios in video question-answering, while SituatedGen [173] integrates geographical and temporal contexts to generate coherent and contextually plausible statements." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.548, + 0.483, + 0.618 + ], + "angle": 0, + "content": "- Numerical Reasoning involves interpreting and manipulating quantitative information to perform arithmetic operations, assess uncertainties, and discern relationships within ST data; for instance, STBench [69] evaluates these abilities in LLMs, while UrbanGPT [73] enhances ST forecasting with instruction tuning." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.618, + 0.483, + 0.687 + ], + "angle": 0, + "content": "- Causal Reasoning seeks to uncover cause-effect relations within ST data, crucial for robust and interpretable predictions. For example, NuwaDynamics [128] identifies causal regions and applies interventions to improve generalization, and GCIM [176] learns latent causal structures to disentangle spurious correlations." 
+ }, + { + "type": "list", + "bbox": [ + 0.084, + 0.452, + 0.483, + 0.687 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.689, + 0.483, + 0.732 + ], + "angle": 0, + "content": "Collectively, these dimensions offer a promising yet underexplored pathway toward achieving ST general intelligence, bridging the gap between pattern recognition and true cognitive understanding." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.74, + 0.34, + 0.755 + ], + "angle": 0, + "content": "3.4 Downstream Applications" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.758, + 0.483, + 0.854 + ], + "angle": 0, + "content": "3.4.1 STFMs for Numerical Problems. ST data is predominately numeric in many real-world scenarios. Addressing these numeric challenges is critical for tasks like forecasting, imputation, and anomaly detection [52], which demand an accurate understanding of the physical world. STFMs excel in these areas by uncovering intricate patterns and dependencies, ultimately enabling more reliable data-driven decision-making." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.855, + 0.483, + 0.897 + ], + "angle": 0, + "content": "- Forecasting. Early forecasting approaches often relied on task-specific neural networks like STGNNs [51, 52, 110, 116], whereas recent developments have shifted toward universal forecasting [91," + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.38, + 0.915, + 0.518 + ], + "angle": 0, + "content": "137, 167]. For instance, GPT-ST [74] leverages pretraining on historical observations to boost predictive performance, while UniST [158] unifies multiple traffic prediction tasks within a single model by coupling sequence modeling with attention-based mechanisms. 
Building on this progress, ST-LLM [86] and STG-LLM [90] enhance traffic predictions by combining ST inputs with partially frozen large language models, and UrbanGPT [73] extends this paradigm further by employing ST instruction tuning to better align textual and ST data. Similar approaches have also been widely used in other domains, such as ClimaX [104], Geo-Bench [60], and Orca [76]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.518, + 0.915, + 0.698 + ], + "angle": 0, + "content": "- Imputation. This has likewise benefited from techniques that capture ST dependencies to accurately restore missing or corrupted data. For instance, NuwaTS [17] repurposes pretrained language models with contrastive learning and specialized patch embeddings (capturing missing patterns/statistics) to enable cross-domain time series imputation through a unified framework. STD-LLM [44] employs LLMs with spatial-temporal tokenizers and hypergraph learning modules to handle missing values in spatio-temporal data while capturing non-pairwise correlations through topology-aware node embeddings. DrIM [83] combines LLM-derived text representations (from masked tabular data conversions) with contrastive learning to measure similarities for nearest-neighbor imputation in heterogeneous datasets." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.699, + 0.915, + 0.893 + ], + "angle": 0, + "content": "- Anomaly Detection. Anomaly detection in ST data has advanced by leveraging models that learn the normal dynamics of ST systems to identify deviations indicative of abnormal events. Whereas prior methods relied on statistical thresholding and clustering to flag outliers, recent FMs learn robust ST representations to detect even subtle anomalies. For example, early attempts [26, 89, 186] investigate the feasibility of using LLMs for anomaly detection in time series data. 
SigLLM [3] employs GPT-series with signal-to-text conversion techniques, offering dual pipelines (anomaly prompting and deviation detection) for time series analysis through textual or visual representations of numerical data. AD-LLM [156] introduces a benchmark framework combining GPT-4's zero-shot reasoning with contrastive learning for anomaly context enrichment and automated model selection through chain-of-thought prompting." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.314, + 0.087 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.82, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Yuxuan Liang et al." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.107, + 0.485, + 0.164 + ], + "angle": 0, + "content": "- Others. Furthermore, FMs have demonstrated great potential in other numerical problems such as time series classification [18], geospatial prediction [39, 100], traffic speed inference [7], and socioeconomic indicator prediction [40, 142, 150]." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.175, + 0.483, + 0.314 + ], + "angle": 0, + "content": "3.4.2 STFMs for Inferential Problems. Inferential problems in ST data require the integration of both reasoning and understanding of environments. These problems involve high-level cognitive tasks where accurate representation of locations, movements, and environmental context is essential. Addressing such problems goes beyond numerical predictions — it necessitates answering critical inferential questions: What happened? Where is it? What to do? What if? FMs have shown their potential to enhance solutions for these challenges by leveraging their capacity to handle ST knowledge and interpret complex, unstructured data." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.321, + 0.483, + 0.461 + ], + "angle": 0, + "content": "\"What happened?\" - Event Analysis. 
Detecting events aims to recognize and explain significant events in time and space. Traditional models struggle with scalability, interpretability, and incorporating external knowledge. To this end, LAMP [120] integrates LLMs with event models, using abductive reasoning to suggest plausible causes for predicted events, retrieve supporting evidence, and rank predictions for improved accuracy. Meanwhile, LEAP [165] replaces GNNs and RNNs with LLMs by framing event detection as a question-answering task, predicting missing event components and forecasting future relations through self-attention mechanisms." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.467, + 0.483, + 0.897 + ], + "angle": 0, + "content": "\"Where is it?\"- Physical Grounding. Grounding ST models in real-world geographical contexts is essential for various applications such as geo-localization, map reconstruction, intelligent routing and navigation. Geo-localization aims to determine an object's location based on multimodal inputs, including images, text, and sensor data. By processing these cues in conjunction with map data, LLMs such as GPT-4o, DeepSeek [36], and GeoGPT [174] can infer geographic coordinates or identify specific locations described in natural language. Map reconstruction, on the other hand, involves creating or updating digital maps by synthesizing information from satellite imagery, sensor readings, and textual reports. LLMs contribute by interpreting and generating map content, correcting inaccuracies, and filling in missing details. For instance, MapGPT [14] employs language-guided updates, incorporating textual descriptions of environmental changes into existing map structures. 
In personalized routing, ItiNera [123] combines LLMs with spatial optimization to generate personalized \"Citywalk\" itineraries, providing user-specific and spatially coherent urban exploration; ChinaTravel [115] provides a benchmark for real-world Chinese travel planning, enabling scalable evaluation of constraint satisfaction and preference optimization while highlighting the strengths of neuro-symbolic agents. Navigation systems further benefit from LLMs' ability to understand contextual instructions, interpret user queries, and reason about dynamic environments. For example, NavGPT [182] and NavGPT-v2 [181] integrate natural language with real-time traffic and indoor video data to generate personalized and optimized routing solutions. By incorporating STFMs across these domains, physical grounding models facilitate more precise localization, efficient navigation, and adaptive urban mobility solutions, ultimately bridging the gap between digital intelligence and real-world spatial reasoning." + }, + { + "type": "image", + "bbox": [ + 0.526, + 0.107, + 0.907, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.537, + 0.265, + 0.89, + 0.279 + ], + "angle": 0, + "content": "Figure 4: STFMs for addressing inferential problems." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.285, + 0.916, + 0.632 + ], + "angle": 0, + "content": "\"What to do?\" - Decision Making. Optimizing policies and real-time decision-making in dynamic environments based on inferential insights plays a crucial role in a wide range of applications, including traffic control, autonomous vehicles, and disaster response. In traffic control and management, LLMs improve adaptability and interpretability compared to traditional reinforcement learning approaches [61]. Additionally, they facilitate sim-to-real transfer by modeling real-world traffic dynamics, improving the reliability of traffic signal optimization [22]. 
Beyond signal control, models like TrafficGPT [168] integrate multimodal traffic data with structured reasoning to analyze, predict, and optimize traffic efficiency and safety in real time. In autonomous vehicles, STFMs contribute to decision-making through both direct and indirect mechanisms. Directly, models such as DDM-Lag [88] employ diffusion-based frameworks with Lagrangian safety enhancements and hybrid policy updates to refine policy articulation and ensure safety. Indirectly, STFMs enhance autonomous driving by predicting realistic driving behaviors [55, 114] and leveraging multi-modal perception to integrate sensor data, bird's eye view maps, and traffic contexts [20, 184], improving situational awareness and vehicle control. Beyond transportation, STFMs play a critical role in disaster management and emergency response by integrating diverse spatio-temporal data sources, such as weather forecasts, remote sensing, and social media signals, to predict disaster impacts and optimize evacuation strategies [16, 31, 65]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.633, + 0.916, + 0.897 + ], + "angle": 0, + "content": "\"What if?\"- Scenario Simulation. STFMs, with their advanced perception and reasoning capabilities, enable the development of STFM-based agents that integrate into Multi-Agent Systems (MAS) to model complex interactions across diverse domains [29]. In urban planning and social simulation, MAS facilitates participatory urban design by simulating interactions between planners and residents. For example, LLM-driven MAS has been used to collaboratively refine land-use plans, leading to improved accessibility and ecological outcomes that surpass human expert solutions [185]. Beyond urban planning, MAS contributes to social science research by modeling human-like behaviors in AI-driven networks. 
Studies such as [23, 107, 109] demonstrate that LLM-based agents can naturally develop social structures, providing valuable insights into emergent social dynamics. Beyond urban applications, MAS significantly advances game AI and strategic decision-making. Recent studies [112, 133, 187] highlight how MAS-powered reinforcement learning enables strategic gameplay, real-time opponent modeling, and interactive storytelling, fostering the development of more adaptive, intelligent, and realistic virtual agents." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.447, + 0.088 + ], + "angle": 0, + "content": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey" + }, + { + "type": "header", + "bbox": [ + 0.685, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "title", + "bbox": [ + 0.083, + 0.106, + 0.356, + 0.122 + ], + "angle": 0, + "content": "4 The Methodology Perspective" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.125, + 0.482, + 0.208 + ], + "angle": 0, + "content": "As shown in Figure 5, we delve into STFMs from a methodology perspective, focusing on \\( i) \\) LLM-based models, which are widely applied across the entire workflow of \\( ST \\) data science by zero-shot utilization or fine-tuning and \\( ii) \\) PFM-based models, i.e., pretraining FMs from scratch, which is mainly utilized for \\( ST \\) data mining. The comparison between them can be found in Appendix C." + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.216, + 0.476, + 0.486 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.489, + 0.476, + 0.503 + ], + "angle": 0, + "content": "Figure 5: A method-centric taxonomy. Full version: Fig. 7." 
+ }, + { + "type": "title", + "bbox": [ + 0.083, + 0.516, + 0.373, + 0.532 + ], + "angle": 0, + "content": "4.1 Large Language Models (LLM)" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.535, + 0.483, + 0.646 + ], + "angle": 0, + "content": "4.1.1 Zero-shot Learner. LLMs exhibit strong reasoning and contextual understanding capabilities, making them highly effective across various ST tasks, including data sensing, management, and mining. As shown in Appendix B, they can function as augmenters, predictors, or agents. To ease the presentation, we adopt a broad definition of LLMs, encompassing standard LLMs, Vision-Language Models (VLM), and Multimodal LLMs (MLLM). The zero-shot utilization of LLMs can be categorized into two primary classes." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.647, + 0.483, + 0.897 + ], + "angle": 0, + "content": "- Prompt Engineering. When taking LLMs as zero-shot predictors [33, 53, 125] or data augmenters [150] for various tasks, prompt engineering plays an essential role in shaping model outputs. Below, we summarize key aspects for prompt engineering in current research: a) Prompt Construction: A well-designed prompt typically contains key elements like Task Instruction, Tokenization, and Few-shot Examples. Task instruction [53, 147, 149] aims to explicitly guide LLMs to execute specific operations, incorporating domain knowledge [157] if applicable. Tokenization [33, 53] is crucial to aligning ST data formats with LLM input structures. Additionally, presenting a small number of annotated examples [175] facilitates in-context learning, enabling LLMs to better generalize to complex tasks while ensuring output consistency and adherence to the expected format. b) Prompt Learning: [73, 148] Also known as instruction-tuning, this method learns prompts dynamically rather than relying on manually crafted ones. 
By optimizing prompt structures during training, it provides a flexible and efficient way to adapt LLMs to new tasks without altering their underlying model weights." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.107, + 0.915, + 0.177 + ], + "angle": 0, + "content": "c) Chain-of-Thought (CoT) Prompting: CoT [87, 175] enhances LLMs' reasoning capabilities by guiding them through step-by-step logical progression. This method improves their ability to tackle complex spatio-temporal tasks, ensuring more interpretable, structured, and accurate outputs in decision-making processes." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.179, + 0.915, + 0.47 + ], + "angle": 0, + "content": "- Agentic Engineering. The emergence of LLM-based agents [49, 144, 168, 174, 185] with reasoning, memory and tool-calling capabilities is transforming ST data science, enabling more adaptive and autonomous decision-making. When designing agent-based solutions, existing works primarily consider the following key aspects: a) Role Assignment. [50, 144, 174] clearly specify the responsibilities and functional boundaries of each agent within the system. b) Memorization [64, 174] refers to the agent's capability to store, recall, and leverage past information and context during task execution. A basic approach involves embedding past interactions into prompts, while more advanced techniques like Retrieval-Augmented Generation (RAG) [143, 155] dynamically retrieve relevant information from external knowledge bases, incorporating only the most pertinent content into the prompt. c) Tool Definition [168, 174], which identify and integrate various tools and functionalities that an agent can call upon to solve complex tasks. In ST data science, various expert models like STGNNs [51] can be wrapped as a tool and added into the agent in a plug-and-play manner. d) Multi-Agent System. 
Deploying multiple specialized agents to work collaboratively (each with distinct roles) enhances the efficiency and robustness of solutions for intricate ST challenges [49, 63, 185]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.478, + 0.915, + 0.547 + ], + "angle": 0, + "content": "4.1.2 Supervised Fine-Tuning for LLMs. Fine-tuning adapts LLMs to ST tasks by adjusting their parameters based on domain-specific datasets, sometimes incorporating additional modalities such as texts [79, 150] and vision [180]. We categorize fine-tuning methods into three approaches based on the extent of parameter updates:" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.55, + 0.913, + 0.618 + ], + "angle": 0, + "content": "- Full Parameter Fine-Tuning [68, 98, 100, 104, 108] updates all model parameters based on downstream ST datasets, achieving maximal adaptation to specific tasks. However, it requires substantial labeled data and high computational resources, making it impractical for many real-world applications." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.62, + 0.913, + 0.701 + ], + "angle": 0, + "content": "- Partial Parameter Fine-tuning. To reduce computational overhead, this method [13, 183] freezes most parameters, such as attention weights, while fine-tuning only a small subset (e.g., position encodings and layer normalization). However, modifying a subset of parameters can disrupt the LLM's learned representations, leading to catastrophic forgetting of general knowledge." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.703, + 0.914, + 0.896 + ], + "angle": 0, + "content": "- Add-on Parameter Fine-Tuning. To mitigate forgetting while maintaining efficiency, this technique [61] introduces trainable low-rank matrices (e.g., LoRA [42]), while keeping the original LLM weights frozen. This strategy preserves pretrained knowledge while enabling efficient adaptation to ST tasks. 
Besides fine-tuning LLMs' weights, another way is training additional layers for input tokenization or task adaption. For instance, TimeLLM [53] trains a self-attention layer that aligns patched time series representations with pretrained text prototype embeddings. Similarly, Time-VLM [180] trains a memory-enhanced attention to capture both short- and long-term dependencies. For task adaption, existing methods typically train an additional prediction head (e.g., linear layers) to project the LLM's output embeddings into a domain-specific space [53, 180]." + }, + { + "type": "list", + "bbox": [ + 0.516, + 0.55, + 0.914, + 0.896 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.314, + 0.088 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.82, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Yuxuan Liang et al." + }, + { + "type": "title", + "bbox": [ + 0.083, + 0.106, + 0.428, + 0.121 + ], + "angle": 0, + "content": "4.2 Pretrained Foundation Models (PFM)" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.124, + 0.483, + 0.207 + ], + "angle": 0, + "content": "Unlike LLMs, which build STFMs by directly using or fine-tuning LLMs, PFMs are developed from scratch, independent of existing LLMs. This approach enables domain-specific optimization, allowing models to better capture ST dependencies from cross-domain ST data without constraints imposed by linguistic priors. Following this, we examine PFMs through three key dimensions:" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.212, + 0.483, + 0.255 + ], + "angle": 0, + "content": "4.2.1 Neural Architecture. 
The architecture of PFMs is a fundamental design choice that directly influences their capabilities, efficiency, and adaptability in ST tasks, which can be categorized into:" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.258, + 0.483, + 0.313 + ], + "angle": 0, + "content": "- Transformer-based PFMs. Transformers have been the predominant architecture choice for building PFMs thanks to its powerful sequential modeling ability introduced by the self-attention mechanism [7, 72, 77, 85, 158]." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.314, + 0.483, + 0.397 + ], + "angle": 0, + "content": "- Diffusion-based PFMs. Diffusion-based models have recently emerged as a powerful approach for ST representation learning [12, 21, 136, 160, 161, 188], particularly in generative and predictive modeling. These models iteratively learn to reverse a stochastic noise process, enabling them to generate high-fidelity spatio-temporal sequences with strong generalization properties." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.397, + 0.483, + 0.452 + ], + "angle": 0, + "content": "- Graph-based PFMs. Unlike sequential models, GNNs excel at representing spatially structured data such as road networks. [62, 130] build FMs based on graph neural networks to learn the complex correlation between different entities in ST applications." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.452, + 0.483, + 0.507 + ], + "angle": 0, + "content": "- Others. Another emerging class of PFMs is State Space Model (SSM)-based models [8, 43, 97], which construct PFMs using structured state-space representations. Meanwhile, several studies utilize CNNs [118] as backbones for developing PFMs." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.258, + 0.483, + 0.507 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.515, + 0.483, + 0.723 + ], + "angle": 0, + "content": "4.2.2 Pretraining Scheme. 
To enhance generalization ability, PFMs are usually pretrained based on cross-domain datasets [91, 137, 158], enabling them to learn diverse ST patterns across multiple domains. Existing pretraining schemes of PFMs can be classified into three types based on the training objectives: a) Generative Pretraining [85, 98, 130, 138, 189] focuses on reconstructing input data by learning its underlying distribution, enabling the model to generate realistic time series or ST data, while b) Contrastive Pretraining [7, 84, 171] emphasizes distinguishing between similar and dissimilar data pairs to learn robust representations by maximizing agreement between augmented views of the same sample. It is particularly effective in multimodal ST learning, aligning heterogeneous data sources such as satellite imagery and its text description. c) Hybrid Pretraining [77] integrates both generative and contrastive objectives, leveraging their complementary strengths." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.727, + 0.483, + 0.769 + ], + "angle": 0, + "content": "4.2.3 Data Modality. ST data manifests in various modalities, each characterized by unique properties (see Section 2), necessitating the development of modality-specific STFMs:" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.771, + 0.483, + 0.897 + ], + "angle": 0, + "content": "- Location. PFMs for location data [7, 40, 71, 124, 138, 150, 171] aim to learn general embedding for geographical entities. For instance, GeoVectors [124] and SpaBERT [71] learn location embeddings based on open-source data such as OpenStreetMap, while G2PTL [138] learns from massive logistics delivery data. Notably, there is a noticeable trend that leverages multi-modalities (such as satellite image and text) for comprehensive location embeddings. For example, UrbanCLIP [150], UrbanVLP [40], and ReFound [142] all utilize satellite images for urban region profiling."
+ }, + { + "type": "image", + "bbox": [ + 0.518, + 0.103, + 0.905, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.515, + 0.276, + 0.913, + 0.291 + ], + "angle": 0, + "content": "Figure 6: Representative PFMs for different types of ST data." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.295, + 0.916, + 0.461 + ], + "angle": 0, + "content": "- Trajectory & Event. PFMs for trajectory/event data [21, 84, 85, 103, 121, 189] are designed to learn general sequential patterns from inputs. A pioneering effort in this direction is TrajFM [85], which introduces a trajectory FM capable of supporting both regional and task transferability. Pretrained on vehicle trajectories from multiple cities, TrajFM employs a trajectory-masking and autoregressive recovery mechanism to enhance its learning capabilities. To tackle the limited resources of cross-domain trajectories, UniTraj [189] curates a billion-scale mobility dataset spanning diverse geographic regions to facilitate the advancement of trajectory-based FMs. For event data, MOTOR [121] proposes a time-to-event FM for structured medical records." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.461, + 0.915, + 0.627 + ], + "angle": 0, + "content": "- ST Raster. PFMs for ST raster data [10, 15, 98, 104, 108, 117, 160] organize spatial information in a grid-like format, with a typical applied domain being weather/climate forecasting. For instance, W-MAE [98] trains a mask autoencoder for ST grid forecasting. ClimaX [104] develops a general-purpose climate foundation model, pretrained on diverse datasets spanning various variables, ST scales, and physical contexts. Pangu [10] is trained on 39 years of global climate data, which achieves superior forecasting performance compared to leading numerical weather prediction systems. 
UniST [158] first pretrains the model in various ST raster data via masked pretraining, and then proposes a learnable ST prompt to enhance the model's generalization ability." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.628, + 0.915, + 0.724 + ], + "angle": 0, + "content": "- ST Graph. PFMs for ST graph data [62, 72, 93, 111, 117, 134] learn the ST dependencies from ST graphs that generalize effectively in unseen spatial and temporal contexts. Unlike ST Raster PFMs, there are limited works in this area, which is more challenging due to the complex graph correlation. One representative is OpenCity [72] for ST graph forecasting, which integrates Transformer and GNN to model the ST dependencies in traffic data." + }, + { + "type": "list", + "bbox": [ + 0.516, + 0.295, + 0.916, + 0.724 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.728, + 0.82, + 0.741 + ], + "angle": 0, + "content": "5 Conclusion and Future Directions" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.744, + 0.916, + 0.897 + ], + "angle": 0, + "content": "The rapid advancement of FMs has transformed ST data science, impacting sensing, management, and mining. This survey provides a comprehensive review of FMs for ST data science, identifying key capabilities such as perception, reasoning, and optimization while exploring diverse downstream tasks and datasets. We also establish a systematic taxonomy of methodologies, enhancing understanding of how STFMs model ST data. Despite progress, challenges remain in generalization, interpretability, and efficiency. By consolidating recent advances and outlining future directions (see Appendix A), this survey aims to inspire further innovations, driving the development of scalable and adaptive STFMs for real practice." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.447, + 0.088 + ], + "angle": 0, + "content": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey" + }, + { + "type": "header", + "bbox": [ + 0.685, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.106, + 0.178, + 0.12 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.124, + 0.483, + 0.154 + ], + "angle": 0, + "content": "[1] Oluwanifemi Adebayo Moses Adekanye. 2024. Llm-powered synthetic environments for self-driving scenarios. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 23721-23723." + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.154, + 0.483, + 0.194 + ], + "angle": 0, + "content": "[2] Nurwahyu Alamsyah, Muhamad Amirul Haq, and Chayadi Oktomy Noto Susanto. 2024. Automated Smart City Planning through Personalized Large Language Model with Retrieval Augmented Generation. In 2024 International Conference on Information Technology and Computing (ICITCOM). IEEE, 306-311." + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.195, + 0.482, + 0.224 + ], + "angle": 0, + "content": "[3] Sarah Alnegheimish, Linh Nguyen, Laure Berti-Equille, and Kalyan Veeramachaneni. 2024. Large language models can be zero-shot anomaly detectors for time series? arXiv preprint arXiv:2405.14755 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.225, + 0.482, + 0.265 + ], + "angle": 0, + "content": "[4] Abdul Fatir Ansari, Lorenzo Stella, Caner Turkmen, Xiyuan Zhang, Pedro Mercado, Huibin Shen, Oleksandr Shchur, Syama Sundar Rangapuram, Sebastian Pineda Arango, Shubham Kapoor, et al. 2024. Chronos: Learning the language of time series. arXiv preprint arXiv:2403.07815 (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.265, + 0.482, + 0.294 + ], + "angle": 0, + "content": "[5] Gowtham Atluri, Anuj Karpatne, and Vipin Kumar. 2018. Spatio-temporal data mining: A survey of problems and methods. ACM Computing Surveys (CSUR) 51, 4 (2018), 1-41." + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.295, + 0.482, + 0.324 + ], + "angle": 0, + "content": "[6] Lei Bai, Lina Yao, Can Li, Xianzhi Wang, and Can Wang. 2020. Adaptive graph convolutional recurrent network for traffic forecasting. In NeurIPS, Vol. 33. 17804-17815." + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.325, + 0.482, + 0.365 + ], + "angle": 0, + "content": "[7] Pasquale Balsebre, Weiming Huang, Gao Cong, and Yi Li. 2024. City foundation models for learning general purpose representations from openstreetmap. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 87-97." + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.365, + 0.482, + 0.395 + ], + "angle": 0, + "content": "[8] Sathya Kamesh Bhethanabhotla, Omar Swelam, Julien Siems, David Salinas, and Frank Hutter. 2024. Mamba4Cast: Efficient Zero-Shot Time Series Forecasting with State Space Models. arXiv preprint arXiv:2410.09385 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.396, + 0.482, + 0.425 + ], + "angle": 0, + "content": "[9] Kaifeng Bi, Lingxi Xie, Hengheng Zhang, Xin Chen, Xiaotao Gu, and Qi Tian. 2023. Accurate medium-range global weather forecasting with 3D neural networks. Nature 619, 7970 (2023), 533-538." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.426, + 0.482, + 0.455 + ], + "angle": 0, + "content": "[10] Kaifeng Bi, Lingxi Xie, Hengheng Zhang, Xin Chen, Xiaotao Gu, and Qi Tian. 2023. Accurate medium-range global weather forecasting with 3D neural networks. Nature 619, 7970 (2023), 533-538." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.456, + 0.482, + 0.496 + ], + "angle": 0, + "content": "[11] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in neural information processing systems 33 (2020), 1877-1901." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.496, + 0.482, + 0.525 + ], + "angle": 0, + "content": "[12] Defu Cao, Wen Ye, and Yan Liu. [n.d.]. TimeDiT: General-purpose Diffusion Transformers for Time Series Foundation Model. In ICML 2024 Workshop on Foundation Models in the Wild." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.526, + 0.482, + 0.555 + ], + "angle": 0, + "content": "[13] Ching Chang, Wen-Chih Peng, and Tien-Fu Chen. 2023. LLM4TS: Two-Stage Fine-Tuning for Time-Series Forecasting with Pre-Trained LLMs. arXiv preprint arXiv:2308.08469 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.556, + 0.482, + 0.586 + ], + "angle": 0, + "content": "[14] Jiaqi Chen, Bingqian Lin, Ran Xu, Zhenhua Chai, Xiaodan Liang, and KwanYee K Wong. 2024. Mapgpt: Map-guided prompting with adaptive path planning for vision-and-language navigation. arXiv preprint arXiv:2401.07314 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.587, + 0.482, + 0.627 + ], + "angle": 0, + "content": "[15] Kang Chen, Tao Han, Junchao Gong, Lei Bai, Fenghua Ling, Jing-Jia Luo, Xi Chen, Leiming Ma, Tianning Zhang, Rui Su, et al. 2023. FengWu: Pushing the Skillful Global Medium-range Weather Forecast beyond 10 Days Lead. arXiv preprint arXiv:2304.02948 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.627, + 0.482, + 0.666 + ], + "angle": 0, + "content": "[16] Minze Chen, Zhenxiang Tao, Weitong Tang, Tingxin Qin, Rui Yang, and Chunli Zhu. 2024. Enhancing emergency decision-making with knowledge graphs and large language models. 
International Journal of Disaster Risk Reduction 113 (2024), 104804." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.667, + 0.482, + 0.697 + ], + "angle": 0, + "content": "[17] Jinguo Cheng, Chunwei Yang, Wanlin Cai, Yuxuan Liang, Qingsong Wen, and Yuankai Wu. 2024. NuwaTS: a Foundation Model Mending Every Incomplete Time Series. arXiv preprint arXiv:2405.15317 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.698, + 0.482, + 0.727 + ], + "angle": 0, + "content": "[18] Mingyue Cheng, Yiheng Chen, Qi Liu, Zhiding Liu, and Yucong Luo. 2024. Advancing time series classification with multimodal language modeling. arXiv preprint arXiv:2403.12371 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.728, + 0.482, + 0.768 + ], + "angle": 0, + "content": "[19] Garima Chhikara, Anurag Sharma, V Gurucharan, Kripabandhu Ghosh, and Abhijnan Chakraborty. 2024. LaMSUM: Amplifying Voices Against Harassment through LLM Guided Extractive Summarization of User Incident Reports. arXiv preprint arXiv:2406.15809 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.769, + 0.482, + 0.817 + ], + "angle": 0, + "content": "[20] Tushar Choudhary, Vikrant Dewangan, Shivam Chandhok, Shubham Priyadarshan, Anushka Jain, Arun K Singh, Siddharth Srivastava, Krishna Murthy Jatavalabhula, and K Madhava Krishna. 2024. Talk2BEV: Language-enhanced Bird's-eye view maps for autonomous driving. In 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 16345-16352." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.818, + 0.482, + 0.857 + ], + "angle": 0, + "content": "[21] Chen Chu, Hengcai Zhang, and Feng Lu. 2023. TrajGDM: A New Trajectory Foundation Model for Simulating Human Mobility. In Proceedings of the 31st ACM International Conference on Advances in Geographic Information Systems. 1-2." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.858, + 0.482, + 0.888 + ], + "angle": 0, + "content": "[22] Longchao Da, Minchiuan Gao, Hao Mei, and Hua Wei. 2023. Llm powered sim-to-real transfer for traffic signal control. arXiv preprint arXiv:2308.14284 (2023)." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.124, + 0.483, + 0.888 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.11, + 0.914, + 0.15 + ], + "angle": 0, + "content": "[23] Gordon Dai, Weijia Zhang, Jinhan Li, Siqi Yang, Srihas Rao, Arthur Caetano, Misha Sra, et al. 2024. Artificial leviathan: Exploring social evolution of llm agents through the lens of hobbesian social contract theory. arXiv preprint arXiv:2406.14373 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.151, + 0.914, + 0.201 + ], + "angle": 0, + "content": "[24] Zifeng Ding, Heling Cai, Jingpei Wu, Yunpu Ma, Ruotong Liao, Bo Xiong, and Volker Tresp. 2024. zrLLM: Zero-Shot Relational Learning on Temporal Knowledge Graphs with Large Language Models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 1877-1895." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.201, + 0.914, + 0.241 + ], + "angle": 0, + "content": "[25] Quang Minh Dinh, Minh Khoi Ho, Anh Quan Dang, and Hung Phong Tran. 2024. Trafficvlm: A controllable visual language model for traffic video captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshop. 7134-7143." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.242, + 0.914, + 0.261 + ], + "angle": 0, + "content": "[26] Manqing Dong, Hao Huang, and Longbing Cao. 2024. Can LLMs Serve As Time Series Anomaly Detectors? arXiv preprint arXiv:2408.03475 (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.261, + 0.914, + 0.301 + ], + "angle": 0, + "content": "[27] Vitor Gaboardi dos Santos, Guto Leoni Santos, Theo Lynn, and Boualem Benatallah. 2024. Identifying Citizen-Related Issues from Social Media Using LLM-Based Data Augmentation. In International Conference on Advanced Information Systems Engineering. Springer, 531-546." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.301, + 0.914, + 0.33 + ], + "angle": 0, + "content": "[28] Jie Feng, Yuwei Du, Jie Zhao, and Yong Li. 2024. Agentmove: Predicting human mobility anywhere using large language model based agentic framework. arXiv preprint arXiv:2408.13986 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.331, + 0.914, + 0.372 + ], + "angle": 0, + "content": "[29] Chen Gao, Xiaochong Lan, Nian Li, Yuan Yuan, Jingtao Ding, Zhilun Zhou, Fengli Xu, and Yong Li. 2024. Large language models empowered agent-based modeling and simulation: A survey and perspectives. Humanities and Social Sciences Communications 11, 1 (2024), 1-24." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.372, + 0.914, + 0.402 + ], + "angle": 0, + "content": "[30] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. 2023. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.402, + 0.914, + 0.432 + ], + "angle": 0, + "content": "[31] Vinicius G Goecks and Nicholas R Waytowich. 2023. Disasterresponsegpt: Large language models for accelerated plan of action development in disaster response scenarios. arXiv preprint arXiv:2306.17271 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.432, + 0.914, + 0.462 + ], + "angle": 0, + "content": "[32] Adam Goodge, Wee Siong Ng, Bryan Hooi, and See Kiong Ng. 2025. Spatio-Temporal Foundation Models: Vision, Challenges, and Opportunities. 
arXiv preprint arXiv:2501.09045 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.462, + 0.914, + 0.492 + ], + "angle": 0, + "content": "[33] Nate Gruver, Marc Finzi, Shikai Qiu, and Andrew Gordon Wilson. 2023. Large language models are zero-shot time series forecasters. Advances in neural information processing systems (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.492, + 0.914, + 0.523 + ], + "angle": 0, + "content": "[34] Nate Gruver, Marc Finzi, Shikai Qiu, and Andrew G Wilson. 2024. Large language models are zero-shot time series forecasters. Advances in Neural Information Processing Systems 36 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.523, + 0.914, + 0.553 + ], + "angle": 0, + "content": "[35] Qinghua Guan, Jinhui Ouyang, Di Wu, and Weiren Yu. 2024. CityGPT: Towards Urban IoT Learning, Analysis and Interaction with Multi-Agent System. arXiv preprint arXiv:2405.14691 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.553, + 0.914, + 0.593 + ], + "angle": 0, + "content": "[36] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.593, + 0.914, + 0.623 + ], + "angle": 0, + "content": "[37] Shengnan Guo, Youfang Lin, Ning Feng, Chao Song, and Huaiyu Wan. 2019. Attention based spatial-temporal graph convolutional networks for traffic flow forecasting. In AAAI, Vol. 33: 922-929." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.623, + 0.914, + 0.663 + ], + "angle": 0, + "content": "[38] Devashish Vikas Gupta, Azeez Syed Ali Ishaqui, and Divya Kiran Kadiyala. 2024. Geode: A Zero-shot Geospatial Question-Answering Agent with Explicit Reasoning and Precise Spatio-Temporal Retrieval. arXiv preprint arXiv:2407.11014 (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.663, + 0.914, + 0.684 + ], + "angle": 0, + "content": "[39] Wes Gurnee and Max Tegmark. 2023. Language models represent space and time. arXiv preprint arXiv:2310.02207 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.684, + 0.914, + 0.723 + ], + "angle": 0, + "content": "[40] Xixuan Hao, Wei Chen, Yibo Yan, Siru Zhong, Kun Wang, Qingsong Wen, and Yuxuan Liang. 2024. UrbanVLP: A Multi-Granularity Vision-Language Pre-Trained Foundation Model for Urban Indicator Prediction. arXiv preprint arXiv:2403.16831 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.723, + 0.914, + 0.754 + ], + "angle": 0, + "content": "[41] Ce Hou, Fan Zhang, Yong Li, Haifeng Li, Gengchen Mai, Yuhao Kang, Ling Yao, Wenhao Yu, Yao Yao, Song Gao, et al. 2025. Urban sensing in the era of large language models. The Innovation 6, 1 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.755, + 0.914, + 0.784 + ], + "angle": 0, + "content": "[42] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.784, + 0.914, + 0.814 + ], + "angle": 0, + "content": "[43] Jiaxi Hu, Disen Lan, Ziyu Zhou, Qingsong Wen, and Yuxuan Liang. 2024. TimeSSM: Simplifying and Unifying State Space Models for Time Series Forecasting. arXiv preprint arXiv:2405.16312 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.814, + 0.914, + 0.855 + ], + "angle": 0, + "content": "[44] Yiheng Huang, Xiaowei Mao, Shengnan Guo, Yubin Chen, Junfeng Shen, Tiankuo Li, Youfang Lin, and Huaiyu Wan. 2024. STD-PLM: Understanding Both Spatial and Temporal Properties of Spatial-Temporal Data with PLM. arXiv preprint arXiv:2407.09096 (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.855, + 0.914, + 0.885 + ], + "angle": 0, + "content": "[45] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. 2024. Gpt-4o system card. arXiv preprint arXiv:2410.21276 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.885, + 0.914, + 0.905 + ], + "angle": 0, + "content": "[46] Junzhong Ji, Fan Yu, and Minglong Lei. 2022. Self-Supervised Spatiotemporal Graph Neural Networks With Self-Distillation for Traffic Prediction. IEEE TITS" + }, + { + "type": "list", + "bbox": [ + 0.523, + 0.11, + 0.914, + 0.905 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.314, + 0.088 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.82, + 0.077, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Yuxuan Liang et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.11, + 0.148, + 0.12 + ], + "angle": 0, + "content": "(2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.121, + 0.482, + 0.151 + ], + "angle": 0, + "content": "[47] Yue Jiang, Qin Chao, Yile Chen, Xiucheng Li, Shuai Liu, and Gao Cong. 2024. UrbanLLM: Autonomous Urban Activity Planning and Management with Large Language Models. arXiv preprint arXiv:2406.12360 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.151, + 0.482, + 0.211 + ], + "angle": 0, + "content": "[48] Yushan Jiang, Zijie Pan, Xikun Zhang, Sahil Garg, Anderson Schneider, Yuriy Nevmyvaka, and Dongjin Song. 2024. Empowering Time Series Analysis with Large Language Models: A Survey. In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, IJCAI-24, Kate Larson (Ed.). International Joint Conferences on Artificial Intelligence Organization, 8095-8103. 
https://doi.org/10.24963/ijcai.2024/895 Survey Track." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.211, + 0.482, + 0.242 + ], + "angle": 0, + "content": "[49] Yushan Jiang, Wenchao Yu, Geon Lee, Dongjin Song, Kijung Shin, Wei Cheng, Yanchi Liu, and Haifeng Chen. 2026. Explainable Multi-modal Time Series Prediction with LLM-in-the-Loop. arXiv preprint arXiv:2503.01013 (2026)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.242, + 0.482, + 0.282 + ], + "angle": 0, + "content": "[50] WANG JIAWEI, Renhe Jiang, Chuang Yang, Zengqing Wu, Ryosuke Shibasaki, Noboru Koshizuka, Chuan Xiao, et al. 2024. Large language models as urban residents: An llm agent framework for personal mobility generation. Advances in Neural Information Processing Systems 37 (2024), 124547-124574." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.282, + 0.482, + 0.322 + ], + "angle": 0, + "content": "[51] Guangyin Jin, Yuxuan Liang, Yuchen Fang, Zezhi Shao, Jincai Huang, Junbo Zhang, and Yu Zheng. 2023. Spatio-temporal graph neural networks for predictive learning in urban computing: A survey. IEEE Transactions on Knowledge and Data Engineering (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.322, + 0.482, + 0.362 + ], + "angle": 0, + "content": "[52] Ming Jin, Huan Yee Koh, Qingsong Wen, Daniele Zambon, Cesare Alippi, Geoffrey I Webb, Irwin King, and Shirui Pan. 2024. A survey on graph neural networks for time series: Forecasting, classification, imputation, and anomaly detection. IEEE Transactions on Pattern Analysis and Machine Intelligence (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.362, + 0.482, + 0.402 + ], + "angle": 0, + "content": "[53] Ming Jin, Shiyu Wang, Lintao Ma, Zhixuan Chu, James Y Zhang, Xiaoming Shi, Pin-Yu Chen, Yuxuan Liang, Yuan-Fang Li, Shirui Pan, et al. 2023. Time-LLM: Time series forecasting by reprogramming large language models. arXiv preprint arXiv:2310.01728 (2023)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.402, + 0.482, + 0.442 + ], + "angle": 0, + "content": "[54] Ming Jin, Qingsong Wen, Yuxuan Liang, Chaoli Zhang, Siqiao Xue, Xue Wang, James Zhang, Yi Wang, Haifeng Chen, Xiaoli Li, et al. 2023. Large models for time series and spatio-temporal data: A survey and outlook. arXiv preprint arXiv:2310.10196 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.443, + 0.482, + 0.483 + ], + "angle": 0, + "content": "[55] Ye Jin, Xiaoxi Shen, Huiling Peng, Xiaohan Liu, Jingli Qin, Jiayang Li, Jintao Xie, Peizhong Gao, Guyue Zhou, and Jiangtao Gong. 2023. Surrealdriver: Designing generative driver agent simulation framework in urban contexts based on large language model. arXiv preprint arXiv:2309.13193 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.483, + 0.482, + 0.513 + ], + "angle": 0, + "content": "[56] Chenlu Ju, Jiaxin Liu, Shobhit Sinha, Hao Xue, and Flora Salim. 2025. TrajLLM: A Modular LLM-Enhanced Agent-Based Framework for Realistic Human Trajectory Simulation. (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.513, + 0.482, + 0.553 + ], + "angle": 0, + "content": "[57] Subbarao Kambhampati, Karthik Valmeekam, Lin Guan, Mudit Verma, Kaya Stechly, Siddhant Bhambri, Lucas Saldyt, and Anil Murthy. 2024. LLMs can't plan, but can help planning in LLM-modulo frameworks. arXiv preprint arXiv:2402.01817 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.553, + 0.482, + 0.583 + ], + "angle": 0, + "content": "[58] Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of naacL-HLT, Vol. 1. Minneapolis, Minnesota." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.583, + 0.482, + 0.623 + ], + "angle": 0, + "content": "[59] Dmitrii Kochkov, Janni Yuval, Ian Langmore, Peter Norgaard, Jamie Smith, Griffin Mooers, Milan Klower, James Lottes, Stephan Rasp, Peter Duben, et al. 2024. Neural general circulation models for weather and climate. Nature 632, 8027 (2024), 1060–1066." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.623, + 0.482, + 0.664 + ], + "angle": 0, + "content": "[60] Alexandre Lacoste, Nils Lehmann, Pau Rodriguez, Evan Sherwin, Hannah Kerner, Björn Lütjens, Jeremy Irvin, David Dao, Hamed Alemohammad, Alexandre Drouin, et al. 2024. Geo-bench: Toward foundation models for earth monitoring. Advances in Neural Information Processing Systems 36 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.664, + 0.482, + 0.694 + ], + "angle": 0, + "content": "[61] Siqi Lai, Zhao Xu, Weijia Zhang, Hao Liu, and Hui Xiong. 2025. Large language models as traffic signal control agents: Capacity and opportunity. In Proceedings of the 31st ACM SIGKDD conference on knowledge discovery and data mining." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.694, + 0.482, + 0.735 + ], + "angle": 0, + "content": "[62] Remi Lam, Alvaro Sanchez-Gonzalez, Matthew Willson, Peter Wirsnsberger, Meire Fortunato, Ferran Alet, Suman Ravuri, Timo Ewalds, Zach Eaton-Rosen, Weihua Hu, et al. 2023. GraphCast: Learning skillful medium-range global weather forecasting. Science 382, 6677 (2023), 1416-1421." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.735, + 0.482, + 0.765 + ], + "angle": 0, + "content": "[63] Geon Lee, Wenchao Yu, Kijung Shin, Wei Cheng, and Haifeng Chen. 2025. TimeCAP: Learning to Contextualize, Augment, and Predict Time Series Events with Large Language Model Agents. arXiv preprint arXiv:2502.11418 (2025)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.765, + 0.482, + 0.794 + ], + "angle": 0, + "content": "[64] Mingcong Lei, Yiming Zhao, Ge Wang, Zhixin Mai, Shuguang Cui, Yatong Han, and Jinke Ren. 2025. STMA: A Spatio-Temporal Memory Agent for Long-Horizon Embodied Task Planning. arXiv preprint arXiv:2502.10177 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.795, + 0.482, + 0.825 + ], + "angle": 0, + "content": "[65] Zhenyu Lei, Yushun Dong, Weiyu Li, Rong Ding, Qi Wang, and Jundong Li. 2025. Harnessing Large Language Models for Disaster Management: A Survey. arXiv preprint arXiv:2501.06932 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.825, + 0.482, + 0.865 + ], + "angle": 0, + "content": "[66] Jiangtong Li, Li Niu, and Liqing Zhang. 2022. From representation to reasoning: Towards both evidence and commonsense reasoning for video question-answering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 21273–21282." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.865, + 0.482, + 0.896 + ], + "angle": 0, + "content": "[67] Jinpeng Li, Haiping Wang, Yuan Liu, Zhiyang Dou, Yuexin Ma, Sibei Yang, Yuan Li, Wenping Wang, Zhen Dong, Bisheng Yang, et al. [n.d.]. CityAnchor: City-scale 3D Visual Grounding with Multi-modality LLMs. In The Thirteenth" + }, + { + "type": "list", + "bbox": [ + 0.092, + 0.11, + 0.482, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.548, + 0.11, + 0.795, + 0.12 + ], + "angle": 0, + "content": "International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.121, + 0.914, + 0.161 + ], + "angle": 0, + "content": "[68] Peibo Li, Maarten de Rijke, Hao Xue, Shuang Ao, Yang Song, and Flora D Salim. 2024. Large language models for next point-of-interest recommendation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 
1463-1472." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.161, + 0.914, + 0.2 + ], + "angle": 0, + "content": "[69] Wenbin Li, Di Yao, Ruibo Zhao, Wenjie Chen, Zijie Xu, Chengxue Luo, Chang Gong, Quanliang Jing, Haining Tan, and Jingping Bi. 2024. STBench: Assessing the ability of large language models in spatio-temporal analysis. arXiv preprint arXiv:2406.19065 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.201, + 0.914, + 0.231 + ], + "angle": 0, + "content": "[70] Yaguang Li, Rose Yu, Cyrus Shahabi, and Yan Liu. 2017. Diffusion convolutional recurrent neural network: Data-driven traffic forecasting. arXiv preprint arXiv:1707.01926 (2017)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.231, + 0.914, + 0.261 + ], + "angle": 0, + "content": "[71] Zekun Li, Jina Kim, Yao-Yi Chiang, and Muhao Chen. 2022. SpaBERT: A pretrained language model from geographic data for geo-entity representation. arXiv preprint arXiv:2210.12213 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.261, + 0.914, + 0.292 + ], + "angle": 0, + "content": "[72] Zhonghang Li, Long Xia, Lei Shi, Yong Xu, Dawei Yin, and Chao Huang. 2024. Opencity: Open spatio-temporal foundation models for traffic prediction. arXiv preprint arXiv:2408.10269 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.292, + 0.914, + 0.332 + ], + "angle": 0, + "content": "[73] Zhonghang Li, Lianghao Xia, Jiabin Tang, Yong Xu, Lei Shi, Long Xia, Dawei Yin, and Chao Huang. 2024. Urbangpt: Spatio-temporal large language models. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 5351-5362." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.332, + 0.914, + 0.362 + ], + "angle": 0, + "content": "[74] Zhonghang Li, Lianghao Xia, Yong Xu, and Chao Huang. 2024. GPT-ST: generative pre-training of spatio-temporal graph neural networks. Advances in Neural Information Processing Systems 36 (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.362, + 0.914, + 0.393 + ], + "angle": 0, + "content": "[75] Zongrong Li, Junhao Xu, Siqin Wang, Yifan Wu, and Haiyang Li. 2024. StreetviewLLM: Extracting Geographic Information Using a Chain-of-Thought Multimodal Large Language Model. arXiv preprint arXiv:2411.14476 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.393, + 0.914, + 0.432 + ], + "angle": 0, + "content": "[76] Zhe Li, Ronghui Xu, Jilin Hu, Zhong Peng, Xi Lu, Chenjuan Guo, and Bin Yang. 2024. Ocean Significant Wave Height Estimation with Spatio-temporally Aware Large Language Models. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 3892-3896." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.433, + 0.914, + 0.462 + ], + "angle": 0, + "content": "[77] Zekun Li, Wenxuan Zhou, Yao-Yi Chiang, and Muhao Chen. 2023. Geolm: Empowering language models for geospatially grounded language understanding. arXiv preprint arXiv:2310.14478 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.462, + 0.914, + 0.492 + ], + "angle": 0, + "content": "[78] Yuxuan Liang, Songyu Ke, Junbo Zhang, Xiuwen Yi, and Yu Zheng. 2018. GeoMAN: Multi-level attention networks for geo-sensory time series prediction.. In IJCAI, Vol. 2018. 3428-3434." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.492, + 0.914, + 0.523 + ], + "angle": 0, + "content": "[79] Yuebing Liang, Yichao Liu, Xiaohan Wang, and Zhan Zhao. 2023. Exploring large language models for human mobility prediction under public events. arXiv preprint arXiv:2311.17351 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.523, + 0.914, + 0.563 + ], + "angle": 0, + "content": "[80] Yuxuan Liang, Kun Ouyang, Yiwei Wang, Zheyi Pan, Yifang Yin, Hongyang Chen, Junbo Zhang, Yu Zheng, David S Rosenblum, and Roger Zimmermann. 2022. Mixed-Order Relation-Aware Recurrent Neural Networks for Spatio-Temporal Forecasting. IEEE TKDE (2022)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.563, + 0.914, + 0.604 + ], + "angle": 0, + "content": "[81] Yuxuan Liang, Haomin Wen, Yuqi Nie, Yushan Jiang, Ming Jin, Dongjin Song, Shirui Pan, and Qingsong Wen. 2024. Foundation models for time series analysis: A tutorial and survey. In Proceedings of the 30th ACM SIGKDD conference on knowledge discovery and data mining. 6555-6565." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.604, + 0.914, + 0.644 + ], + "angle": 0, + "content": "[82] Yuxuan Liang, Yutong Xia, Songyu Ke, Yiwei Wang, Qingsong Wen, Junbo Zhang, Yu Zheng, and Roger Zimmermann. 2023. Airformer: Predicting nationwide air quality in china with transformers. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 37. 14329-14337." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.644, + 0.914, + 0.673 + ], + "angle": 0, + "content": "[83] Jaesung Lim, Seunghwan An, Gyeongdong Woo, ChangHyun Kim, and Jong-June Jeon. [n.d.]. Context-Driven Missing Data Imputation via Large Language Model. ([n.d.])." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.673, + 0.914, + 0.713 + ], + "angle": 0, + "content": "[84] Yan Lin, Yichen Liu, Zeyu Zhou, Haomin Wen, Erwen Zheng, Shengnan Guo, Youfang Lin, and Huaiyu Wan. 2024. PTraJM: Efficient and Semantic-rich Trajectory Learning with Pretrained Trajectory-Mamba. arXiv preprint arXiv:2408.04916 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.713, + 0.914, + 0.755 + ], + "angle": 0, + "content": "[85] Yan Lin, Tonglong Wei, Zeyu Zhou, Haomin Wen, Jilin Hu, Shengnan Guo, Youfang Lin, and Huaiyu Wan. 2024. TrajFM: A Vehicle Trajectory Foundation Model for Region and Task Transferability. arXiv preprint arXiv:2408.15251 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.755, + 0.914, + 0.785 + ], + "angle": 0, + "content": "[86] Chenxi Liu, Sun Yang, Qianxiong Xu, Zhishuai Li, Cheng Long, Ziyue Li, and Rui Zhao. 2024. 
Spatial-temporal large language model for traffic prediction. arXiv preprint arXiv:2401.10134 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.785, + 0.914, + 0.825 + ], + "angle": 0, + "content": "[87] Haoxin Liu, Zhiyuan Zhao, Jindong Wang, Harshavardhan Kamarthi, and B Aditya Prakash. 2024. Lstprompt: Large language models as zero-shot time series forecasters by long-short-term prompting. arXiv preprint arXiv:2402.16132 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.825, + 0.914, + 0.865 + ], + "angle": 0, + "content": "[88] Jiaqi Liu, Peng Hang, Xiaocong Zhao, Jianqiang Wang, and Jian Sun. 2024. DDM-lag: A diffusion-based decision-making model for autonomous vehicles with lagrangian safety enhancement. IEEE Transactions on Artificial Intelligence (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.865, + 0.914, + 0.896 + ], + "angle": 0, + "content": "[89] Jun Liu, Chaoyun Zhang, Jiaxu Qian, Minghua Ma, Si Qin, Chetan Bansal, Qingwei Lin, Saravanan Rajmohan, and Dongmei Zhang. 2024. Large language models can deliver accurate and interpretable time series anomaly detection." + }, + { + "type": "list", + "bbox": [ + 0.523, + 0.11, + 0.914, + 0.896 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.447, + 0.088 + ], + "angle": 0, + "content": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey" + }, + { + "type": "header", + "bbox": [ + 0.685, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.11, + 0.295, + 0.12 + ], + "angle": 0, + "content": "arXiv preprint arXiv:2405.15370 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.121, + 0.482, + 0.15 + ], + "angle": 0, + "content": "[90] Lei Liu, Shuo Yu, Runze Wang, Zhenxun Ma, and Yanming Shen. 2024. How can large language models understand spatial-temporal data? 
arXiv preprint arXiv:2401.14192 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.151, + 0.482, + 0.19 + ], + "angle": 0, + "content": "[91] Xu Liu, Junfeng Hu, Yuan Li, Shizhe Diao, Yuxuan Liang, Bryan Hooi, and Roger Zimmermann. 2024. Unitime: A language-empowered unified model for cross-domain time series forecasting. In Proceedings of the ACM Web Conference 2024. 4095-4106." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.191, + 0.482, + 0.221 + ], + "angle": 0, + "content": "[92] Xu Liu, Yuxuan Liang, Chao Huang, Yu Zheng, Bryan Hooi, and Roger Zimmermann. 2022. When do contrastive learning signals help spatio-temporal graph forecasting? In SIGSPATIAL. 1-12." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.221, + 0.482, + 0.261 + ], + "angle": 0, + "content": "[93] Xu Liu, Juncheng Liu, Gerald Woo, Taha Aksu, Yuxuan Liang, Roger Zimmermann, Chenghao Liu, Silvio Savarese, Caiming Xiong, and Doyen Sahoo. 2024. Moirai-MoE: Empowering Time Series Foundation Models with Sparse Mixture of Experts. arXiv preprint arXiv:2410.10469 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.261, + 0.482, + 0.291 + ], + "angle": 0, + "content": "[94] Yu Liu, Jingtao Ding, Yanjie Fu, and Yong Li. 2023. Urban knowledge graph system. ACM Transactions on Intelligent Systems and Technology 14, 4 (2023), 1-25." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.291, + 0.482, + 0.322 + ], + "angle": 0, + "content": "[95] Lin Long, Rui Wang, Ruixuan Xiao, Junbo Zhao, Xiao Ding, Gang Chen, and Haobo Wang. 2024. On llms-driven synthetic data generation, curation, and evaluation: A survey. arXiv preprint arXiv:2406.15126 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.322, + 0.482, + 0.342 + ], + "angle": 0, + "content": "[96] Qingyue Long, Yuan Yuan, and Yong Li. 2024. A Universal Model for Human Mobility Prediction. arXiv preprint arXiv:2412.15294 (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.342, + 0.482, + 0.381 + ], + "angle": 0, + "content": "[97] Haoyu Ma, Yushu Chen, Wenlai Zhao, Jinzhe Yang, Yingsheng Ji, Xinghua Xu, Xiaozhu Liu, Hao Jing, Shengzhuo Liu, and Guangwen Yang. 2024. A Mamba Foundation Model for Time Series Forecasting. arXiv preprint arXiv:2411.02941 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.382, + 0.482, + 0.412 + ], + "angle": 0, + "content": "[98] Xin Man, Chenghong Zhang, Changyu Li, and Jie Shao. 2023. W-MAE: Pretrained weather model with masked autoencoder for multi-variable weather forecasting. arXiv preprint arXiv:2304.08754 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.412, + 0.482, + 0.442 + ], + "angle": 0, + "content": "[99] Rohin Manvi, Samar Khanna, Gengchen Mai, Marshall Burke, David Lobell, and Stefano Ermon. 2023. Geolm: Extracting geospatial knowledge from large language models. arXiv preprint arXiv:2310.06213 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.443, + 0.482, + 0.482 + ], + "angle": 0, + "content": "[100] Rohin Manvi, Samar Khanna, Gengchen Mai, Marshall Burke, David B Lobell, and Stefano Ermon. 2024. GeoLLM: Extracting Geospatial Knowledge from Large Language Models. In The Twelfth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.483, + 0.482, + 0.513 + ], + "angle": 0, + "content": "[101] Justin M Mittelstädt, Julia Maier, Panja Goerke, Frank Zinn, and Michael Hermes. 2024. Large language models can outperform humans in social situational judgments. Scientific Reports 14, 1 (2024), 27449." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.513, + 0.482, + 0.553 + ], + "angle": 0, + "content": "[102] Seungwhan Moon, Andrea Madotto, Zhaojiang Lin, Aparajita Saraf, Amy Bearman, and Babak Damavandi. 2023. IMU2CLIP: Language-grounded Motion Sensor Translation with Multimodal Contrastive Learning. 
In Findings of the Association for Computational Linguistics: EMNLP 2023. 13246-13253." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.553, + 0.482, + 0.573 + ], + "angle": 0, + "content": "[103] Alameen Najjar. 2023. Towards A Foundation Model For Trajectory Intelligence. In IEEE ICDMW. IEEE, 832-835." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.573, + 0.482, + 0.603 + ], + "angle": 0, + "content": "[104] Tung Nguyen, Johannes Brandstetter, Ashish Kapoor, Jayesh K Gupta, and Aditya Grover. 2023. Climax: A foundation model for weather and climate. International Conference on Machine Learning (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.604, + 0.482, + 0.633 + ], + "angle": 0, + "content": "[105] Yansong Ning and Hao Liu. 2024. UrbanKGent: A Unified Large Language Model Agent Framework for Urban Knowledge Graph Construction. arXiv preprint arXiv:2402.06861 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.634, + 0.482, + 0.664 + ], + "angle": 0, + "content": "[106] Yansong Ning, Hao Liu, Hao Wang, Zhenyu Zeng, and Hui Xiong. 2024. UUKG: unified urban knowledge graph dataset for urban spatiotemporal prediction. Advances in Neural Information Processing Systems 36 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.664, + 0.482, + 0.704 + ], + "angle": 0, + "content": "[107] Joon Sung Park, Joseph O'Brien, Carrie Jun Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. 2023. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology. 1-22." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.704, + 0.482, + 0.754 + ], + "angle": 0, + "content": "[108] Jaideep Pathak, Shashank Subramanian, Peter Harrington, Sanjeev Raja, Ashesh Chattopadhyay, Morteza Mardani, Thorsten Kurth, David Hall, Zongyi Li, Kamyar Azizzadenesheli, et al. 2022. 
Fourcastnet: A global data-driven high-resolution weather model using adaptive fourier neural operators. arXiv preprint arXiv:2202.11214 (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.755, + 0.482, + 0.784 + ], + "angle": 0, + "content": "[109] Jinghua Piao, Zhihong Lu, Chen Gao, Fengli Xu, Fernando P Santos, Yong Li, and James Evans. 2025. Emergence of human-like polarization among large language model agents. arXiv preprint arXiv:2501.05171 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.785, + 0.482, + 0.824 + ], + "angle": 0, + "content": "[110] Arian Prabowo, Wei Shao, Hao Xue, Piotr Koniusz, and Flora D Salim. 2023. Because every sensor is unique, so is every pair: Handling dynamicity in traffic forecasting. In Proceedings of the 8th ACM/IEEE Conference on Internet of Things Design and Implementation. 93-104." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.825, + 0.482, + 0.855 + ], + "angle": 0, + "content": "[111] Arian Prabowo, Hao Xue, Wei Shao, Piotr Koniusz, and Flora D Salim. 2024. Traffic forecasting on new roads using spatial contrastive pre-training (SCPT). Data Mining and Knowledge Discovery 38, 3 (2024), 913-937." + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.855, + 0.482, + 0.903 + ], + "angle": 0, + "content": "[112] Siyuan Qi, Shuo Chen, Yexin Li, Xiangyu Kong, Junqi Wang, Bangcheng Yang, Pring Wong, Yifan Zhong, Xiaoyuan Zhang, Zhaowei Zhang, et al. 2024. CivRealm: A Learning and Reasoning Odyssey in Civilization for Decision-Making Agents. In The Twelfth International Conference on Learning Representations." + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.11, + 0.482, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.11, + 0.913, + 0.15 + ], + "angle": 0, + "content": "[113] Kyle K Qin, Yongli Ren, Wei Shao, Brennan Lake, Filippo Privitera, and Flora D Salim. 2023. 
Multiple-level point embedding for solving human trajectory imputation with prediction. ACM Transactions on Spatial Algorithms and Systems 9, 2 (2023), 1-22." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.151, + 0.913, + 0.19 + ], + "angle": 0, + "content": "[114] Hao Sha, Yao Mu, Yuxuan Jiang, Li Chen, Chenfeng Xu, Ping Luo, Shengbo Eben Li, Masayoshi Tomizuka, Wei Zhan, and Mingyu Ding. 2023. *Languagempc: Large language models as decision makers for autonomous driving.* arXiv preprint arXiv:2310.03026 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.191, + 0.913, + 0.23 + ], + "angle": 0, + "content": "[115] Jie-Jing Shao, Xiao-Wen Yang, Bo-Wen Zhang, Baizhi Chen, Wen-Da Wei, Lan-Zhe Guo, and Yu-feng Li. 2024. ChinaTravel: A Real-World Benchmark for Language Agents in Chinese Travel Planning. arXiv preprint arXiv:2412.13682 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.231, + 0.913, + 0.271 + ], + "angle": 0, + "content": "[116] Wei Shao, Zhiling Jin, Shuo Wang, Yufan Kang, Xiao Xiao, Hamid Menouar, Zhaofeng Zhang, Junshan Zhang, and Flora Salim. 2022. Long-term spatiotemporal forecasting via dynamic multiple-graph attention. In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, JFCAI-22." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.271, + 0.913, + 0.311 + ], + "angle": 0, + "content": "[117] Zezhi Shao, Zhao Zhang, Fei Wang, and Yongjun Xu. 2022. Pre-training enhanced spatial-temporal graph neural network for multivariate time series forecasting. In Proceedings of the 28th ACM SIGKDD conference on knowledge discovery and data mining. 1567-1577." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.312, + 0.913, + 0.352 + ], + "angle": 0, + "content": "[118] Qichao Shentu, Beibu Li, Kai Zhao, Yang Shu, Zhongwen Rao, Lujia Pan, Bin Yang, and Chenjuan Guo. 2024. Towards a General Time Series Anomaly Detector with Adaptive Bottlenecks and Dual Adversarial Decoders. 
arXiv preprint arXiv:2405.15273 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.352, + 0.913, + 0.392 + ], + "angle": 0, + "content": "[119] Xiaoming Shi, Shiyu Wang, Yuqi Nie, Dianqi Li, Zhou Ye, Qingsong Wen, and Ming Jin. 2025. Time-MoE: Billion-Scale Time Series Foundation Models with Mixture of Experts. In The Thirteenth International Conference on Learning Representations (ICLR)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.392, + 0.913, + 0.432 + ], + "angle": 0, + "content": "[120] Xiaoming Shi, Siqiao Xue, Kangrui Wang, Fan Zhou, James Zhang, Jun Zhou, Chenhao Tan, and Hongyuan Mei. 2023. Language models can improve event prediction by few-shot abductive reasoning. Advances in Neural Information Processing Systems 36 (2023), 29532-29557." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.433, + 0.913, + 0.462 + ], + "angle": 0, + "content": "[121] Ethan Steinberg, Jason Fries, Yizhe Xu, and Nigam Shah. 2023. MOTOR: A Time-To-Event Foundation Model For Structured Medical Records. arXiv preprint arXiv:2301.03150 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.463, + 0.913, + 0.502 + ], + "angle": 0, + "content": "[122] Mingtian Tan, Mike A Merrill, Vinayak Gupta, Tim Althoff, and Thomas Hartvigsen. 2024. Are language models actually useful for time series forecasting?. In The Thirty-eighth Annual Conference on Neural Information Processing Systems." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.503, + 0.913, + 0.553 + ], + "angle": 0, + "content": "[123] Yihong Tang, Zhaokai Wang, Ao Qu, Yihao Yan, Zhaofeng Wu, Dingyi Zhuang, Jushi Kai, Kebing Hou, Xiaotong Guo, Jinhua Zhao, et al. 2024. ITINERA: Integrating Spatial Optimization with Large Language Models for Open-domain Urban Itinerary Planning. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track. 1413-1432." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.553, + 0.913, + 0.593 + ], + "angle": 0, + "content": "[124] Nicolas Tempelmeier, Simon Gottschalk, and Elena Demidova. 2021. GeoVectors: a linked open corpus of OpenStreetMap Embeddings on world scale. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management. 4604-4612." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.593, + 0.913, + 0.633 + ], + "angle": 0, + "content": "[125] Saeid Ashraf Vaghefi, Dominik Stammbach, Veruska Muccione, Julia Bingler, Jingwei Ni, Mathias Kraus, Simon Allen, Chiara Colesanti-Senni, Tobias Wekhof, Tobias Schimanski, et al. 2023. ChatClimate: Grounding conversational AI in climate science. Communications Earth & Environment 4, 1 (2023), 480." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.634, + 0.913, + 0.673 + ], + "angle": 0, + "content": "[126] Jiawei Wang, Renhe Jiang, Chuang Yang, Zengqing Wu, Makoto Onizuka, Ryosuke Shibasaki, Noboru Koshizuka, and Chuan Xiao. 2024. Large language models as urban residents: An llm agent framework for personal mobility generation. Advances in Neural Information Processing Systems (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.673, + 0.913, + 0.704 + ], + "angle": 0, + "content": "[127] Junyang Wang, Haiyang Xu, Jiabo Ye, Ming Yan, Weizhou Shen, Ji Zhang, Fei Huang, and Jitao Sang. 2024. Mobile-agent: Autonomous multi-modal mobile device agent with visual perception. arXiv preprint arXiv:2401.16158 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.704, + 0.913, + 0.744 + ], + "angle": 0, + "content": "[128] Kun Wang, Hao Wu, Yifan Duan, Guibin Zhang, Kai Wang, Xiaojiang Peng, Yu Zheng, Yuxuan Liang, and Yang Wang. 2024. NuwaDynamics: Discovering and Updating in Causal Spatio-Temporal Modeling. In The Twelfth International Conference on Learning Representations." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.744, + 0.913, + 0.765 + ], + "angle": 0, + "content": "[129] Senzhang Wang, Jiannong Cao, and Philip Yu. 2020. Deep learning for spatiotemporal data mining: A survey. IEEE TKDE (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.765, + 0.913, + 0.804 + ], + "angle": 0, + "content": "[130] Xuhong Wang, Ding Wang, Liang Chen, Fei-Yue Wang, and Yilun Lin. 2023. Building transportation foundation model via generative graph transformer. In 2023 IEEE 26th International Conference on Intelligent Transportation Systems (ITSC). IEEE, 6042-6047." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.805, + 0.913, + 0.844 + ], + "angle": 0, + "content": "[131] Yihang Wang, Yuying Qiu, Peng Chen, Kai Zhao, Yang Shu, Zhongwen Rao, Lujia Pan, Bin Yang, and Chenjuan Guo. 2024. ROSE: Register Assisted General Time Series Forecasting with Decomposed Frequency Learning. CoRR abs/2405.17478 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.845, + 0.913, + 0.886 + ], + "angle": 0, + "content": "[132] Yu Wang, Tongya Zheng, Shunyu Liu, Zunlei Feng, Kaixuan Chen, Yunzhi Hao, and Mingli Song. 2024. Spatiotemporal-Augmented Graph Neural Networks for Human Mobility Simulation. IEEE Transactions on Knowledge and Data Engineering (2024)." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.11, + 0.913, + 0.886 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.314, + 0.087 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.819, + 0.077, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Yuxuan Liang et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.109, + 0.482, + 0.16 + ], + "angle": 0, + "content": "[133] Zihao Wang, Shaofei Cai, Guanzhou Chen, Anji Liu, Xiaojian Ma, Yitao Liang, and Team CraftJarvis. 2023. 
Describe, explain, plan and select: interactive planning with large language models enables open-world multi-task agents. In Proceedings of the 37th International Conference on Neural Information Processing Systems. 34153-34189." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.16, + 0.482, + 0.19 + ], + "angle": 0, + "content": "[134] Zhaonan Wang, Renhe Jiang, Hao Xue, Flora D Salim, Xuan Song, and Ryosuke Shibasaki. 2022. Event-aware multimodal mobility nowcasting. In AAAI, Vol. 36. 4228-4236." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.19, + 0.482, + 0.221 + ], + "angle": 0, + "content": "[135] Tonglong Wei, Yan Lin, Youfang Lin, Shengnan Guo, Jilin Hu, Gao Cong, and Huaiyu Wan. 2024. PTR: A Pre-trained Language Model for Trajectory Recovery. arXiv preprint arXiv:2410.14281 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.221, + 0.482, + 0.252 + ], + "angle": 0, + "content": "[136] Haomin Wen, Youfang Lin, Yutong Xia, Huaiyu Wan, Qingsong Wen, Roger Zimmermann, and Yuxuan Liang. 2023. Diffstg: Probabilistic spatio-temporal graph forecasting with denoising diffusion models. In ACM SIGSPATIAL. 1-12." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.252, + 0.482, + 0.281 + ], + "angle": 0, + "content": "[137] Gerald Woo, Chenghao Liu, Akshit Kumar, Caiming Xiong, Silvio Savarese, and Doyen Sahoo. 2024. Unified training of universal time series forecasting transformers. (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.281, + 0.482, + 0.322 + ], + "angle": 0, + "content": "[138] Lixia Wu, Jianlin Liu, Junhong Lou, Minhui Deng, Jianbin Zheng, Haomin Wen, Chao Song, and Shu He. 2024. G2PTL: A Geography-Graph Pre-trained Model. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 4991-4999." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.322, + 0.482, + 0.362 + ], + "angle": 0, + "content": "[139] Wansen Wu, Weiyi Yang, Juanjuan Li, Yong Zhao, Zhengqiu Zhu, Bin Chen, Sihang Qiu, Yong Peng, and Fei-Yue Wang. 2024. Autonomous crowdsensing: operating and organizing crowdsensing for sensing automation. IEEE Transactions on Intelligent Vehicles (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.362, + 0.482, + 0.392 + ], + "angle": 0, + "content": "[140] Zonghan Wu, Shirui Pan, Guodong Long, Jing Jiang, Xiaojun Chang, and Chengqi Zhang. 2020. Connecting the dots: Multivariate time series forecasting with graph neural networks. In SIGKDD. 753-763." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.392, + 0.482, + 0.422 + ], + "angle": 0, + "content": "[141] Zonghan Wu, Shirui Pan, Guodong Long, Jing Jiang, and Chengqi Zhang. 2019. Graph wavenet for deep spatial-temporal graph modeling. arXiv preprint arXiv:1906.00121 (2019)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.422, + 0.482, + 0.463 + ], + "angle": 0, + "content": "[142] Congxi Xiao, Jingbo Zhou, Yixiong Xiao, Jizhou Huang, and Hui Xiong. 2024. ReFound: Crafting a Foundation Model for Urban Region Understanding upon Language and Visual Foundations. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 3527-3538." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.463, + 0.482, + 0.502 + ], + "angle": 0, + "content": "[143] Mengxi Xiao, Zihao Jiang, Lingfei Qian, Zhengyu Chen, Yueru He, Yijing Xu, Yuecheng Jiang, Dong Li, Ruey-Ling Weng, Min Peng, et al. 2025. Retrievalaugmented Large Language Models for Financial Time Series Forecasting. arXiv preprint arXiv:2502.05878 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.503, + 0.482, + 0.533 + ], + "angle": 0, + "content": "[144] Fengli Xu, Jun Zhang, Chen Gao, Jie Feng, and Yong Li. 2023. 
Urban generative intelligence (ugi): A foundational platform for agents in embodied city environment. arXiv preprint arXiv:2312.11813 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.533, + 0.482, + 0.563 + ], + "angle": 0, + "content": "[145] Jiehui Xu, Haixu Wu, Jianmin Wang, and Mingsheng Long. 2022. Anomaly Transformer: Time Series Anomaly Detection with Association Discrepancy. In International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.563, + 0.482, + 0.593 + ], + "angle": 0, + "content": "[146] Mingxing Xu, Wenrui Dai, Chunmiao Liu, Xing Gao, Weiyao Lin, Guo-Jun Qi, and Hongkai Xiong. 2020. Spatial-temporal transformer networks for traffic flow forecasting. arXiv preprint arXiv:2001.02908 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.593, + 0.482, + 0.623 + ], + "angle": 0, + "content": "[147] Hao Xue and Flora D Salim. 2023. Promptcast: A new prompt-based learning paradigm for time series forecasting. IEEE Transactions on Knowledge and Data Engineering 36, 11 (2023), 6851-6864." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.623, + 0.482, + 0.654 + ], + "angle": 0, + "content": "[148] Hao Xue, Tianye Tang, Ali Payani, and Flora D Salim. 2024. Prompt Mining for Language Models-based Mobility Flow Forecasting. In Proceedings of the 32nd ACM International Conference on Advances in Geographic Information Systems." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.654, + 0.482, + 0.684 + ], + "angle": 0, + "content": "[149] Hao Xue, Bhanu Prakash Voutharoja, and Flora D Salim. 2022. Leveraging language foundation models for human mobility forecasting. In Proceedings of the 30th International Conference on Advances in Geographic Information Systems." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.684, + 0.482, + 0.723 + ], + "angle": 0, + "content": "[150] Yibo Yan, Haomin Wen, Siru Zhong, Wei Chen, Haodong Chen, Qingsong Wen, Roger Zimmermann, and Yuxuan Liang. 2024. Urbanclip: Learning text-enhanced urban region profiling with contrastive language-image pretraining from the web. In Proceedings of the ACM on Web Conference 2024. 4006-4017." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.723, + 0.482, + 0.754 + ], + "angle": 0, + "content": "[151] Yuwei Yan, Qingbin Zeng, Zhiheng Zheng, Jingzhe Yuan, Jie Feng, Jun Zhang, Fengli Xu, and Yong Li. 2024. OpenCity: A Scalable Platform to Simulate Urban Activities with Massive LLM Agents. arXiv preprint arXiv:2410.21286 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.754, + 0.482, + 0.784 + ], + "angle": 0, + "content": "[152] Jianwei Yang, Reuben Tan, Qianhui Wu, Ruijie Zheng, Baolin Peng, Yongyuan Liang, Yu Gu, Mu Cai, Seonghyeon Ye, Joel Jang, et al. 2025. Magma: A Foundation Model for Multimodal AI Agents. arXiv preprint arXiv:2502.13130 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.784, + 0.482, + 0.814 + ], + "angle": 0, + "content": "[153] Jihan Yang, Shusheng Yang, Anjali W Gupta, Rilyn Han, Li Fei-Fei, and Saining Xie. 2024. Thinking in space: How multimodal large language models see, remember, and recall spaces. arXiv preprint arXiv:2412.14171 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.814, + 0.482, + 0.855 + ], + "angle": 0, + "content": "[154] Kairui Yang, Zihao Guo, Gengjie Lin, Haotian Dong, Zhao Huang, Yipeng Wu, Die Zuo, Jibin Peng, Ziyuan Zhong, Xin WANG, Qing Guo, Xiaosong Jia, Junchi Yan, and Di Lin. 2025. Trajectory-LLM: A Language-based Data Generator for Trajectory Prediction in Autonomous Driving. In ICLR." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.855, + 0.482, + 0.884 + ], + "angle": 0, + "content": "[155] Silin Yang, Dong Wang, Haoqi Zheng, and Ruochun Jin. 2024. TimeRAG: BOOSTING LLM Time Series Forecasting via Retrieval-Augmented Generation. arXiv preprint arXiv:2412.16643 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.884, + 0.482, + 0.904 + ], + "angle": 0, + "content": "[156] Tiankai Yang, Yi Nian, Shawn Li, Ruiyao Xu, Yuangang Li, Jiaqi Li, Zhuo Xiao, Xiyang Hu, Ryan Rossi, Kaize Ding, et al. 2024. Ad-llm: Benchmarking large" + }, + { + "type": "list", + "bbox": [ + 0.086, + 0.109, + 0.482, + 0.904 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.109, + 0.91, + 0.12 + ], + "angle": 0, + "content": "language models for anomaly detection. arXiv preprint arXiv:2412.11142 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.12, + 0.913, + 0.15 + ], + "angle": 0, + "content": "[157] Xinli Yu, Zheng Chen, Yuan Ling, Shujing Dong, Zongyi Liu, and Yanbin Lu. 2023. Temporal data meets LLM-explainable financial time series forecasting. arXiv preprint arXiv:2306.11025 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.15, + 0.913, + 0.19 + ], + "angle": 0, + "content": "[158] Yuan Yuan, Jingtao Ding, Jie Feng, Depeng Jin, and Yong Li. 2024. Unist: A prompt-empowered universal model for urban spatio-temporal prediction. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4095-4106." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.19, + 0.913, + 0.221 + ], + "angle": 0, + "content": "[159] Yuan Yuan, Jingtao Ding, Chonghua Han, Depeng Jin, and Yong Li. 2024. A Foundation Model for Unified Urban Spatio-Temporal Flow Prediction. arXiv preprint arXiv:2411.12972 (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.221, + 0.913, + 0.251 + ], + "angle": 0, + "content": "[160] Yuan Yuan, Chonghua Han, Jingtao Ding, Depeng Jin, and Yong Li. 2024. Urbanfit: A foundation model for open-world urban spatio-temporal learning. arXiv preprint arXiv:2411.12164 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.251, + 0.913, + 0.281 + ], + "angle": 0, + "content": "[161] Yuan Yuan, Chenyang Shao, Jingtao Ding, Depeng Jin, and Yong Li. 2024. Spatiotemporal few-shot learning via diffusive neural network generation. In The Twelfth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.281, + 0.913, + 0.312 + ], + "angle": 0, + "content": "[162] Ye Yuan, Yong Zhang, Boyue Wang, Yuan Peng, Yongli Hu, and Baocai Yin. 2022. STGAN: Spatio-temporal generative adversarial network for traffic data imputation. IEEE Transactions on Big Data 9, 1 (2022), 200-211." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.312, + 0.913, + 0.342 + ], + "angle": 0, + "content": "[163] Zhenghang Yuan, Zhitong Xiong, Lichao Mou, and Xiao Xiang Zhu. 2024. Chatearthnet: A global-scale, high-quality image-text dataset for remote sensing. arXiv preprint arXiv:2402.11325 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.342, + 0.913, + 0.372 + ], + "angle": 0, + "content": "[164] Kunpeng Zhang, Feng Zhou, Lan Wu, Na Xie, and Zhengbing He. 2024. Semantic understanding and prompt engineering for large-scale traffic data imputation. Information Fusion 102 (2024), 102038." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.372, + 0.913, + 0.392 + ], + "angle": 0, + "content": "[165] Libo Zhang and Yue Ning. 2024. Large Language Models as Event Forecasters. arXiv preprint arXiv:2406.10492 (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.392, + 0.913, + 0.422 + ], + "angle": 0, + "content": "[166] Qianru Zhang, Xubin Ren, Lianghao Xia, Siu Ming Yiu, and Chao Huang. 2024. Spatio-Temporal Graph Learning With Large Language Model. https://openreview.net/forum?id=QUKcfq6GX" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.422, + 0.913, + 0.462 + ], + "angle": 0, + "content": "[167] Qianru Zhang, Haixin Wang, Cheng Long, Liangcai Su, Xingwei He, Jianlong Chang, Tailin Wu, Hongzhi Yin, Siu-Ming Yiu, Qi Tian, et al. 2024. A Survey of Generative Techniques for Spatial-Temporal Data Mining. arXiv preprint arXiv:2405.09592 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.462, + 0.913, + 0.492 + ], + "angle": 0, + "content": "[168] Siyao Zhang, Daocheng Fu, Wenzhe Liang, Zhao Zhang, Bin Yu, Pinlong Cai, and Baozhen Yao. 2024. Trafficcpt: Viewing, processing and interacting with traffic foundation models. Transport Policy 150 (2024), 95-105." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.492, + 0.913, + 0.523 + ], + "angle": 0, + "content": "[169] Weijia Zhang, Jindong Han, Zhao Xu, Hang Ni, Hao Liu, and Hui Xiong. 2024. Urban Foundation Models: A Survey. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 6633-6643." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.523, + 0.913, + 0.553 + ], + "angle": 0, + "content": "[170] Xin Zhang, Tianjian Ouyang, Yu Shang, Qingmin Liao, and Yong Li. [n.d.]. UrbanMLLM: Joint Learning of Cross-view Imagery for Urban Understanding. ([n.d.])." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.553, + 0.913, + 0.593 + ], + "angle": 0, + "content": "[171] Yu Zhang, Weiming Huang, Yao Yao, Song Gao, Lizhen Cui, and Zhongmin Yan. 2024. Urban region representation learning with human trajectories: a multiview approach incorporating transition, spatial, and temporal perspectives. GIScience & Remote Sensing 61, 1 (2024), 2387392." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.593, + 0.913, + 0.623 + ], + "angle": 0, + "content": "[172] Yimei Zhang, Xiangjie Kong, Wenfeng Zhou, Jin Liu, Yanjie Fu, and Guojiang Shen. 2024. A comprehensive survey on traffic missing data imputation. IEEE Transactions on Intelligent Transportation Systems (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.623, + 0.913, + 0.654 + ], + "angle": 0, + "content": "[173] Yunxiang Zhang and Xiaojun Wan. 2024. SITUATEDGEN: incorporating geographical and temporal contexts into generative commonsense reasoning. Advances in Neural Information Processing Systems 36 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.654, + 0.913, + 0.684 + ], + "angle": 0, + "content": "[174] Yifan Zhang, Cheng Wei, Shangyou Wu, Zhengting He, and Wenhao Yu. 2023. GeoGPT: understanding and processing geospatial tasks through an autonomous GPT. arXiv preprint arXiv:2307.07930 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.684, + 0.913, + 0.724 + ], + "angle": 0, + "content": "[175] Zeyang Zhang, Xin Wang, Ziwei Zhang, Haoyang Li, Yijian Qin, and Wenwu Zhu. 2024. LLM4DyG: can large language models solve spatial-temporal problems on dynamic graphs? In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4350-4361." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.724, + 0.913, + 0.765 + ], + "angle": 0, + "content": "[176] Yu Zhao, Pan Deng, Junting Liu, Xiaofeng Jia, and Jianwei Zhang. 2023. Generative Causal Interpretation Model for Spatio-Temporal Representation Learning. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 3537-3548." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.764, + 0.913, + 0.794 + ], + "angle": 0, + "content": "[177] Chuanpan Zheng, Xiaoliang Fan, Cheng Wang, and Jianzhong Qi. 2020. Gman: A graph multi-attention network for traffic prediction. In AAAI, Vol. 34. 1234–1241." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.794, + 0.913, + 0.815 + ], + "angle": 0, + "content": "[178] Yu Zheng, Licia Capra, Ouri Wolfson, and Hai Yang. 2014. Urban computing: concepts, methodologies, and applications. ACM TIST 5, 3 (2014), 1-55." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.815, + 0.913, + 0.855 + ], + "angle": 0, + "content": "[179] Siru Zhong, Xixuan Hao, Yibo Yan, Ying Zhang, Yangqiu Song, and Yuxuan Liang. 2024. Urbancross: Enhancing satellite image-text retrieval with cross-domain adaptation. In Proceedings of the 32nd ACM International Conference on Multimedia. 6307-6315." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.855, + 0.913, + 0.886 + ], + "angle": 0, + "content": "[180] Siru Zhong, Weilin Ruan, Ming Jin, Huan Li, Qingsong Wen, and Yuxuan Liang. 2025. Time-VLM: Exploring Multimodal Vision-Language Models for Augmented Time Series Forecasting. arXiv preprint arXiv:2502.04395 (2025)." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.109, + 0.913, + 0.886 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.447, + 0.088 + ], + "angle": 0, + "content": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey" + }, + { + "type": "header", + "bbox": [ + 0.685, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.109, + 0.484, + 0.14 + ], + "angle": 0, + "content": "[181] Gengze Zhou, Yicong Hong, Zun Wang, Xin Eric Wang, and Qi Wu. 2024. Navigpt-2: Unleashing navigational reasoning capability for large vision-language models. In European Conference on Computer Vision. Springer, 260-278." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.141, + 0.484, + 0.171 + ], + "angle": 0, + "content": "[182] Gengze Zhou, Yicong Hong, and Qi Wu. 2024. 
Navigpt: Explicit reasoning in vision-and-language navigation with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 7641-7649." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.171, + 0.483, + 0.201 + ], + "angle": 0, + "content": "[183] Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, and Rong Jin. 2023. One Fits All: Power General Time Series Analysis by Pretrained LM. Advances in Neural Information Processing Systems (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.201, + 0.482, + 0.231 + ], + "angle": 0, + "content": "[184] Xingcheng Zhou, Mingyu Liu, Bare Luka Zagar, Ekim Yurtsever, and Alois C Knoll. 2023. Vision language models in autonomous driving and intelligent transportation systems. arXiv preprint arXiv:2310.14414 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.231, + 0.482, + 0.251 + ], + "angle": 0, + "content": "[185] Zhilun Zhou, Yuming Lin, Depeng Jin, and Yong Li. 2024. Large language model for participatory urban planning. arXiv preprint arXiv:2402.17161 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.251, + 0.482, + 0.271 + ], + "angle": 0, + "content": "[186] Zihao Zhou and Rose Yu. 2024. Can LLMs Understand Time Series Anomalies? arXiv preprint arXiv:2410.05440 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.271, + 0.482, + 0.322 + ], + "angle": 0, + "content": "[187] Xizhou Zhu, Yuntao Chen, Hao Tian, Chenxin Tao, Weijie Su, Chenyu Yang, Gao Huang, Bin Li, Lewei Lu, Xiaogang Wang, et al. 2023. Ghost in the minecraft: Generally capable agents for open-world environments via large language models with text-based knowledge and memory. arXiv preprint arXiv:2305.17144 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.322, + 0.482, + 0.372 + ], + "angle": 0, + "content": "[188] Yuanshao Zhu, James Jianqiao Yu, Xiangyu Zhao, Qidong Liu, Yongchao Ye, Wei Chen, Zijian Zhang, Xuetao Wei, and Yuxuan Liang. 2024. 
Controllraj: Controllable trajectory generation with topology-constrained diffusion model. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4676-4687." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.372, + 0.482, + 0.402 + ], + "angle": 0, + "content": "[189] Yuanshao Zhu, James Jianqiao Yu, Xiangyu Zhao, Xuetao Wei, and Yuxuan Liang. 2024. UniTraj: Universal Human Trajectory Modeling from Billion-Scale Worldwide Traces. arXiv preprint arXiv:2411.03859 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.402, + 0.482, + 0.432 + ], + "angle": 0, + "content": "[190] Zhengqiu Zhu, Yatai Ji, Sihang Qiu, Yong Zhao, Kai Xu, Rusheng Ju, and Bin Chen. 2024. A Prototype Design of LLM-Based Autonomous Web Crowdsensing. In International Conference on Web Engineering. Springer, 406-409." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.432, + 0.482, + 0.472 + ], + "angle": 0, + "content": "[191] Zhengqiu Zhu, Yong Zhao, Bin Chen, Sihang Qiu, Kai Xu, Quanjun Yin, Jincai Huang, Zhong Liu, and Fei-Yue Wang. 2024. Conversational Crowdsensing: A Parallel Intelligence Powered Novel Sensing Approach. arXiv preprint arXiv:2402.06654 (2024)." + }, + { + "type": "list", + "bbox": [ + 0.085, + 0.109, + 0.484, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.485, + 0.424, + 0.502 + ], + "angle": 0, + "content": "A Limitations and Future Opportunities" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.504, + 0.482, + 0.546 + ], + "angle": 0, + "content": "We further discuss the potential limitations of current research and identify several key future directions aimed at advancing the development of more powerful, transparent, and reliable STFMs:" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.548, + 0.483, + 0.7 + ], + "angle": 0, + "content": "- The curse of accuracy against interpretability. 
We have identified a significant challenge in developing FMs for addressing numerical problems in ST data science. Directly leveraging LLMs for numerical tasks such as forecasting proves to be non-trivial [34]. Meanwhile, fine-tuning LLMs or training STFMs from scratch using large-scale, cross-domain ST data often comes at the cost of interactive capabilities, thereby hindering interpretability in the prediction outcomes. These limitations motivate us to explore a novel paradigm that not only retains strong numerical reasoning abilities but also enhances interpretability, bridging the gap between predictive accuracy and explanatory insight." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.7, + 0.483, + 0.852 + ], + "angle": 0, + "content": "- Large foundation models are all we need? While the extensive parameterization of FMs enables impressive generalization capabilities, particularly in zero/few-shot settings, their superiority over smaller expert models remains context-dependent. In ST domains such as time series analysis [122] and urban planning [57], smaller expert models often outperform FMs when provided with sufficient domain-specific training data. This raises fundamental questions about the trade-offs between model scalability, efficiency, and task-specific optimization. Future research should delve into hybrid approaches that combine the adaptability of large models with the precision of expert models." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.852, + 0.482, + 0.894 + ], + "angle": 0, + "content": "- One-fit-all FMs across the full workflow. 
While current FMs are typically designed to support only specific stages of ST data science, we envision a more unified FM capable of seamlessly" + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.548, + 0.483, + 0.894 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.107, + 0.916, + 0.177 + ], + "angle": 0, + "content": "spanning the entire workflow, from initial data sensing and management to mining and supporting downstream applications. Achieving this goal will likely require the development of advanced LLM agents that can function as full-stack engineers (i.e., strongly benefiting all stages) for ST data science." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.177, + 0.916, + 0.302 + ], + "angle": 0, + "content": "- Integrating STFMs with multimodal understanding. While current STFMs excel in processing structured ST data, their ability to integrate and reason over multimodal information, including text, images, video, and sensor data, remains underdeveloped. Many tasks require models to jointly interpret geospatial context, temporal dynamics, and text descriptions. Future research can focus on designing multimodal STFMs that effectively align, fuse, and reason over heterogeneous data sources, enabling more context-aware and human-interpretable decision-making." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.313, + 0.791, + 0.327 + ], + "angle": 0, + "content": "B Zero-shot Utilization of LLMs" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.331, + 0.912, + 0.345 + ], + "angle": 0, + "content": "There are three ways of directly using LLMs for various ST tasks:" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.348, + 0.915, + 0.445 + ], + "angle": 0, + "content": "- LLM-as-Augmenter. Pretrained LLMs can enhance both data understanding and model performance. 
On the one hand, it can serve as the input augmenter, which enhances data interoperability or provides external information [40, 79] (e.g., textual or visual). On the other hand, LLMs can serve as a parameter-frozen model component [102, 150, 166], thus augmenting domain models by injecting the pretrained external knowledge in LLMs." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.445, + 0.915, + 0.514 + ], + "angle": 0, + "content": "- LLM-as-Predictor. LLMs can be directly employed as predictors [33, 53, 73, 125] for various tasks. Due to the modality gap between text and ST data, preprocessing is required to fit the input spaces of LLMs. Such step typically contains prompt engineering [73, 125, 147-149] or patch & tokenization [53]." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.514, + 0.915, + 0.624 + ], + "angle": 0, + "content": "- LLM-as-Agent. LLM-based agents are typically equipped with the ability to memorize and call various tools. When applied to ST data science, various domain-expert models can be wrapped as a tool and added into the agent in a plug-and-play manner [144, 168, 174]. As such, LLM serves as a router to access different models with both flexibility and performance guarantees. Furthermore, multi-agent systems [185] can be built to solve more complex tasks in the ST domain." + }, + { + "type": "list", + "bbox": [ + 0.516, + 0.348, + 0.915, + 0.624 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.636, + 0.855, + 0.652 + ], + "angle": 0, + "content": "C Comparison between LLMs and PFMs" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.655, + 0.915, + 0.738 + ], + "angle": 0, + "content": "Table 3 demonstrates the comparison between LLMs and PFMs on their capabilities, including perception, optimization, and reasoning. For example, PFMs excel in exceptional numerical reasoning abilities, yet they often struggle with common-sense understanding. 
There is still no free lunch, and the user can choose either LLMs or PFMs according to the downstream applications." + }, + { + "type": "table_caption", + "bbox": [ + 0.515, + 0.751, + 0.915, + 0.779 + ], + "angle": 0, + "content": "Table 3: A capability comparison between LLMs and PFMs for ST data science." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.793, + 0.908, + 0.891 + ], + "angle": 0, + "content": "
CapabilitiesLarge Language Models (LLMs)Pretrained Foundation Models (PFMs)
Perception▲ Limited native ST perception; can be enhanced via fine-tuning✓ Strong ST perception, integrating sensor data and domain-specific learning
Optimization✓ Agent-based reasoning for decision-making; relies on prompting and heuristics▲ Limited; lacks decision-making ability for control and planning
Common-sense Reasoning✓ Strong via pretraining on vast textual data; can be enhanced with fine-tuning▲ Limited; relies on structured ST data rather than broad world knowledge
Numerical Reasoning▲ Handles arithmetic but struggles with structured ST computations✓ Designed for numerical problems, e.g., forecasting, anomaly detection
Causal Reasoning▲ Can infer causal relations from text but lacks structured ST modeling✓ Built-in graph-based and ST causal modeling
" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.314, + 0.088 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.82, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Yuxuan Liang et al." + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.104, + 0.913, + 0.479 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.311, + 0.493, + 0.687, + 0.508 + ], + "angle": 0, + "content": "Figure 7: Taxonomy from the methodology perspective." + } + ] +] \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13502/db12ade8-3943-4647-bb0d-ce8160710750_origin.pdf b/data/2025/2503_13xxx/2503.13502/db12ade8-3943-4647-bb0d-ce8160710750_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8a9279f518b89ea7231843d80dba89fe9abd3f55 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13502/db12ade8-3943-4647-bb0d-ce8160710750_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:921bec8beb6829b7708e9c18780360a634a628e54bf7c721c7c3d8e09a19da6d +size 3508704 diff --git a/data/2025/2503_13xxx/2503.13502/full.md b/data/2025/2503_13xxx/2503.13502/full.md new file mode 100644 index 0000000000000000000000000000000000000000..11e9de6545465a37851496e82bbcaeb79d20df51 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13502/full.md @@ -0,0 +1,442 @@ +# Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey + +Yuxuan Liang $^{1}$ , Haomin Wen $^{2,1}$ , Yutong Xia $^{3}$ , Ming Jin $^{4}$ , Bin Yang $^{5}$ , Flora Salim $^{6}$ , Qingsong Wen $^{7}$ , Shirui Pan $^{4}$ , Gao Cong $^{8}$ + +1The Hong Kong University of Science and Technology (Guangzhou) 2Carnegie Mellon University 3National University of Singapore 4Griffith University 5East China Normal University 6University of New South Wales 7Squirrel AI Learning, USA 8Nanyang Technology University 
{yuxiang,yutong.x}@outlook.com,{wenhaomin.whm,mingjinedu,qingsongedu}@gmail.com flora.salim@unsw.edu.au,byang@dase.ecnu.edu.cn,s.pan@griffith.edu.au,gaocong@ntu.edu.sg + +# Abstract + +Spatio-Temporal (ST) data science, which includes sensing, managing, and mining large-scale data across space and time, is fundamental to understanding complex systems in domains such as urban computing, climate science, and intelligent transportation. Traditional deep learning approaches have significantly advanced this field, particularly in the stage of ST data mining. However, these models remain task-specific and often require extensive labeled data. Inspired by the success of Foundation Models (FM), especially large language models, researchers have begun exploring the concept of Spatio-Temporal Foundation Models (STFMs) to enhance adaptability and generalization across diverse ST tasks. Unlike prior architectures, STFMs empower the entire workflow of ST data science, ranging from data sensing, management, to mining, thereby offering a more holistic and scalable approach. Despite rapid progress, a systematic study of STFMs for ST data science remains lacking. This survey aims to provide a comprehensive review of STFMs, categorizing existing methodologies and identifying key research directions to advance ST general intelligence. + +# 1 Introduction + +Humans live in a world shaped by the dynamic interplay of countless elements across space and time. Spatio-Temporal (ST) Data, which refer to data that encapsulate ST phenomena, track the evolution of objects or events across locations and time [5], such as meteorological records, traffic patterns, and human traces. These data are frequently sourced from a wide array of platforms, ranging from IoT devices, GPS sensors, social media, to remote sensing. 
+ +Within this context, Spatio-Temporal Data Science focuses on sensing, managing, and mining these datasets to uncover patterns, understand complex systems, and predict future dynamics. Motivated by its transformative potential, this field addresses critical challenges across urban environments and even the entire planet, enabling decision-making and fostering innovations that contribute to building smarter, more sustainable, and resilient systems [178]. + +Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org. + +Conference'17, July 2017, Washington, DC, USA + +© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM. + +ACM ISBN 978-x-xxxxx-xxxxx-x/YY/MM + +https://doi.org/10.1145/nnnnnnn.nnnnnnn + +![](images/585a84a4e1c2d564b53b7198e4a5ac28aaf09f4523682713f3c283074ed415a4.jpg) +Figure 1: ST Foundation Models (STFM), which include LLM and PFM, are pretrained with or applied to diverse ST data, with the abilities of perception, optimization, and reasoning. STFMs can, in turn, enhance each stage of ST data science. + +In the era of deep learning, the community has primarily concentrated on spatio-temporal representation learning, as a fundamental step of ST data mining [129]. 
Key advancements include the development of Spatio-Temporal Graph Neural Networks (STGNN) [51] and transformer-based architectures, which have shown remarkable success in tasks such as traffic forecasting [80, 146], air quality prediction [82], and human mobility analytics [132]. STGNNs integrate Graph Neural Networks (GNN) with temporal learning modules (e.g., GRU [6, 70], TCN [140, 141]) to model ST correlations, while transformer models leverage self-attention mechanisms [37, 78, 177] to process complex dependencies across space and time. Additionally, there has been significant research on self-supervised learning [46, 74, 92], where models are trained to extract powerful representations with minimal reliance on large annotated datasets. + +Driven by the success of Foundation Models (FM), especially Large Language Models (LLM), researchers have recently begun exploring the concept of Spatio-Temporal Foundation Models (STFM) [32, 81, 169]. By harnessing LLMs, it becomes possible to develop more generalized, adaptable solutions that can be fine-tuned for specific tasks with minimal data. Another prominent approach involves pretraining FMs (denoted as PFM) on cross-domain ST data and adapting them for particular domains. In contrast to previous architectures (e.g., STGNNs), STFMs integrates the capabilities of perception, reasoning and optimization, which not only promises to revolutionize ST data mining, but also empowers other stages of ST data science, such as ST data sensing and management (See Figure 1). This shift has the potential to enhance the scalability and efficiency of ST applications, offering a more holistic approach to addressing challenges in urban computing, climate science, etc. + +Table 1: Our survey vs. related surveys on FMs for learning ST data, such as locations (L), trajectories (T), events (E), ST rasters (R), and ST graphs (G). The applications (App.) include numerical (N) and inferential (I) problems. + +
SurveyYearVenueSensingManage.MiningApp.Data
Jin et al. [54]2023-NR,G
Jiang et al. [48]2024IJCAINR,G
Liang et al. [81]2024KDDNT,E,R,G
Zhang et al. [169]2024KDDN,IL,T,E,R,G
Goodge et al. [32]2025-NT,E,R,G
Ours2025-N,IL,T,E,R,G
+ +Despite their rapid advancements, a systematic analysis of STFMs across the entire workflow of ST data science remains lacking. First, prior surveys have primarily focused on utilizing LLMs as the key tool for ST data mining [32, 54, 81, 169], leaving a significant gap in understanding how these models can be integrated throughout the entire process, i.e., with less focus placed on their role in the earlier stages of sensing and management. Second, they predominantly examine the applications of STFMs to numerical problems (e.g., forecasting, imputation) while overlooking their role in inferential problem-solving such as decision-making systems. + +To bridge these gaps, this paper aims to provide a more comprehensive survey of STFMs across all stages of ST data science, spanning data sensing, management, and mining (see Figure 1). For example, LLMs can enhance ST data sensing by actively processing citizen reports, optimizing participatory sensing strategies, and generating synthetic data at scale. In terms of data management, they can automate data cleaning tasks, construct meaningful knowledge graphs for data integration, and facilitate more efficient retrieval of cross-modal datasets. Beyond these stages, our survey also explores how STFMs support a broader range of downstream applications, including numerical and inferential problems. Through this endeavor, we seek to illuminate an overall vision of STFMs, thereby enhancing comprehension regarding their potential to optimize ST data science, fostering more integrated and adaptable solutions. + +Meanwhile, we systematically investigate the key methodologies of STFMs for modeling a variety of ST data. We begin by categorizing existing STFMs into two main classes: LLMs and Pretrained Foundation Models (PFMs). 
For LLMs, which are pretrained on linguistic data, we focus on their usage as a zero-shot [33] or few-shot learner [53, 73], where various prompting and fine-tuning strategies have been explored, respectively. For PFMs, which are trained from scratch based on cross-domain ST data [40, 158, 189], we examine their neural architectures, pretraining methods, and their adaptability to different types of ST data, including location data, trajectory data, events, ST raster data, and ST graph data. + +In summary, our major contributions lie in three aspects: + +- Comprehensive and up-to-date survey: We provide the first comprehensive and modern survey of FMs across the entire workflow of ST data science, covering data sensing, management, and mining. We also explore a broader range of downstream tasks and data types compared to most existing surveys (See Table 1). +- Vision and Methodologies: We propose a vision for STFMs, identifying key capabilities essential for their success, and discuss current methodologies for implementing these abilities in detail. +- Future directions: We highlight promising directions for advancing ST data science with foundation models, encouraging further research and exploration in this emerging field. + +Paper Organization. The remainder of this paper is organized as follows: Sec. 2 provides essential background on FMs and ST data. Sec. 3 and 4 present a taxonomy of STFMs regarding the workflow and methodologies, respectively. Sec. 5 offers concluding remarks, and Appendix A highlights promising avenues for future research. + +# 2 Background + +Foundation models. FMs are deep neural networks trained on vast datasets, enabling them to acquire broad, cross-domain knowledge and exceptional adaptability [45]. Unlike earlier task-specific models, FMs can be efficiently fine-tuned with relatively small amounts of task-specific data, offering remarkable flexibility, effectiveness, and cost efficiency. 
Pioneering attempts like BERT [58] and GPT-3 [11] have reshaped natural language processing. More recent models, e.g., GPT-4o [45] and DeepSeek-R1 [36], further push the frontiers of generative capabilities, enabling more nuanced reasoning, robust domain adaptation, and improved context-awareness in diverse tasks. In ST domains, recent FMs like Time-MoE [119], Chronos [4], and UniST [158] have made remarkable strides in time series analysis and universal ST forecasting, while UniTraj [189] serves as a versatile foundation for various trajectory-related tasks. Inspired by these successes, this survey delves into the utilization of FMs in the entire workflow of ST data science, covering data sensing, management, and mining. + +Formulation of Spatio-Temporal Data. ST data refer to datasets that integrate spatial (location-based) and temporal (time-based) information, capturing dynamic patterns and relationships over space and time. Figure 2 depicts the basic ST data structures discussed in this survey, including locations, trajectories, events, ST rasters, and ST graphs. Their definitions are delineated as follows. + +Definition 1 (Location). A location refers to a fixed spatial point or object in a geographical space, represented by the geospatial coordinates $l \in \mathbb{R}^2$ , i.e., latitude and longitude. It is often profiled by the corresponding satellite image, street-view image, and descriptions. + +Definition 2 (Trajectory). A trajectory is a sequence of time-ordered locations that describe the movements of an object in the geographical space. It can be formulated as $\mathcal{T} = p_1\rightarrow p_2\rightarrow \dots \rightarrow p_T$ where $p_i = (l_i,t_i)$ , and $l_{i}$ denotes the object's location at time $t_i$ . + +Definition 3 (Event). 
An event sequence is a series of timestamped events, denoted as $\mathcal{E} = v_{1}\rightarrow v_{2}\rightarrow \dots \rightarrow v_{T}$ , describing the progress of actions or occurrences, where $v_{i} = (e_{i},t_{i})$ and $e_i\in \mathbb{R}^d$ is an event and $t_i$ denotes the time when $e_i$ occurs. + +![](images/037cc7fecab764f3c9489d19c64e195a2e4237fe21b75687cf6cec155fb66656.jpg) +Figure 2: Illustration of various types of ST data. + +Definition 4 (Spatio-Temporal Raster). An ST raster can be denoted as $\mathcal{X} = < \mathbf{X}_1,\mathbf{X}_2,\dots ,\mathbf{X}_T > \in \mathbb{R}^{H\times W\times T\times D}$ , where $\mathbf{X}_t\in \mathbb{R}^{H\times W\times D}$ denotes the signals collected from $N = HW$ evenly distributed locations at time $t$ , each characterized by $D$ feature attributes. + +Definition 5 (Spatio-Temporal Graph). An ST graph extends the ST raster to be $X = < \mathbf{X}_1, \mathbf{X}_2, \ldots, \mathbf{X}_T > \in \mathbb{R}^{N \times T \times D}$ by explicitly incorporating spatial correlations with a graph $G_t = (V, E_t, \mathbf{A}_t)$ when $N$ locations are not uniformly distributed. Here $V$ is the set of nodes, $E_t$ is the set of edges, and $\mathbf{A}_t \in \mathbb{R}^{N \times N}$ is the adjacency matrix at time $t$ . The size of $V$ is usually static. + +# 3 The Workflow Perspective + +As shown in Figure 3, we examine STFMs from a holistic, bottom-up perspective, emphasizing their composition across four key aspects: + +- ST Data Sensing refers to the acquisition of data that varies over both space and time from diverse resources (e.g., sensors, satellites, social media), to capture dynamic environmental, geographic, or social phenomena. We also consider synthetic data generation for enhancing data diversity and quantity. 
+- ST Data Management focuses on storing, indexing, and organizing these large-scale, heterogeneous ST datasets, incorporating strategies like distributed architectures for efficient retrieval and integration. FMs can enhance this process by facilitating data cleaning, query & retrieval, and data integration. +- ST Data Mining involves learning and analyzing ST data that varies across both space and time to uncover patterns, trends, and relationships, using data mining (DM), deep learning (DL) techniques, or the newly-proposed STFMs with strong capabilities in perception, optimization, and reasoning. +- Downstream Applications: This stage harnesses the above insights from ST data to drive real-world applications, ranging from numerical problems to inferential problems, where informed actions and policies are formulated. + +By examining these four aspects, we can better understand how STFMs advance from raw data acquisition to high-level service providing, ultimately enabling more intelligent, adaptable, and impactful solutions. We will detail each stage in the following sections. + +![](images/7550fb89f980864400839d7d1a4cff676d111c15a655bac2fa282342c99fd317.jpg) +Figure 3: The framework of STFMs for ST data science. + +# 3.1 Spatio-Temporal Data Sensing + +FMs revolutionize ST data sensing from two complementary aspects: real-world data sensing, which involves collecting data from physical sources, and synthetic data generation, which creates synthetic ST data through foundation models. + +3.1.1 Real-World Data Sensing. Advances in sensing and data acquisition technologies have led to the generation of vast amounts of ST data. FMs are increasingly applied in human-centric active sensing, particularly in the context of citizen reporting for urban and environmental monitoring [41]. These models act as powerful agents for collecting and processing real-time data from citizens, enabling the efficient handling of ST data [19, 27, 101]. 
For example, citizens might constantly report incidents, environmental changes, or social events through text or voice [178]. By understanding these reports, LLMs can categorize, prioritize, and trigger appropriate responses for various urban issues, from traffic congestion to environmental hazards. This enhances the decision-making process by continuously updating their models with new data streams. Thus, LLMs are not just passive analytical tools but active participants that help make urban environments more responsive and adaptive to citizen inputs, transforming traditional citizen feedback into actionable knowledge, enabling more sustainable and resilient cities. + +FMs can also function as intelligent schedulers or simulate multiagent systems to optimize the recruitment and coordination of participants for crowdsensing, particularly under budget constraints [41, 139, 191]. By analyzing ST data and understanding context, LLMs can identify regions and times where crowdsensing efforts will yield the most valuable information. They dynamically recruit participants based on proximity, availability, and past contributions, reducing redundant data collection. Additionally, LLMs simulate multiple agents interacting in real time, ensuring the efficient distribution of sensing tasks across a network of citizens or devices [190]. This strategic scheduling and agent-based coordination maximize coverage while minimizing costs, ensuring that crowdsensing delivers valuable, real-time insights under budgetary constraints. + +3.1.2 Synthetic Data Generation. FMs can also facilitate data generation, which enhances ST data by increasing its diversity, improving model robustness, and compensating for missing or sparse information [95]. This is crucial for ST tasks like mobility analytics, where collecting real-world data is often costly or raises privacy concerns. 
For instance, Trajectory-LLM [154] generates vehicle trajectories from brief textual descriptions of vehicle interactions, whereas Traj-LLM [56] generates human trajectories by leveraging personas, memory modules, and routine profiles. LLMob [126] advances mobility data generation, offering flexibility in modeling diverse urban activities and personal mobility patterns, thus improving transportation system modeling and analysis. LLMs have also been employed to construct synthetic environments that replicate real-world conditions across diverse domains, including intelligent transportation [1] and disaster management [31]. + +# 3.2 Spatio-Temporal Data Management + +Upon the acquisition of ST data, the challenge of effective management emerges, particularly in addressing data quality issues (e.g., missing values/views) and facilitating data retrieval and integration. Within this context, FMs can be harnessed in the following ways. + +3.2.1 Data Cleaning. Data cleaning is the process of improving data quality by addressing issues such as missing values, low sampling rates, and noise. For example, ST data often exhibit missing values due to various factors like sensor malfunctions and transmission disruptions [178]. Filling in these missing values[113] is crucial for ensuring the integrity of predictive models, optimizing strategies, and facilitating informed decision-making. Recent literature reveals that LLMs can serve as powerful zero-shot [164] or few-shot [17, 172] learners to data imputation by leveraging their ability to identify and learn complex ST patterns. PLMTrajRec [135], utilizing a pretrained language model to recover sparse trajectory data by unifying intervals and inferring road conditions, showing effective generalization across varied sampling intervals in tests. 
Moreover, scholars have investigated the potential of leveraging LLMs to augment missing views or information, such as urban region profiling [40, 150, 163] and traffic video captioning [25]. + +3.2.2 Query & Retrieval. Meanwhile, LLM can be applied to querying and retrieval to enhance information retrieval accuracy under the ST context. By leveraging their advanced natural language understanding capabilities, LLMs can process user queries in a more contextual and semantically rich manner, enabling precise retrieval of relevant information from structured and unstructured data sources. For instance, UrbanLLM [47] finetunes LLMs for urban activity planning and management, which serves as a problem solver that decodes urban-related queries into several sub-tasks, with each one solved by suitable spatio-temporal AI models. Alamsyah et al. [2] propose an automated smart city planning system that utilizes a personalized LLM with Retrieval Augmented Generation (RAG) [30] to generate tailored urban planning recommendations while ensuring data privacy, where RAG is used to retrieve relevant urban planning documents for context-aware responses. Another line of work [67, 75, 170, 179] utilizes Multimodal LLM for cross-modal information retrieval to enhance urban computing tasks. + +3.2.3 Data Integration. Data integration seeks to combine information from disparate sources, often necessitating the understanding and mapping of relationships between entities in heterogeneous datasets. LLMs are increasingly being employed in this domain, particularly for knowledge graph construction [24], where they automate and enhance the extraction, integration, and reasoning of related data. In the context of ST data, LLMs facilitate data integration by leveraging heterogeneous urban data sources, performing relational triplet extraction, and completing knowledge graphs through geospatial reasoning [94, 106]. 
A pioneering study UrbanKGent [105] proposes an LLM-based Agent framework to automate the process of urban knowledge graph construction. + +# 3.3 Spatio-Temporal Data Mining + +Unlike traditional data mining, which primarily focuses on structured datasets, ST data mining captures intricate spatial and temporal dependencies within ST data using machine learning or deep learning techniques [51, 129, 167]. With the emergence of FMs and LLMs, Spatio-Temporal Foundation Models (STFMs) offer new possibilities by integrating perception, optimization, and reasoning capabilities to enhance ST data mining. In this section, we explore these key capabilities, while their specific applications across different domains are detailed in Sec. 3.4. + +3.3.1 Perception. In STFMs, perception encompasses the ability to effectively model, interpret, and generalize complex spatial and temporal patterns, enabling a deeper understanding of dynamic environments. This capability can be categorized into two key perspectives. The first view pertains to an agent's ability to perceive and understand its surrounding environment, capturing visual or contextual interactions within real-world scenarios such as smart cities [151], indoor activities [152, 153], and mobile Apps [127]. + +The second aspect involves interpreting and extracting ST patterns from sensor data, ensuring accurate predictions across diverse domains. Domain-agnostic approaches, such as STEP [117] and GPT-ST [74], have employed pretraining strategies that leverage historical observations to enhance forecasting performance. In urban computing, models like TFM [130] and OpenCity [72] utilize graph-based FMs to analyze behaviors and interactions within transportation systems, yielding promising results in traffic prediction. 
In climate science, Pangu [9], trained on 39 years of global climate data, delivers superior deterministic forecasting outcomes across all evaluated variables when compared to leading numerical weather prediction systems. Additional notable examples in this area include the works [60, 76, 104, 108]. Despite these advances, achieving robust generalization remains a critical challenge, as most existing research has been confined to in-domain applications. While models like UniST [158] are designed as one-for-all solutions for diverse ST scenarios, their training datasets and evaluation testbeds are predominantly limited to transportation. Nevertheless, their underlying technique stacks show promise for broader cross-domain and cross-modality generalization. Other significant contributions in this realm include UniFlow [159] and UrbanDiT [160]. + +3.3.2 Optimization. Building upon the perceptual foundations, the optimization ability focuses on refining and adapting models to achieve specific, task-oriented objectives. In other words, models are not only expected to capture rich ST patterns but also to drive actionable decision-making in dynamic, real-world scenarios. This involves integrating advanced optimization strategies that tailor model behavior to the unique demands of applications. + +A prominent approach involves agent-based frameworks. For example, in traffic signal control, traditional methods (e.g., RL) are now augmented by frameworks that use LLMs as decision-making agents [61]. These systems leverage real-time traffic data and expert prompts to enable human-like planning, resulting in more adaptive and interpretable control strategies. Similarly, CityGPT [35] decomposes ST analysis into specialized sub-tasks, handled by temporal, spatial, and fusion agents, to efficiently process IoT data and generate insightful visualizations. 
AgentMove [28] addresses human mobility prediction by breaking down the task into modules for individual pattern mining, urban structure analysis, and collective behavior extraction. In geo-science, systems like Geode [38] integrate explicit optimization modules with ST data retrieval and machine learning inference to tackle zero-shot geospatial QA with enhanced precision. In urban planning, an innovative work [185] simulates planners and residents by LLM agents and enables their interactions to optimize inclusive land-use plans efficiently. Despite these promising developments, significant challenges remain. Seamlessly integrating perceptual capabilities with targeted optimization strategies is crucial for next-generation ST models that are both versatile and effective across diverse operational contexts. + +Table 2: Summary of representative FMs tailored for ST data science. + +
StageTask & CapabilityExampleMethodCategoryVenueYear
SensingReal-World Data SensingIdentifying Citizen-Related Issues from Social Mediados Santos et al. [27]LLMCAiSE2024
Real-World Data SensingIntelligent Crowdsensing CoordinationAutoWebCrowds [190]LLMICWE2024
Synthetic Data GenerationTrajectories GenerationTrajectory-LLM [154]LLMICLR2025
Synthetic Data GenerationHuman Activity Data GenerationLLMob [126]LLMNeurIPS2024
ManagementData CleaningFew-Shot Learner for Filling Missing ValuesNuwaTS [17]PFMPreprint2024
Data CleaningTrajectory RecoveryPLMTrajRec [135]LLMPreprint2024
Data CleaningAugment Additional Views of DataUrbanCLIP [150]LLMWWW2024
Query & RetrievalAutonomous Query Processor for Urban ManagementUrbanLLM [47]LLMEMNLP2024
Data IntegrationUrban Knowledge Graph ConstructionUrbanKGent [105]LLMNeurIPS2024
MiningPerceptionUnderstand the EnvironmentMagma [152]PFMCVPR2025
PerceptionInterpret and Extract ST PatternsSTEP [117]PFMKDD2022
OptimizationDrive Actionable Decision-Making in Dynamic ScenariosAgentMove [28]LLMPreprint2024
OptimizationOptimize Land-Use Plans by LLM AgentsZhou et al. [185]LLMPreprint2024
ReasoningCommon-sense ReasoningCausal-VidQA [66]PFMCVPR2022
ReasoningNumerical ReasoningUrbanGPT [73]LLMKDD2024
ReasoningCausal ReasoningNuwaDynamics [128]PFMICLR2024
ApplicationForecastingGlobal Weather ForecastingPangu [9]PFMNature2023
ImputationGenerative Adversarial Network for Traffic Data ImputationSTGAN [162]PFMIEEE TBD2022
Anomaly DetectionTransformer-based Anomaly DetectorXu et al. [145]PFMICLR2022
Event AnalysisDetecting and Interpreting EventsLAMP [120]LLMNeurIPS2023
Physical GroundingGeo-localizationGeoGPT [174]LLMJAG2023
Decision MakingTransportation Analytics and ControlTrafficGPT [168]LLMTransport Policy2024
Scenario SimulationSimulation of Human BehaviorPark et al. [107]LLMUIST2023
+ +3.3.3 Reasoning. While current ST models have demonstrated notable success in recognition and agent-based tasks, their reasoning and cognitive capabilities remain underdeveloped compared to advanced systems like DeepSeek-R1 [36]. To progress toward ST general intelligence, we identify three key aspects of reasoning: + +- Common-sense Reasoning harnesses everyday knowledge and contextual cues to draw implicit inferences from complex data. For instance, Causal-VidQA [66] enables models to infer explanations, predict future states, and generate counterfactual scenarios in video question-answering, while SituatedGen [173] integrates geographical and temporal contexts to generate coherent and contextually plausible statements. +- Numerical Reasoning involves interpreting and manipulating quantitative information to perform arithmetic operations, assess uncertainties, and discern relationships within ST data; for instance, STBench [69] evaluates these abilities in LLMs, while UrbanGPT [73] enhances ST forecasting with instruction tuning. +- Causal Reasoning seeks to uncover cause-effect relations within ST data, crucial for robust and interpretable predictions. For example, NuwaDynamics [128] identifies causal regions and applies interventions to improve generalization, and GCIM [176] learns latent causal structures to disentangle spurious correlations. + +Collectively, these dimensions offer a promising yet underexplored pathway toward achieving ST general intelligence, bridging the gap between pattern recognition and true cognitive understanding. + +# 3.4 Downstream Applications + +3.4.1 STFMs for Numerical Problems. ST data is predominately numeric in many real-world scenarios. Addressing these numeric challenges is critical for tasks like forecasting, imputation, and anomaly detection [52], which demand an accurate understanding of the physical world. 
STFMs excel in these areas by uncovering intricate patterns and dependencies, ultimately enabling more reliable data-driven decision-making. + +- Forecasting. Early forecasting approaches often relied on task-specific neural networks like STGNNs [51, 52, 110, 116], whereas recent developments have shifted toward universal forecasting [91, + +137, 167]. For instance, GPT-ST [74] leverages pretraining on historical observations to boost predictive performance, while UniST [158] unifies multiple traffic prediction tasks within a single model by coupling sequence modeling with attention-based mechanisms. Building on this progress, ST-LLM [86] and STG-LLM [90] enhance traffic predictions by combining ST inputs with partially frozen large language models, and UrbanGPT [73] extends this paradigm further by employing ST instruction tuning to better align textual and ST data. Similar approaches have also been widely used in other domains, such as ClimaX [104], Geo-Bench [60], and Orca [76]. + +- Imputation. This has likewise benefited from techniques that capture ST dependencies to accurately restore missing or corrupted data. For instance, NuwaTS [17] repurposes pretrained language models with contrastive learning and specialized patch embeddings (capturing missing patterns/statistics) to enable cross-domain time series imputation through a unified framework. STD-LLM [44] employs LLMs with spatial-temporal tokenizers and hypergraph learning modules to handle missing values in spatio-temporal data while capturing non-pairwise correlations through topology-aware node embeddings. DrIM [83] combines LLM-derived text representations (from masked tabular data conversions) with contrastive learning to measure similarities for nearest-neighbor imputation in heterogeneous datasets. + +- Anomaly Detection. Anomaly detection in ST data has advanced by leveraging models that learn the normal dynamics of ST systems to identify deviations indicative of abnormal events. 
Whereas prior methods relied on statistical thresholding and clustering to flag outliers, recent FMs learn robust ST representations to detect even subtle anomalies. For example, early attempts [26, 89, 186] investigate the feasibility of using LLMs for anomaly detection in time series data. SigLLM [3] employs GPT-series with signal-to-text conversion techniques, offering dual pipelines (anomaly prompting and deviation detection) for time series analysis through textual or visual representations of numerical data. AD-LLM [156] introduces a benchmark framework combining GPT-4's zero-shot reasoning with contrastive learning for anomaly context enrichment and automated model selection through chain-of-thought prompting. + +- Others. Furthermore, FMs have demonstrated great potential in other numerical problems such as time series classification [18], geospatial prediction [39, 100], traffic speed inference [7], and socioeconomic indicator prediction [40, 142, 150]. + +3.4.2 STFMs for Inferential Problems. Inferential problems in ST data require the integration of both reasoning and understanding of environments. These problems involve high-level cognitive tasks where accurate representation of locations, movements, and environmental context is essential. Addressing such problems goes beyond numerical predictions — it necessitates answering critical inferential questions: What happened? Where is it? What to do? What if? FMs have shown their potential to enhance solutions for these challenges by leveraging their capacity to handle ST knowledge and interpret complex, unstructured data. + +"What happened?" - Event Analysis. Detecting events aims to recognize and explain significant events in time and space. Traditional models struggle with scalability, interpretability, and incorporating external knowledge. 
To this end, LAMP [120] integrates LLMs with event models, using abductive reasoning to suggest plausible causes for predicted events, retrieve supporting evidence, and rank predictions for improved accuracy. Meanwhile, LEAP [165] replaces GNNs and RNNs with LLMs by framing event detection as a question-answering task, predicting missing event components and forecasting future relations through self-attention mechanisms. + +"Where is it?"- Physical Grounding. Grounding ST models in real-world geographical contexts is essential for various applications such as geo-localization, map reconstruction, intelligent routing and navigation. Geo-localization aims to determine an object's location based on multimodal inputs, including images, text, and sensor data. By processing these cues in conjunction with map data, LLMs such as GPT-4o, DeepSeek [36], and GeoGPT [174] can infer geographic coordinates or identify specific locations described in natural language. Map reconstruction, on the other hand, involves creating or updating digital maps by synthesizing information from satellite imagery, sensor readings, and textual reports. LLMs contribute by interpreting and generating map content, correcting inaccuracies, and filling in missing details. For instance, MapGPT [14] employs language-guided updates, incorporating textual descriptions of environmental changes into existing map structures. In personalized routing, ItiNera [123] combines LLMs with spatial optimization to generate personalized "Citywalk" itineraries, providing user-specific and spatially coherent urban exploration; ChinaTravel [115] provides a benchmark for real-world Chinese travel planning, enabling scalable evaluation of constraint satisfaction and preference optimization while highlighting the strengths of neuro-symbolic agents. Navigation systems further benefit from LLMs' ability to understand contextual instructions, interpret user queries, and reason about dynamic environments. 
For example, NavGPT [182] and NavGPT-v2 [181] integrate natural language with real-time traffic and indoor video data to generate personalized and optimized routing solutions. By incorporating STFMs across these domains, physical grounding models facilitate more precise localization, efficient navigation, and adaptive urban mobility solutions, ultimately bridging the gap between digital intelligence and real-world spatial reasoning. + +![](images/0dd90631ab1ad6054f2db1a2e36db5801f8436c980eebbece0578f2d29976011.jpg) +Figure 4: STFMs for addressing inferential problems. + +"What to do?" - Decision Making. Optimizing policies and real-time decision-making in dynamic environments based on inferential insights plays a crucial role in a wide range of applications, including traffic control, autonomous vehicles, and disaster response. In traffic control and management, LLMs improve adaptability and interpretability compared to traditional reinforcement learning approaches [61]. Additionally, they facilitate sim-to-real transfer by modeling real-world traffic dynamics, improving the reliability of traffic signal optimization [22]. Beyond signal control, models like TrafficGPT [168] integrate multimodal traffic data with structured reasoning to analyze, predict, and optimize traffic efficiency and safety in real time. In autonomous vehicles, STFMs contribute to decision-making through both direct and indirect mechanisms. Directly, models such as DDM-Lag [88] employ diffusion-based frameworks with Lagrangian safety enhancements and hybrid policy updates to refine policy articulation and ensure safety. Indirectly, STFMs enhance autonomous driving by predicting realistic driving behaviors [55, 114] and leveraging multi-modal perception to integrate sensor data, bird's eye view maps, and traffic contexts [20, 184], improving situational awareness and vehicle control. 
Beyond transportation, STFMs play a critical role in disaster management and emergency response by integrating diverse spatio-temporal data sources, such as weather forecasts, remote sensing, and social media signals, to predict disaster impacts and optimize evacuation strategies [16, 31, 65]. + +"What if?"- Scenario Simulation. STFMs, with their advanced perception and reasoning capabilities, enable the development of STFM-based agents that integrate into Multi-Agent Systems (MAS) to model complex interactions across diverse domains [29]. In urban planning and social simulation, MAS facilitates participatory urban design by simulating interactions between planners and residents. For example, LLM-driven MAS has been used to collaboratively refine land-use plans, leading to improved accessibility and ecological outcomes that surpass human expert solutions [185]. Beyond urban planning, MAS contributes to social science research by modeling human-like behaviors in AI-driven networks. Studies such as [23, 107, 109] demonstrate that LLM-based agents can naturally develop social structures, providing valuable insights into emergent social dynamics. Beyond urban applications, MAS significantly advances game AI and strategic decision-making. Recent studies [112, 133, 187] highlight how MAS-powered reinforcement learning enables strategic gameplay, real-time opponent modeling, and interactive storytelling, fostering the development of more adaptive, intelligent, and realistic virtual agents. + +# 4 The Methodology Perspective + +As shown in Figure 5, we delve into STFMs from a methodology perspective, focusing on $i)$ LLM-based models, which are widely applied across the entire workflow of $ST$ data science by zero-shot utilization or fine-tuning and $ii)$ PFM-based models, i.e., pretraining FMs from scratch, which is mainly utilized for $ST$ data mining. The comparison between them can be found in Appendix C. 
+ +![](images/60d69dbcc8c85b62a9fac9cb5b695647836692fdc243ddfde0484cf9e89d35d3.jpg) +Figure 5: A method-centric taxonomy. Full version: Fig. 7. + +# 4.1 Large Language Models (LLM) + +4.1.1 Zero-shot Learner. LLMs exhibit strong reasoning and contextual understanding capabilities, making them highly effective across various ST tasks, including data sensing, management, and mining. As shown in Appendix B, they can function as augmenters, predictors, or agents. To ease the presentation, we adopt a broad definition of LLMs, encompassing standard LLMs, Vision-Language Models (VLM), and Multimodal LLMs (MLLM). The zero-shot utilization of LLMs can be categorized into two primary classes. + +- Prompt Engineering. When taking LLMs as zero-shot predictors [33, 53, 125] or data augmenters [150] for various tasks, prompt engineering plays an essential role in shaping model outputs. Below, we summarize key aspects for prompt engineering in current research: a) Prompt Construction: A well-designed prompt typically contains key elements like Task Instruction, Tokenization, and Few-shot Examples. Task instruction [53, 147, 149] aims to explicitly guide LLMs to execute specific operations, incorporating domain knowledge [157] if applicable. Tokenization [33, 53] is crucial to aligning ST data formats with LLM input structures. Additionally, presenting a small number of annotated examples [175] facilitates in-context learning, enabling LLMs to better generalize to complex tasks while ensuring output consistency and adherence to the expected format. b) Prompt Learning: [73, 148] Also known as instruction-tuning, this method learns prompts dynamically rather than relying on manually crafted ones. By optimizing prompt structures during training, it provides a flexible and efficient way to adapt LLMs to new tasks without altering their underlying model weights. 
+ +c) Chain-of-Thought (CoT) Prompting: CoT [87, 175] enhances LLMs' reasoning capabilities by guiding them through step-by-step logical progression. This method improves their ability to tackle complex spatio-temporal tasks, ensuring more interpretable, structured, and accurate outputs in decision-making processes. + +- Agentic Engineering. The emergence of LLM-based agents [49, 144, 168, 174, 185] with reasoning, memory and tool-calling capabilities is transforming ST data science, enabling more adaptive and autonomous decision-making. When designing agent-based solutions, existing works primarily consider the following key aspects: a) Role Assignment. [50, 144, 174] clearly specify the responsibilities and functional boundaries of each agent within the system. b) Memorization [64, 174] refers to the agent's capability to store, recall, and leverage past information and context during task execution. A basic approach involves embedding past interactions into prompts, while more advanced techniques like Retrieval-Augmented Generation (RAG) [143, 155] dynamically retrieve relevant information from external knowledge bases, incorporating only the most pertinent content into the prompt. c) Tool Definition [168, 174], which identify and integrate various tools and functionalities that an agent can call upon to solve complex tasks. In ST data science, various expert models like STGNNs [51] can be wrapped as a tool and added into the agent in a plug-and-play manner. d) Multi-Agent System. Deploying multiple specialized agents to work collaboratively (each with distinct roles) enhances the efficiency and robustness of solutions for intricate ST challenges [49, 63, 185]. + +4.1.2 Supervised Fine-Tuning for LLMs. Fine-tuning adapts LLMs to ST tasks by adjusting their parameters based on domain-specific datasets, sometimes incorporating additional modalities such as texts [79, 150] and vision [180]. 
We categorize fine-tuning methods into three approaches based on the extent of parameter updates: + +- Full Parameter Fine-Tuning [68, 98, 100, 104, 108] updates all model parameters based on downstream ST datasets, achieving maximal adaptation to specific tasks. However, it requires substantial labeled data and high computational resources, making it impractical for many real-world applications. +- Partial Parameter Fine-tuning. To reduce computational overhead, this method [13, 183] freezes most parameters, such as attention weights, while fine-tuning only a small subset (e.g., position encodings and layer normalization). However, modifying a subset of parameters can disrupt the LLM's learned representations, leading to catastrophic forgetting of general knowledge. +- Add-on Parameter Fine-Tuning. To mitigate forgetting while maintaining efficiency, this technique [61] introduces trainable low-rank matrices (e.g., LoRA [42]), while keeping the original LLM weights frozen. This strategy preserves pretrained knowledge while enabling efficient adaptation to ST tasks. Besides fine-tuning LLMs' weights, another way is training additional layers for input tokenization or task adaption. For instance, TimeLLM [53] trains a self-attention layer that aligns patched time series representations with pretrained text prototype embeddings. Similarly, Time-VLM [180] trains a memory-enhanced attention to capture both short- and long-term dependencies. For task adaption, existing methods typically train an additional prediction head (e.g., linear layers) to project the LLM's output embeddings into a domain-specific space [53, 180]. + +# 4.2 Pretrained Foundation Models (PFM) + +Unlike LLMs, which build STFMs by directly using or fine-tuning LLMs, PFMs are developed from scratch, independent of existing LLMs. 
This approach enables domain-specific optimization, allowing models to better capture ST dependencies from cross-domain ST data without constraints imposed by linguistic priors. Following this, we examine PFMs through three key dimensions: + +4.2.1 Neural Architecture. The architecture of PFMs is a fundamental design choice that directly influences their capabilities, efficiency, and adaptability in ST tasks, which can be categorized into: + +- Transformer-based PFMs. Transformers have been the predominant architecture choice for building PFMs thanks to its powerful sequential modeling ability introduced by the self-attention mechanism [7, 72, 77, 85, 158]. +- Diffusion-based PFMs. Diffusion-based models have recently emerged as a powerful approach for ST representation learning [12, 21, 136, 160, 161, 188], particularly in generative and predictive modeling. These models iteratively learn to reverse a stochastic noise process, enabling them to generate high-fidelity spatio-temporal sequences with strong generalization properties. +- Graph-based PFMs. Unlike sequential models, GNNs excel at representing spatially structured data such as road networks. [62, 130] build FMs based on graph neural networks to learn the complex correlation between different entities in ST applications. +- Others. Another emerging class of PFMs is State Space Model (SSM)-based models [8, 43, 97], which construct PFMs using structured state-space representations. Meanwhile, several studies utilize CNNs [118] as backbones for developing PFMs. + +4.2.2 Pretraining Scheme. To enhance generalization ability, PFMs are usually pretrained based on cross-domain datasets [91, 137, 158], enabling them to learn diverse ST patterns across multiple domains. 
Existing pretraining schemes of PFMs can be classified into three types based on the training objectives: a) Generative Pretraining [85, 98, 130, 138, 189] focuses on reconstructing input data by learning its underlying distribution, enabling the model to generate realistic time series or ST data, while b) Contrastive Pretraining [7, 84, 171] emphasize distinguishing between similar and dissimilar data pairs to learn robust representations by maximizing agreement between augmented views of the same sample. It is particularly effective in multimodal ST learning, aligning heterogeneous data sources such as satellite imagery and its text description. c) Hybrid Pretraining [77] integrates both generative and contrastive objectives, leveraging their complementary strengths. + +4.2.3 Data Modality. ST data manifests in various modalities, each characterized by unique properties (see Section 2), necessitating the development of modality-specific STFMs: + +- Location. PFMs for location data [7, 40, 71, 124, 138, 150, 171] aim to learn general embedding for geographical entities. For instance, GeoVectors [124] and SpaBERT [71] learn location embeddings based on open-source data such OpenStreetMap, while G2PTL [138] learns from massive logistics delivery data. Notably, there is a noticeable trend that leverages multi-modalities (such as satellite image and text) for comprehensive location embeddings. For example, both UrbanCLIP [150], UrbanVLP [40], and ReFound [142] utilize satellite images for urban region profiling. + +![](images/04153b45ab02f3e79c0ab49c30b7605ee00c77f9060519cff8dcabcf84cc5fc2.jpg) +Figure 6: Representative PFMs for different types of ST data. + +- Trajectory & Event. PFMs for trajectory/event data [21, 84, 85, 103, 121, 189] are designed to learn general sequential patterns from inputs. A pioneering effort in this direction is TrajFM [85], which introduces a trajectory FM capable of supporting both regional and task transferability. 
Pretrained on vehicle trajectories from multiple cities, TrajFM employs a trajectory-masking and autoregressive recovery mechanism to enhance its learning capabilities. To tackle the limited resources of cross-domain trajectories, UniTraj [189] curates a billion-scale mobility dataset spanning diverse geographic regions to facilitate the advancement of trajectory-based FMs. For event data, MOTOR [121] proposes a time-to-event FM for structured medical records. +- ST Raster. PFMs for ST raster data [10, 15, 98, 104, 108, 117, 160] organize spatial information in a grid-like format, with a typical applied domain being weather/climate forecasting. For instance, W-MAE [98] trains a mask autoencoder for ST grid forecasting. CimaX [104] develops a general-purpose climate foundation model, pretrained on diverse datasets spanning various variables, ST scales, and physical contexts. Pangu [10] is trained on 39 years of global climate data, which achieves superior forecasting performance compared to leading numerical weather prediction systems. UniST [158] first pretrains the model in various ST raster data via masked pretraining, and then proposes a learnable ST prompt to enhance the model's generalization ability. +- ST Graph. PFMs for ST graph data [62, 72, 93, 111, 117, 134] learn the ST dependencies from ST graphs that generalize effectively in unseen spatial and temporal contexts. Unlike ST Raster PFMs, there are limited works in this area, which is more challenging due to the complex graph correlation. One representative is OpenCity [72] for ST graph forecasting, which integrates Transformer and GNN to model the ST dependencies in traffic data. + +# 5 Conclusion and Future Directions + +The rapid advancement of FMs has transformed ST data science, impacting sensing, management, and mining. 
This survey provides a comprehensive review of FMs for ST data science, identifying key capabilities such as perception, reasoning, and optimization while exploring diverse downstream tasks and datasets. We also establish a systematic taxonomy of methodologies, enhancing understanding of how STFMs model ST data. Despite progress, challenges remain in generalization, interpretability, and efficiency. By consolidating recent advances and outlining future directions (see Appendix A), this survey aims to inspire further innovations, driving the development of scalable and adaptive STFMs for real practice. + +# References + +[1] Oluwanifemi Adebayo Moses Adekanye. 2024. LIm-powered synthetic environments for self-driving scenarios. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 23721-23723. +[2] Nurwahyu Alamsyah, Muhamad Amirul Haq, and Chayadi Oktomy Noto Susanto. 2024. Automated Smart City Planning through Personalized Large Language Model with Retrieval Augmented Generation. In 2024 International Conference on Information Technology and Computing (ICITCOM). IEEE, 306-311. +[3] Sarah Alnegheimish, Linh Nguyen, Laure Berti-Equille, and Kalyan Veeramacheneni. 2024. Large language models can be zero-shot anomaly detectors for time series? arXiv preprint arXiv:2405.14755 (2024). +[4] Abdul Fatir Ansari, Lorenzo Stella, Caner Turkmen, Xiyuan Zhang, Pedro Mercado, Huibin Shen, Oleksandr Shchur, Syama Sundar Rangapuram, Sebastian Pineda Arango, Shubham Kapoor, et al. 2024. Chronos: Learning the language of time series. arXiv preprint arXiv:2403.07815 (2024). +[5] Growtham Atluri, Anuj Karpatne, and Vipin Kumar. 2018. Spatio-temporal data mining: A survey of problems and methods. ACM Computing Surveys (CSUR) 51, 4 (2018), 1-41. +[6] Lei Bai, Lina Yao, Can Li, Xianzhi Wang, and Can Wang. 2020. Adaptive graph convolutional recurrent network for traffic forecasting. In NeurIPS, Vol. 33. 17804-17815. 
+[7] Pasquale Balsebre, Weiming Huang, Gao Cong, and Yi Li. 2024. City foundation models for learning general purpose representations from openstreetmap. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 87-97. +[8] Sathya Kamesh Bhethanabhotla, Omar Swelam, Julien Siems, David Salinas, and Frank Hutter. 2024. Mamba4Cast: Efficient Zero-Shot Time Series Forecasting with State Space Models. arXiv preprint arXiv:2410.09385 (2024). +[9] Kaifeng Bi, Lingxi Xie, Hengheng Zhang, Xin Chen, Xiaotao Gu, and Qi Tian. 2023. Accurate medium-range global weather forecasting with 3D neural networks. Nature 619, 7970 (2023), 533-538. +[10] Kaifeng Bi, Lingxi Xie, Hengheng Zhang, Xin Chen, Xiaotao Gu, and Qi Tian. 2023. Accurate medium-range global weather forecasting with 3D neural networks. Nature 619, 7970 (2023), 533-538. +[11] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in neural information processing systems 33 (2020), 1877-1901. +[12] Defu Cao, Wen Ye, and Yan Liu. [n.d.]. TimeDiT: General-purpose Diffusion Transformers for Time Series Foundation Model. In ICML 2024 Workshop on Foundation Models in the Wild. +[13] Ching Chang, Wen-Chih Peng, and Tien-Fu Chen. 2023. LLM4TS: Two-Stage Fine-Tuning for Time-Series Forecasting with Pre-Trained LLMs. arXiv preprint arXiv:2308.08469 (2023). +[14] Jiaqi Chen, Bingqian Lin, Ran Xu, Zhenhua Chai, Xiaodan Liang, and KwanYee K Wong. 2024. Mapppt: Map-guided prompting with adaptive path planning for vision-and-language navigation. arXiv preprint arXiv:2401.07314 (2024). +[15] Kang Chen, Tao Han, Junchao Gong, Lei Bai, Fenghua Ling, Jing-Jia Luo, Xi Chen, Leiming Ma, Tianning Zhang, Rui Su, et al. 2023. FengWu: Pushing the Skillful Global Medium-range Weather Forecast beyond 10 Days Lead. 
arXiv preprint arXiv:2304.02948 (2023). +[16] Minze Chen, Zhenxiang Tao, Weitong Tang, Tingxin Qin, Rui Yang, and Chunli Zhu. 2024. Enhancing emergency decision-making with knowledge graphs and large language models. International Journal of Disaster Risk Reduction 113 (2024), 104804. +[17] Jinguo Cheng, Chunwei Yang, Wanlin Cai, Yuxuan Liang, Qingsong Wen, and Yuankai Wu. 2024. NuwaTS: a Foundation Model Mending Every Incomplete Time Series. arXiv preprint arXiv:2405.15317 (2024). +[18] Mingyue Cheng, Yiheng Chen, Qi Liu, Zhiding Liu, and Yucong Luo. 2024. Advancing time series classification with multimodal language modeling. arXiv preprint arXiv:2403.12371 (2024). +[19] Garima Chhikara, Anurag Sharma, V Gurucharan, Kripabandhu Ghosh, and Abhijnan Chakraborty. 2024. LaMSUM: Amplifying Voices Against Harassment through LLM Guided Extractive Summarization of User Incident Reports. arXiv preprint arXiv:2406.15809 (2024). +[20] Tushar Choudhary, Vikrant Dewangan, Shivam Chandhok, Shubham Priyadarshan, Anushka Jain, Arun K Singh, Siddharth Srivastava, Krishna Murthy Jatavalabhula, and K Madhava Krishna. 2024. Talk2BEV: Language-enhanced Bird's-eye view maps for autonomous driving. In 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 16345-16352. +[21] Chen Chu, Hengcai Zhang, and Feng Lu. 2023. TrajGDM: A New Trajectory Foundation Model for Simulating Human Mobility. In Proceedings of the 31st ACM International Conference on Advances in Geographic Information Systems. 1-2. +[22] Longchao Da, Minchiuan Gao, Hao Mei, and Hua Wei. 2023. Lm powered sim-to-real transfer for traffic signal control. arXiv preprint arXiv:2308.14284 (2023). + +[23] Gordon Dai, Weijia Zhang, Jinhan Li, Siqi Yang, Srihas Rao, Arthur Caetano, Misha Sra, et al. 2024. Artificial leviathan: Exploring social evolution of lIm agents through the lens of hobbesian social contract theory. arXiv preprint arXiv:2406.14373 (2024). 
+[24] Zifeng Ding, Heling Cai, Jingpei Wu, Yunpu Ma, Ruotong Liao, Bo Xiong, and Volker Tresp. 2024. zrLLM: Zero-Shot Relational Learning on Temporal Knowledge Graphs with Large Language Models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 1877-1895. +[25] Quang Minh Dinh, Minh Khoi Ho, Anh Quan Dang, and Hung Phong Tran. 2024. Trafficvlm: A controllable visual language model for traffic video captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshop. 7134-7143. +[26] Manqing Dong, Hao Huang, and Longbing Cao. 2024. Can LLMs Serve As Time Series Anomaly Detectors? arXiv preprint arXiv:2408.03475 (2024). +[27] Vitor Gaboardi dos Santos, Guto Leoni Santos, Theo Lynn, and Boualem Benatallah. 2024. Identifying Citizen-Related Issues from Social Media Using LLM-Based Data Augmentation. In International Conference on Advanced Information Systems Engineering. Springer, 531-546. +[28] Jie Feng, Yuwei Du, Jie Zhao, and Yong Li. 2024. Agentmove: Predicting human mobility anywhere using large language model based agentic framework. arXiv preprint arXiv:2408.13986 (2024). +[29] Chen Gao, Xiaochong Lan, Nian Li, Yuan Yuan, Jingtao Ding, Zhilun Zhou, Fengli Xu, and Yong Li. 2024. Large language models empowered agent-based modeling and simulation: A survey and perspectives. *Humanities and Social Sciences Communications* 11, 1 (2024), 1-24. +[30] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. 2023. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 (2023). +[31] Vinicius G Goecks and Nicholas R Waytowich. 2023. Disasterresponsept: Large language models for accelerated plan of action development in disaster response scenarios. arXiv preprint arXiv:2306.17271 (2023). 
+[32] Adam Goodge, Wee Siong Ng, Bryan Hooi, and See Kiong Ng. 2025. Spatio-Temporal Foundation Models: Vision, Challenges, and Opportunities. arXiv preprint arXiv:2501.09045 (2025). +[33] Nate Gruver, Marc Finzi, Shikai Qiu, and Andrew Gordon Wilson. 2023. Large language models are zero-shot time series forecasters. Advances in neural information processing systems (2023). +[34] Nate Gruver, Marc Finzi, Shikai Qiu, and Andrew G Wilson. 2024. Large language models are zero-shot time series forecasters. Advances in Neural Information Processing Systems 36 (2024). +[35] Qinghua Guan, Jinhui Ouyang, Di Wu, and Weiren Yu. 2024. CityGPT: Towards Urban IoT Learning, Analysis and Interaction with Multi-Agent System. arXiv preprint arXiv:2405.14691 (2024). +[36] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025). +[37] Shengnan Guo, Youfang Lin, Ning Feng, Chao Song, and Huaiyu Wan. 2019. Attention based spatial-temporal graph convolutional networks for traffic flow forecasting. In AAAI, Vol. 33: 922-929. +[38] Devashish Vikas Gupta, Azeez Syed Ali Ishaqui, and Divya Kiran Kadiyala. 2024. Geode: A Zero-shot Geospatial Question-Answering Agent with Explicit Reasoning and Precise Spatio-Temporal Retrieval. arXiv preprint arXiv:2407.11014 (2024). +[39] Wes Gurnee and Max Tegmark. 2023. Language models represent space and time. arXiv preprint arXiv:2310.02207 (2023). +[40] Xixuan Hao, Wei Chen, Yibo Yan, Siru Zhong, Kun Wang, Qingsong Wen, and Yuxuan Liang. 2024. UrbanVLP: A Multi-Granularity Vision-Language Pre-Trained Foundation Model for Urban Indicator Prediction. arXiv preprint arXiv:2403.16831 (2024). +[41] Ce Hou, Fan Zhang, Yong Li, Haifeng Li, Gengchen Mai, Yuhao Kang, Ling Yao, Wenhao Yu, Yao Yao, Song Gao, et al. 2025. 
Urban sensing in the era of large language models. The Innovation 6, 1 (2025). +[42] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685 (2021). +[43] Jiaxi Hu, Disen Lan, Ziyu Zhou, Qingsong Wen, and Yuxuan Liang. 2024. TimeSSM: Simplifying and Unifying State Space Models for Time Series Forecasting. arXiv preprint arXiv:2405.16312 (2024). +[44] Yiheng Huang, Xiaowei Mao, Shengnan Guo, Yubin Chen, Junfeng Shen, Tiankuo Li, Youfang Lin, and Huaiyu Wan. 2024. STD-PLM: Understanding Both Spatial and Temporal Properties of Spatial-Temporal Data with PLM. arXiv preprint arXiv:2407.09096 (2024). +[45] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. 2024. Gpt-40 system card. arXiv preprint arXiv:2410.21276 (2024). +[46] Junzhong Ji, Fan Yu, and Minglong Lei. 2022. Self-Supervised Spatiotemporal Graph Neural Networks With Self-Distillation for Traffic Prediction. IEEE TITS + +(2022). +[47] Yue Jiang, Qin Chao, Yile Chen, Xiucheng Li, Shuai Liu, and Gao Cong. 2024. UrbanLLM: Autonomous Urban Activity Planning and Management with Large Language Models. arXiv preprint arXiv:2406.12360 (2024). +[48] Yushan Jiang, Zijie Pan, Xikun Zhang, Sahil Garg, Anderson Schneider, Yuriy Nevmyvaka, and Dongjin Song. 2024. Empowering Time Series Analysis with Large Language Models: A Survey. In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, IfCAI-24, Kate Larson (Ed.). International Joint Conferences on Artificial Intelligence Organization, 8095-8103. https://doi.org/10.24963/ijcai.2024/895 Survey Track. +[49] Yushan Jiang, Wenzhao Yu, Geon Lee, Dongjin Song, Kijung Shin, Wei Cheng, Yanchi Liu, and Haifeng Chen. 2026. Explanable Multi-modal Time Series Prediction with LLM-in-the-Loop. 
arXiv preprint arXiv:2503.01013 (2026). +[50] WANG JIAWEI, Renhe Jiang, Chuang Yang, Zengqing Wu, Ryosuke Shibasaki, Noboru Koshizuka, Chuan Xiao, et al. 2024. Large language models as urban residents: An llm agent framework for personal mobility generation. Advances in Neural Information Processing Systems 37 (2024), 124547-124574. +[51] Guangyin Jin, Yuxuan Liang, Yuchen Fang, Zezhi Shao, Jincai Huang, Junbo Zhang, and Yu Zheng. 2023. Spatio-temporal graph neural networks for predictive learning in urban computing: A survey. IEEE Transactions on Knowledge and Data Engineering (2023). +[52] Ming Jin, Huan Yee Koh, Qingsong Wen, Daniele Zambon, Cesare Alippi, Geoffrey I Webb, Irwin King, and Shirui Pan. 2024. A survey on graph neural networks for time series: Forecasting, classification, imputation, and anomaly detection. IEEE Transactions on Pattern Analysis and Machine Intelligence (2024). +[53] Ming Jin, Shiyu Wang, Lintao Ma, Zhixuan Chu, James Y Zhang, Xiaoming Shi, Pin-Yu Chen, Yuxuan Liang, Yuan-Fang Li, Shirui Pan, et al. 2023. Time-LLM: Time series forecasting by reprogramming large language models. arXiv preprint arXiv:2310.01728 (2023). +[54] Ming Jin, Qingsong Wen, Yuxuan Liang, Chaoli Zhang, Siqiao Xue, Xue Wang, James Zhang, Yi Wang, Haifeng Chen, Xiaoli Li, et al. 2023. Large models for time series and spatio-temporal data: A survey and outlook. arXiv preprint arXiv:2310.10196 (2023). +[55] Ye Jin, Xiaoxi Shen, Huiling Peng, Xiaohan Liu, Jingli Qin, Jiayang Li, Jintao Xie, Peizhong Gao, Guyue Zhou, and Jiangtao Gong. 2023. Surrealdriver: Designing generative driver agent simulation framework in urban contexts based on large language model. arXiv preprint arXiv:2309.13193 (2023). +[56] Chenlu Ju, Jiaxin Liu, Shobhit Sinha, Hao Xue, and Flora Salim. 2025. TrajLLM: A Modular LLM-Enhanced Agent-Based Framework for Realistic Human Trajectory Simulation. (2025). 
+[57] Subbarao Kambhampati, Karthik Valmeekam, Lin Guan, Mudit Verma, Kaya Stechly, Siddhant Bhambri, Lucas Saldyt, and Anil Murthy. 2024. LLMs can't plan, but can help planning in LLM-modulo frameworks. arXiv preprint arXiv:2402.01817 (2024). +[58] Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of naacL-HLT, Vol. 1. Minneapolis, Minnesota. +[59] Dmitrii Kochkov, Janni Yuval, Ian Langmore, Peter Norgaard, Jamie Smith, Griffin Mooers, Milan Klower, James Lottes, Stephan Rasp, Peter Duben, et al. 2024. Neural general circulation models for weather and climate. Nature 632, 8027 (2024), 1060–1066. +[60] Alexandre Lacoste, Nils Lehmann, Pau Rodriguez, Evan Sherwin, Hannah Kerner, Björn Lütjens, Jeremy Irvin, David Dao, Hamed Alemohammad, Alexandre Drouin, et al. 2024. Geo-bench: Toward foundation models for earth monitoring. Advances in Neural Information Processing Systems 36 (2024). +[61] Siqi Lai, Zhao Xu, Weijia Zhang, Hao Liu, and Hui Xiong. 2025. Large language models as traffic signal control agents: Capacity and opportunity. In Proceedings of the 31st ACM SIGKDD conference on knowledge discovery and data mining. +[62] Remi Lam, Alvaro Sanchez-Gonzalez, Matthew Willson, Peter Wirsnsberger, Meire Fortunato, Ferran Alet, Suman Ravuri, Timo Ewalds, Zach Eaton-Rosen, Weihua Hu, et al. 2023. GraphCast: Learning skillful medium-range global weather forecasting. Science 382, 6677 (2023), 1416-1421. +[63] Geon Lee, Wenchao Yu, Kijung Shin, Wei Cheng, and Haifeng Chen. 2025. TimeCAP: Learning to Contextualize, Augment, and Predict Time Series Events with Large Language Model Agents. arXiv preprint arXiv:2502.11418 (2025). +[64] Mingcong Lei, Yiming Zhao, Ge Wang, Zhixin Mai, Shuguang Cui, Yatong Han, and Jinke Ren. 2025. STMA: A Spatio-Temporal Memory Agent for Long-Horizon Embodied Task Planning. arXiv preprint arXiv:2502.10177 (2025). 
+[65] Zhenyu Lei, Yushun Dong, Weiyu Li, Rong Ding, Qi Wang, and Jundong Li. 2025. Harnessing Large Language Models for Disaster Management: A Survey. arXiv preprint arXiv:2501.06932 (2025). +[66] Jiangtong Li, Li Niu, and Liqing Zhang. 2022. From representation to reasoning: Towards both evidence and commonsense reasoning for video question-answering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 21273–21282. +[67] Jinpeng Li, Haiping Wang, Yuan Liu, Zhiyang Dou, Yuexin Ma, Sibei Yang, Yuan Li, Wenping Wang, Zhen Dong, Bisheng Yang, et al. [n.d.]. CityAnchor: City-scale 3D Visual Grounding with Multi-modality LLMs. In The Thirteenth + +International Conference on Learning Representations. +[68] Peibo Li, Maarten de Rijke, Hao Xue, Shuang Ao, Yang Song, and Flora D Salim. 2024. Large language models for next point-of-interest recommendation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1463-1472. +[69] Wenbin Li, Di Yao, Ruibo Zhao, Wenjie Chen, Zijie Xu, Chengxue Luo, Chang Gong, Quanliang Jing, Haining Tan, and Jingping Bi. 2024. STBench: Assessing the ability of large language models in spatio-temporal analysis. arXiv preprint arXiv:2406.19065 (2024). +[70] Yaguang Li, Rose Yu, Cyrus Shahabi, and Yan Liu. 2017. Diffusion convolutional recurrent neural network: Data-driven traffic forecasting. arXiv preprint arXiv:1707.01926 (2017). +[71] Zekun Li, Jina Kim, Yao-Yi Chiang, and Muhao Chen. 2022. SpaBERT: A pretrained language model from geographic data for geo-entity representation. arXiv preprint arXiv:2210.12213 (2022). +[72] Zhonghang Li, Long Xia, Lei Shi, Yong Xu, Dawei Yin, and Chao Huang. 2024. Opencity: Open spatio-temporal foundation models for traffic prediction. arXiv preprint arXiv:2408.10269 (2024). +[73] Zhonghang Li, Lianghao Xia, Jiabin Tang, Yong Xu, Lei Shi, Long Xia, Dawei Yin, and Chao Huang. 2024. 
UrbanGPT: Spatio-temporal large language models. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 5351-5362. +[74] Zhonghang Li, Lianghao Xia, Yong Xu, and Chao Huang. 2024. GPT-ST: generative pre-training of spatio-temporal graph neural networks. Advances in Neural Information Processing Systems 36 (2024). +[75] Zongrong Li, Junhao Xu, Siqin Wang, Yifan Wu, and Haiyang Li. 2024. StreetviewLLM: Extracting Geographic Information Using a Chain-of-Thought Multimodal Large Language Model. arXiv preprint arXiv:2411.14476 (2024). +[76] Zhe Li, Ronghui Xu, Jilin Hu, Zhong Peng, Xi Lu, Chenjuan Guo, and Bin Yang. 2024. Ocean Significant Wave Height Estimation with Spatio-temporally Aware Large Language Models. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 3892-3896. +[77] Zekun Li, Wenxuan Zhou, Yao-Yi Chiang, and Muhao Chen. 2023. GeoLM: Empowering language models for geospatially grounded language understanding. arXiv preprint arXiv:2310.14478 (2023). +[78] Yuxuan Liang, Songyu Ke, Junbo Zhang, Xiuwen Yi, and Yu Zheng. 2018. GeoMAN: Multi-level attention networks for geo-sensory time series prediction. In IJCAI, Vol. 2018. 3428-3434. +[79] Yuebing Liang, Yichao Liu, Xiaohan Wang, and Zhan Zhao. 2023. Exploring large language models for human mobility prediction under public events. arXiv preprint arXiv:2311.17351 (2023). +[80] Yuxuan Liang, Kun Ouyang, Yiwei Wang, Zheyi Pan, Yifang Yin, Hongyang Chen, Junbo Zhang, Yu Zheng, David S Rosenblum, and Roger Zimmermann. 2022. Mixed-Order Relation-Aware Recurrent Neural Networks for Spatio-Temporal Forecasting. IEEE TKDE (2022). +[81] Yuxuan Liang, Haomin Wen, Yuqi Nie, Yushan Jiang, Ming Jin, Dongjin Song, Shirui Pan, and Qingsong Wen. 2024. Foundation models for time series analysis: A tutorial and survey. In Proceedings of the 30th ACM SIGKDD conference on knowledge discovery and data mining. 6555-6565. 
+[82] Yuxuan Liang, Yutong Xia, Songyu Ke, Yiwei Wang, Qingsong Wen, Junbo Zhang, Yu Zheng, and Roger Zimmermann. 2023. AirFormer: Predicting nationwide air quality in China with transformers. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 37. 14329-14337. +[83] Jaesung Lim, Seunghwan An, Gyeongdong Woo, ChangHyun Kim, and Jong-June Jeon. [n.d.]. Context-Driven Missing Data Imputation via Large Language Model. ([n.d.]). +[84] Yan Lin, Yichen Liu, Zeyu Zhou, Haomin Wen, Erwen Zheng, Shengnan Guo, Youfang Lin, and Huaiyu Wan. 2024. PTrajM: Efficient and Semantic-rich Trajectory Learning with Pretrained Trajectory-Mamba. arXiv preprint arXiv:2408.04916 (2024). +[85] Yan Lin, Tonglong Wei, Zeyu Zhou, Haomin Wen, Jilin Hu, Shengnan Guo, Youfang Lin, and Huaiyu Wan. 2024. TrajFM: A Vehicle Trajectory Foundation Model for Region and Task Transferability. arXiv preprint arXiv:2408.15251 (2024). +[86] Chenxi Liu, Sun Yang, Qianxiong Xu, Zhishuai Li, Cheng Long, Ziyue Li, and Rui Zhao. 2024. Spatial-temporal large language model for traffic prediction. arXiv preprint arXiv:2401.10134 (2024). +[87] Haoxin Liu, Zhiyuan Zhao, Jindong Wang, Harshavardhan Kamarthi, and B Aditya Prakash. 2024. Lstprompt: Large language models as zero-shot time series forecasters by long-short-term prompting. arXiv preprint arXiv:2402.16132 (2024). +[88] Jiaqi Liu, Peng Hang, Xiaocong Zhao, Jianqiang Wang, and Jian Sun. 2024. DDM-lag: A diffusion-based decision-making model for autonomous vehicles with lagrangian safety enhancement. IEEE Transactions on Artificial Intelligence (2024). +[89] Jun Liu, Chaoyun Zhang, Jiaxu Qian, Minghua Ma, Si Qin, Chetan Bansal, Qingwei Lin, Saravanan Rajmohan, and Dongmei Zhang. 2024. Large language models can deliver accurate and interpretable time series anomaly detection. + +arXiv preprint arXiv:2405.15370 (2024). +[90] Lei Liu, Shuo Yu, Runze Wang, Zhenxun Ma, and Yanming Shen. 2024. 
How can large language models understand spatial-temporal data? arXiv preprint arXiv:2401.14192 (2024). +[91] Xu Liu, Junfeng Hu, Yuan Li, Shizhe Diao, Yuxuan Liang, Bryan Hooi, and Roger Zimmermann. 2024. Unitime: A language-empowered unified model for cross-domain time series forecasting. In Proceedings of the ACM Web Conference 2024. 4095-4106. +[92] Xu Liu, Yuxuan Liang, Chao Huang, Yu Zheng, Bryan Hooi, and Roger Zimmermann. 2022. When do contrastive learning signals help spatio-temporal graph forecasting? In SIGSPATIAL. 1-12. +[93] Xu Liu, Juncheng Liu, Gerald Woo, Taha Aksu, Yuxuan Liang, Roger Zimmermann, Chenghao Liu, Silvio Savarese, Caiming Xiong, and Doyen Sahoo. 2024. Moirai-MoE: Empowering Time Series Foundation Models with Sparse Mixture of Experts. arXiv preprint arXiv:2410.10469 (2024). +[94] Yu Liu, Jingtao Ding, Yanjie Fu, and Yong Li. 2023. Urban knowledge graph system. ACM Transactions on Intelligent Systems and Technology 14, 4 (2023), 1-25. +[95] Lin Long, Rui Wang, Ruixuan Xiao, Junbo Zhao, Xiao Ding, Gang Chen, and Haobo Wang. 2024. On llms-driven synthetic data generation, curation, and evaluation: A survey. arXiv preprint arXiv:2406.15126 (2024). +[96] Qingyue Long, Yuan Yuan, and Yong Li. 2024. A Universal Model for Human Mobility Prediction. arXiv preprint arXiv:2412.15294 (2024). +[97] Haoyu Ma, Yushu Chen, Wenlai Zhao, Jinzhe Yang, Yingsheng Ji, Xinghua Xu, Xiaozhu Liu, Hao Jing, Shengzhuo Liu, and Guangwen Yang. 2024. A Mamba Foundation Model for Time Series Forecasting. arXiv preprint arXiv:2411.02941 (2024). +[98] Xin Man, Chenghong Zhang, Changyu Li, and Jie Shao. 2023. W-MAE: Pretrained weather model with masked autoencoder for multi-variable weather forecasting. arXiv preprint arXiv:2304.08754 (2023). +[99] Rohin Manvi, Samar Khanna, Gengchen Mai, Marshall Burke, David Lobell, and Stefano Ermon. 2023. Geolm: Extracting geospatial knowledge from large language models. arXiv preprint arXiv:2310.06213 (2023). 
+[100] Rohin Manvi, Samar Khanna, Gengchen Mai, Marshall Burke, David B Lobell, and Stefano Ermon. 2024. GeoLLM: Extracting Geospatial Knowledge from Large Language Models. In The Twelfth International Conference on Learning Representations. +[101] Justin M Mittelstädt, Julia Maier, Panja Goerke, Frank Zinn, and Michael Hermes. 2024. Large language models can outperform humans in social situational judgments. Scientific Reports 14, 1 (2024), 27449. +[102] Seungwhan Moon, Andrea Madotto, Zhaojiang Lin, Aparajita Saraf, Amy Bearman, and Babak Damavandi. 2023. IMU2CLIP: Language-grounded Motion Sensor Translation with Multimodal Contrastive Learning. In Findings of the Association for Computational Linguistics: EMNLP 2023. 13246-13253. +[103] Alameen Najjar. 2023. Towards A Foundation Model For Trajectory Intelligence. In IEEE ICDMW. IEEE, 832-835. +[104] Tung Nguyen, Johannes Brandstetter, Ashish Kapoor, Jayesh K Gupta, and Aditya Grover. 2023. Climax: A foundation model for weather and climate. International Conference on Machine Learning (2023). +[105] Yansong Ning and Hao Liu. 2024. UrbanKGent: A Unified Large Language Model Agent Framework for Urban Knowledge Graph Construction. arXiv preprint arXiv:2402.06861 (2024). +[106] Yansong Ning, Hao Liu, Hao Wang, Zhenyu Zeng, and Hui Xiong. 2024. UUKG: unified urban knowledge graph dataset for urban spatiotemporal prediction. Advances in Neural Information Processing Systems 36 (2024). +[107] Joon Sung Park, Joseph O'Brien, Carrie Jun Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. 2023. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology. 1-22. +[108] Jaideep Pathak, Shashank Subramanian, Peter Harrington, Sanjeev Raja, Ashesh Chattopadhyay, Morteza Mardani, Thorsten Kurth, David Hall, Zongyi Li, Kamyar Azizzadenesheli, et al. 2022. 
Fourcastnet: A global data-driven high-resolution weather model using adaptive fourier neural operators. arXiv preprint arXiv:2202.11214 (2022). +[109] Jinghua Piao, Zhihong Lu, Chen Gao, Fengli Xu, Fernando P Santos, Yong Li, and James Evans. 2025. Emergence of human-like polarization among large language model agents. arXiv preprint arXiv:2501.05171 (2025). +[110] Arian Prabowo, Wei Shao, Hao Xue, Piotr Koniusz, and Flora D Salim. 2023. Because every sensor is unique, so is every pair: Handling dynamicity in traffic forecasting. In Proceedings of the 8th ACM/IEEE Conference on Internet of Things Design and Implementation. 93-104. +[111] Arian Prabowo, Hao Xue, Wei Shao, Piotr Koniusz, and Flora D Salim. 2024. Traffic forecasting on new roads using spatial contrastive pre-training (SCPT). Data Mining and Knowledge Discovery 38, 3 (2024), 913-937. +[112] Siyuan Qi, Shuo Chen, Yexin Li, Xiangyu Kong, Junqi Wang, Bangcheng Yang, Pring Wong, Yifan Zhong, Xiaoyuan Zhang, Zhaowei Zhang, et al. 2024. CivRealm: A Learning and Reasoning Odyssey in Civilization for Decision-Making Agents. In The Twelfth International Conference on Learning Representations. + +[113] Kyle K Qin, Yongli Ren, Wei Shao, Brennan Lake, Filippo Privitera, and Flora D Salim. 2023. Multiple-level point embedding for solving human trajectory imputation with prediction. ACM Transactions on Spatial Algorithms and Systems 9, 2 (2023), 1-22. +[114] Hao Sha, Yao Mu, Yuxuan Jiang, Li Chen, Chenfeng Xu, Ping Luo, Shengbo Eben Li, Masayoshi Tomizuka, Wei Zhan, and Mingyu Ding. 2023. *Languagempc: Large language models as decision makers for autonomous driving.* arXiv preprint arXiv:2310.03026 (2023). +[115] Jie-Jing Shao, Xiao-Wen Yang, Bo-Wen Zhang, Baizhi Chen, Wen-Da Wei, Lan-Zhe Guo, and Yu-feng Li. 2024. ChinaTravel: A Real-World Benchmark for Language Agents in Chinese Travel Planning. arXiv preprint arXiv:2412.13682 (2024). 
+[116] Wei Shao, Zhiling Jin, Shuo Wang, Yufan Kang, Xiao Xiao, Hamid Menouar, Zhaofeng Zhang, Junshan Zhang, and Flora Salim. 2022. Long-term spatiotemporal forecasting via dynamic multiple-graph attention. In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, IJCAI-22. +[117] Zezhi Shao, Zhao Zhang, Fei Wang, and Yongjun Xu. 2022. Pre-training enhanced spatial-temporal graph neural network for multivariate time series forecasting. In Proceedings of the 28th ACM SIGKDD conference on knowledge discovery and data mining. 1567-1577. +[118] Qichao Shentu, Beibu Li, Kai Zhao, Yang Shu, Zhongwen Rao, Lujia Pan, Bin Yang, and Chenjuan Guo. 2024. Towards a General Time Series Anomaly Detector with Adaptive Bottlenecks and Dual Adversarial Decoders. arXiv preprint arXiv:2405.15273 (2024). +[119] Xiaoming Shi, Shiyu Wang, Yuqi Nie, Dianqi Li, Zhou Ye, Qingsong Wen, and Ming Jin. 2025. Time-MoE: Billion-Scale Time Series Foundation Models with Mixture of Experts. In The Thirteenth International Conference on Learning Representations (ICLR). +[120] Xiaoming Shi, Siqiao Xue, Kangrui Wang, Fan Zhou, James Zhang, Jun Zhou, Chenhao Tan, and Hongyuan Mei. 2023. Language models can improve event prediction by few-shot abductive reasoning. Advances in Neural Information Processing Systems 36 (2023), 29532-29557. +[121] Ethan Steinberg, Jason Fries, Yizhe Xu, and Nigam Shah. 2023. MOTOR: A Time-To-Event Foundation Model For Structured Medical Records. arXiv preprint arXiv:2301.03150 (2023). +[122] Mingtian Tan, Mike A Merrill, Vinayak Gupta, Tim Althoff, and Thomas Hartvigsen. 2024. Are language models actually useful for time series forecasting?. In The Thirty-eighth Annual Conference on Neural Information Processing Systems. +[123] Yihong Tang, Zhaokai Wang, Ao Qu, Yihao Yan, Zhaofeng Wu, Dingyi Zhuang, Jushi Kai, Kebing Hou, Xiaotong Guo, Jinhua Zhao, et al. 2024. 
ITINERA: Integrating Spatial Optimization with Large Language Models for Open-domain Urban Itinerary Planning. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track. 1413-1432. +[124] Nicolas Tempelmeier, Simon Gottschalk, and Elena Demidova. 2021. GeoVectors: a linked open corpus of OpenStreetMap Embeddings on world scale. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management. 4604-4612. +[125] Saeid Ashraf Vaghefi, Dominik Stammbach, Veruska Muccione, Julia Bingler, Jingwei Ni, Mathias Kraus, Simon Allen, Chiara Colesanti-Senni, Tobias Wekhof, Tobias Schimanski, et al. 2023. ChatClimate: Grounding conversational AI in climate science. Communications Earth & Environment 4, 1 (2023), 480. +[126] Jiawei Wang, Renhe Jiang, Chuang Yang, Zengqing Wu, Makoto Onizuka, Ryosuke Shibasaki, Noboru Koshizuka, and Chuan Xiao. 2024. Large language models as urban residents: An llm agent framework for personal mobility generation. Advances in Neural Information Processing Systems (2024). +[127] Junyang Wang, Haiyang Xu, Jiabo Ye, Ming Yan, Weizhou Shen, Ji Zhang, Fei Huang, and Jitao Sang. 2024. Mobile-agent: Autonomous multi-modal mobile device agent with visual perception. arXiv preprint arXiv:2401.16158 (2024). +[128] Kun Wang, Hao Wu, Yifan Duan, Guibin Zhang, Kai Wang, Xiaojiang Peng, Yu Zheng, Yuxuan Liang, and Yang Wang. 2024. NuwaDynamics: Discovering and Updating in Causal Spatio-Temporal Modeling. In The Twelfth International Conference on Learning Representations. +[129] Senzhang Wang, Jiannong Cao, and Philip Yu. 2020. Deep learning for spatiotemporal data mining: A survey. IEEE TKDE (2020). +[130] Xuhong Wang, Ding Wang, Liang Chen, Fei-Yue Wang, and Yilun Lin. 2023. Building transportation foundation model via generative graph transformer. In 2023 IEEE 26th International Conference on Intelligent Transportation Systems (ITSC). IEEE, 6042-6047. 
+[131] Yihang Wang, Yuying Qiu, Peng Chen, Kai Zhao, Yang Shu, Zhongwen Rao, Lujia Pan, Bin Yang, and Chenjuan Guo. 2024. ROSE: Register Assisted General Time Series Forecasting with Decomposed Frequency Learning. CoRR abs/2405.17478 (2024). +[132] Yu Wang, Tongya Zheng, Shunyu Liu, Zunlei Feng, Kaixuan Chen, Yunzhi Hao, and Mingli Song. 2024. Spatiotemporal-Augmented Graph Neural Networks for Human Mobility Simulation. IEEE Transactions on Knowledge and Data Engineering (2024). + +[133] Zihao Wang, Shaofei Cai, Guanzhou Chen, Anji Liu, Xiaojian Ma, Yitao Liang, and Team CraftJarvis. 2023. Describe, explain, plan and select: interactive planning with large language models enables open-world multi-task agents. In Proceedings of the 37th International Conference on Neural Information Processing Systems. 34153-34189. +[134] Zhaonan Wang, Renhe Jiang, Hao Xue, Flora D Salim, Xuan Song, and Ryosuke Shibasaki. 2022. Event-aware multimodal mobility nowcasting. In AAAI, Vol. 36. 4228-4236. +[135] Tonglong Wei, Yan Lin, Youfang Lin, Shengnan Guo, Jilin Hu, Gao Cong, and Huaiyu Wan. 2024. PTR: A Pre-trained Language Model for Trajectory Recovery. arXiv preprint arXiv:2410.14281 (2024). +[136] Haomin Wen, Youfang Lin, Yutong Xia, Huaiyu Wan, Qingsong Wen, Roger Zimmermann, and Yuxuan Liang. 2023. Diffstg: Probabilistic spatio-temporal graph forecasting with denoising diffusion models. In ACM SIGSPATIAL. 1-12. +[137] Gerald Woo, Chenghao Liu, Akshit Kumar, Caiming Xiong, Silvio Savarese, and Doyen Sahoo. 2024. Unified training of universal time series forecasting transformers. (2024). +[138] Lixia Wu, Jianlin Liu, Junhong Lou, Minhui Deng, Jianbin Zheng, Haomin Wen, Chao Song, and Shu He. 2024. G2PTL: A Geography-Graph Pre-trained Model. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 4991-4999. +[139] Wansen Wu, Weiyi Yang, Juanjuan Li, Yong Zhao, Zhengqiu Zhu, Bin Chen, Sihang Qiu, Yong Peng, and Fei-Yue Wang. 2024. 
Autonomous crowdsensing: operating and organizing crowdsensing for sensing automation. IEEE Transactions on Intelligent Vehicles (2024). +[140] Zonghan Wu, Shirui Pan, Guodong Long, Jing Jiang, Xiaojun Chang, and Chengqi Zhang. 2020. Connecting the dots: Multivariate time series forecasting with graph neural networks. In SIGKDD. 753-763. +[141] Zonghan Wu, Shirui Pan, Guodong Long, Jing Jiang, and Chengqi Zhang. 2019. Graph wavenet for deep spatial-temporal graph modeling. arXiv preprint arXiv:1906.00121 (2019). +[142] Congxi Xiao, Jingbo Zhou, Yixiong Xiao, Jizhou Huang, and Hui Xiong. 2024. ReFound: Crafting a Foundation Model for Urban Region Understanding upon Language and Visual Foundations. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 3527-3538. +[143] Mengxi Xiao, Zihao Jiang, Lingfei Qian, Zhengyu Chen, Yueru He, Yijing Xu, Yuecheng Jiang, Dong Li, Ruey-Ling Weng, Min Peng, et al. 2025. Retrievalaugmented Large Language Models for Financial Time Series Forecasting. arXiv preprint arXiv:2502.05878 (2025). +[144] Fengli Xu, Jun Zhang, Chen Gao, Jie Feng, and Yong Li. 2023. Urban generative intelligence (ugi): A foundational platform for agents in embodied city environment. arXiv preprint arXiv:2312.11813 (2023). +[145] Jiehui Xu, Haixu Wu, Jianmin Wang, and Mingsheng Long. 2022. Anomaly Transformer: Time Series Anomaly Detection with Association Discrepancy. In International Conference on Learning Representations. +[146] Mingxing Xu, Wenrui Dai, Chunmiao Liu, Xing Gao, Weiyao Lin, Guo-Jun Qi, and Hongkai Xiong. 2020. Spatial-temporal transformer networks for traffic flow forecasting. arXiv preprint arXiv:2001.02908 (2020). +[147] Hao Xue and Flora D Salim. 2023. Promptcast: A new prompt-based learning paradigm for time series forecasting. IEEE Transactions on Knowledge and Data Engineering 36, 11 (2023), 6851-6864. +[148] Hao Xue, Tianye Tang, Ali Payani, and Flora D Salim. 2024. 
Prompt Mining for Language Models-based Mobility Flow Forecasting. In Proceedings of the 32nd ACM International Conference on Advances in Geographic Information Systems. +[149] Hao Xue, Bhanu Prakash Voutharoja, and Flora D Salim. 2022. Leveraging language foundation models for human mobility forecasting. In Proceedings of the 30th International Conference on Advances in Geographic Information Systems. +[150] Yibo Yan, Haomin Wen, Siru Zhong, Wei Chen, Haodong Chen, Qingsong Wen, Roger Zimmermann, and Yuxuan Liang. 2024. Urbanclip: Learning text-enhanced urban region profiling with contrastive language-image pretraining from the web. In Proceedings of the ACM on Web Conference 2024. 4006-4017. +[151] Yuwei Yan, Qingbin Zeng, Zhiheng Zheng, Jingzhe Yuan, Jie Feng, Jun Zhang, Fengli Xu, and Yong Li. 2024. OpenCity: A Scalable Platform to Simulate Urban Activities with Massive LLM Agents. arXiv preprint arXiv:2410.21286 (2024). +[152] Jianwei Yang, Reuben Tan, Qianhui Wu, Ruijie Zheng, Baolin Peng, Yongyuan Liang, Yu Gu, Mu Cai, Seonghyeon Ye, Joel Jang, et al. 2025. Magma: A Foundation Model for Multimodal AI Agents. arXiv preprint arXiv:2502.13130 (2025). +[153] Jihan Yang, Shusheng Yang, Anjali W Gupta, Rilyn Han, Li Fei-Fei, and Saining Xie. 2024. Thinking in space: How multimodal large language models see, remember, and recall spaces. arXiv preprint arXiv:2412.14171 (2024). +[154] Kairui Yang, Zihao Guo, Gengjie Lin, Haotian Dong, Zhao Huang, Yipeng Wu, Die Zuo, Jibin Peng, Ziyuan Zhong, Xin WANG, Qing Guo, Xiaosong Jia, Junchi Yan, and Di Lin. 2025. Trajectory-LLM: A Language-based Data Generator for Trajectory Prediction in Autonomous Driving. In ICLR. +[155] Silin Yang, Dong Wang, Haoqi Zheng, and Ruochun Jin. 2024. TimeRAG: BOOSTING LLM Time Series Forecasting via Retrieval-Augmented Generation. arXiv preprint arXiv:2412.16643 (2024). +[156] Tiankai Yang, Yi Nian, Shawn Li, Ruiyao Xu, Yuangang Li, Jiaqi Li, Zhuo Xiao, Xiyang Hu, Ryan Rossi, Kaize Ding, et al. 
2024. Ad-llm: Benchmarking large + +language models for anomaly detection. arXiv preprint arXiv:2412.11142 (2024). +[157] Xinli Yu, Zheng Chen, Yuan Ling, Shujing Dong, Zongyi Liu, and Yanbin Lu. 2023. Temporal data meets LLM-explainable financial time series forecasting. arXiv preprint arXiv:2306.11025 (2023). +[158] Yuan Yuan, Jingtao Ding, Jie Feng, Depeng Jin, and Yong Li. 2024. Unist: A prompt-empowered universal model for urban spatio-temporal prediction. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4095-4106. +[159] Yuan Yuan, Jingtao Ding, Chonghua Han, Depeng Jin, and Yong Li. 2024. A Foundation Model for Unified Urban Spatio-Temporal Flow Prediction. arXiv preprint arXiv:2411.12972 (2024). +[160] Yuan Yuan, Chonghua Han, Jingtao Ding, Depeng Jin, and Yong Li. 2024. Urbanfit: A foundation model for open-world urban spatio-temporal learning. arXiv preprint arXiv:2411.12164 (2024). +[161] Yuan Yuan, Chenyang Shao, Jingtao Ding, Depeng Jin, and Yong Li. 2024. Spatiotemporal few-shot learning via diffusive neural network generation. In The Twelfth International Conference on Learning Representations. +[162] Ye Yuan, Yong Zhang, Boyue Wang, Yuan Peng, Yongli Hu, and Baocai Yin. 2022. STGAN: Spatio-temporal generative adversarial network for traffic data imputation. IEEE Transactions on Big Data 9, 1 (2022), 200-211. +[163] Zhenghang Yuan, Zhitong Xiong, Lichao Mou, and Xiao Xiang Zhu. 2024. Chatearthnet: A global-scale, high-quality image-text dataset for remote sensing. arXiv preprint arXiv:2402.11325 (2024). +[164] Kunpeng Zhang, Feng Zhou, Lan Wu, Na Xie, and Zhengbing He. 2024. Semantic understanding and prompt engineering for large-scale traffic data imputation. Information Fusion 102 (2024), 102038. +[165] Libo Zhang and Yue Ning. 2024. Large Language Models as Event Forecasters. arXiv preprint arXiv:2406.10492 (2024). +[166] Qianru Zhang, Xubin Ren, Lianghao Xia, Siu Ming Yiu, and Chao Huang. 2024. 
Spatio-Temporal Graph Learning With Large Language Model. https://openreview.net/forum?id=QUKcfq6GX +[167] Qianru Zhang, Haixin Wang, Cheng Long, Liangcai Su, Xingwei He, Jianlong Chang, Tailin Wu, Hongzhi Yin, Siu-Ming Yiu, Qi Tian, et al. 2024. A Survey of Generative Techniques for Spatial-Temporal Data Mining. arXiv preprint arXiv:2405.09592 (2024). +[168] Siyao Zhang, Daocheng Fu, Wenzhe Liang, Zhao Zhang, Bin Yu, Pinlong Cai, and Baozhen Yao. 2024. TrafficGPT: Viewing, processing and interacting with traffic foundation models. Transport Policy 150 (2024), 95-105. +[169] Weijia Zhang, Jindong Han, Zhao Xu, Hang Ni, Hao Liu, and Hui Xiong. 2024. Urban Foundation Models: A Survey. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 6633-6643. +[170] Xin Zhang, Tianjian Ouyang, Yu Shang, Qingmin Liao, and Yong Li. [n.d.]. UrbanMLLM: Joint Learning of Cross-view Imagery for Urban Understanding. ([n.d.]). +[171] Yu Zhang, Weiming Huang, Yao Yao, Song Gao, Lizhen Cui, and Zhongmin Yan. 2024. Urban region representation learning with human trajectories: a multiview approach incorporating transition, spatial, and temporal perspectives. GIScience & Remote Sensing 61, 1 (2024), 2387392. +[172] Yimei Zhang, Xiangjie Kong, Wenfeng Zhou, Jin Liu, Yanjie Fu, and Guojiang Shen. 2024. A comprehensive survey on traffic missing data imputation. IEEE Transactions on Intelligent Transportation Systems (2024). +[173] Yunxiang Zhang and Xiaojun Wan. 2024. SITUATEDGEN: incorporating geographical and temporal contexts into generative commonsense reasoning. Advances in Neural Information Processing Systems 36 (2024). +[174] Yifan Zhang, Cheng Wei, Shangyou Wu, Zhengting He, and Wenhao Yu. 2023. GeoGPT: understanding and processing geospatial tasks through an autonomous GPT. arXiv preprint arXiv:2307.07930 (2023). +[175] Zeyang Zhang, Xin Wang, Ziwei Zhang, Haoyang Li, Yijian Qin, and Wenwu Zhu. 2024. 
LLM4DyG: can large language models solve spatial-temporal problems on dynamic graphs? In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4350-4361. +[176] Yu Zhao, Pan Deng, Junting Liu, Xiaofeng Jia, and Jianwei Zhang. 2023. Generative Causal Interpretation Model for Spatio-Temporal Representation Learning. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 3537-3548. +[177] Chuanpan Zheng, Xiaoliang Fan, Cheng Wang, and Jianzhong Qi. 2020. GMAN: A graph multi-attention network for traffic prediction. In AAAI, Vol. 34. 1234–1241. +[178] Yu Zheng, Licia Capra, Ouri Wolfson, and Hai Yang. 2014. Urban computing: concepts, methodologies, and applications. ACM TIST 5, 3 (2014), 1-55. +[179] Siru Zhong, Xixuan Hao, Yibo Yan, Ying Zhang, Yangqiu Song, and Yuxuan Liang. 2024. Urbancross: Enhancing satellite image-text retrieval with cross-domain adaptation. In Proceedings of the 32nd ACM International Conference on Multimedia. 6307-6315. +[180] Siru Zhong, Weilin Ruan, Ming Jin, Huan Li, Qingsong Wen, and Yuxuan Liang. 2025. Time-VLM: Exploring Multimodal Vision-Language Models for Augmented Time Series Forecasting. arXiv preprint arXiv:2502.04395 (2025). + +[181] Gengze Zhou, Yicong Hong, Zun Wang, Xin Eric Wang, and Qi Wu. 2024. NavGPT-2: Unleashing navigational reasoning capability for large vision-language models. In European Conference on Computer Vision. Springer, 260-278. +[182] Gengze Zhou, Yicong Hong, and Qi Wu. 2024. NavGPT: Explicit reasoning in vision-and-language navigation with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 7641-7649. +[183] Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, and Rong Jin. 2023. One Fits All: Power General Time Series Analysis by Pretrained LM. Advances in Neural Information Processing Systems (2023). +[184] Xingcheng Zhou, Mingyu Liu, Bare Luka Zagar, Ekim Yurtsever, and Alois C Knoll. 2023. 
Vision language models in autonomous driving and intelligent transportation systems. arXiv preprint arXiv:2310.14414 (2023). +[185] Zhilun Zhou, Yuming Lin, Depeng Jin, and Yong Li. 2024. Large language model for participatory urban planning. arXiv preprint arXiv:2402.17161 (2024). +[186] Zihao Zhou and Rose Yu. 2024. Can LLMs Understand Time Series Anomalies? arXiv preprint arXiv:2410.05440 (2024). +[187] Xizhou Zhu, Yuntao Chen, Hao Tian, Chenxin Tao, Weijie Su, Chenyu Yang, Gao Huang, Bin Li, Lewei Lu, Xiaogang Wang, et al. 2023. Ghost in the minecraft: Generally capable agents for open-world environments via large language models with text-based knowledge and memory. arXiv preprint arXiv:2305.17144 (2023). +[188] Yuanshao Zhu, James Jianqiao Yu, Xiangyu Zhao, Qidong Liu, Yongchao Ye, Wei Chen, Zijian Zhang, Xuetao Wei, and Yuxuan Liang. 2024. ControlTraj: Controllable trajectory generation with topology-constrained diffusion model. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4676-4687. +[189] Yuanshao Zhu, James Jianqiao Yu, Xiangyu Zhao, Xuetao Wei, and Yuxuan Liang. 2024. UniTraj: Universal Human Trajectory Modeling from Billion-Scale Worldwide Traces. arXiv preprint arXiv:2411.03859 (2024). +[190] Zhengqiu Zhu, Yatai Ji, Sihang Qiu, Yong Zhao, Kai Xu, Rusheng Ju, and Bin Chen. 2024. A Prototype Design of LLM-Based Autonomous Web Crowdsensing. In International Conference on Web Engineering. Springer, 406-409. +[191] Zhengqiu Zhu, Yong Zhao, Bin Chen, Sihang Qiu, Kai Xu, Quanjun Yin, Jincai Huang, Zhong Liu, and Fei-Yue Wang. 2024. Conversational Crowdsensing: A Parallel Intelligence Powered Novel Sensing Approach. arXiv preprint arXiv:2402.06654 (2024). 
+ +# A Limitations and Future Opportunities + +We further discuss the potential limitations of current research and identify several key future directions aimed at advancing the development of more powerful, transparent, and reliable STFMs: + +- The curse of accuracy against interpretability. We have identified a significant challenge in developing FMs for addressing numerical problems in ST data science. Directly leveraging LLMs for numerical tasks such as forecasting proves to be non-trivial [34]. Meanwhile, fine-tuning LLMs or training STFMs from scratch using large-scale, cross-domain ST data often comes at the cost of interactive capabilities, thereby hindering interpretability in the prediction outcomes. These limitations motivate us to explore a novel paradigm that not only retains strong numerical reasoning abilities but also enhances interpretability, bridging the gap between predictive accuracy and explanatory insight. +- Large foundation models are all we need? While the extensive parameterization of FMs enables impressive generalization capabilities, particularly in zero/few-shot settings, their superiority over smaller expert models remains context-dependent. In ST domains such as time series analysis [122] and urban planning [57], smaller expert models often outperform FMs when provided with sufficient domain-specific training data. This raises fundamental questions about the trade-offs between model scalability, efficiency, and task-specific optimization. Future research should delve into hybrid approaches that combine the adaptability of large models with the precision of expert models. +- One-fit-all FMs across the full workflow. While current FMs are typically designed to support only specific stages of ST data science, we envision a more unified FM capable of seamlessly + +spanning the entire workflow, from initial data sensing and management to mining and supporting downstream applications. 
Achieving this goal will likely require the development of advanced LLM agents that can function as full-stack engineers (i.e., strongly benefiting all stages) for ST data science. + +- Integrating STFMs with multimodal understanding. While current STFMs excel in processing structured ST data, their ability to integrate and reason over multimodal information, including text, images, video, and sensor data, remains underdeveloped. Many tasks require models to jointly interpret geospatial context, temporal dynamics, and text descriptions. Future research can focus on designing multimodal STFMs that effectively align, fuse, and reason over heterogeneous data sources, enabling more context-aware and human-interpretable decision-making. + +# B Zero-shot Utilization of LLMs + +There are three ways of directly using LLMs for various ST tasks: + +- LLM-as-Augmenter. Pretrained LLMs can enhance both data understanding and model performance. On the one hand, it can serve as the input augmenter, which enhances data interoperability or provides external information [40, 79] (e.g., textual or visual). On the other hand, LLMs can serve as a parameter-frozen model component [102, 150, 166], thus augmenting domain models by injecting the pretrained external knowledge in LLMs. +- LLM-as-Predictor. LLMs can be directly employed as predictors [33, 53, 73, 125] for various tasks. Due to the modality gap between text and ST data, preprocessing is required to fit the input spaces of LLMs. Such step typically contains prompt engineering [73, 125, 147-149] or patch & tokenization [53]. +- LLM-as-Agent. LLM-based agents are typically equipped with the ability to memorize and call various tools. When applied to ST data science, various domain-expert models can be wrapped as a tool and added into the agent in a plug-and-play manner [144, 168, 174]. As such, LLM serves as a router to access different models with both flexibility and performance guarantees. 
Furthermore, multi-agent systems [185] can be built to solve more complex tasks in the ST domain. + +# C Comparison between LLMs and PFMs + +Table 3 demonstrates the comparison between LLMs and PFMs on their capabilities, including perception, optimization, and reasoning. For example, PFMs excel in exceptional numerical reasoning abilities, yet they often struggle with common-sense understanding. There is still no free lunch, and the user can choose either LLMs or PFMs according to the downstream applications. + +Table 3: A capability comparison between LLMs and PFMs for ST data science. + +
CapabilitiesLarge Language Models (LLMs)Pretrained Foundation Models (PFMs)
Perception▲ Limited native ST perception; can be enhanced via fine-tuning✓ Strong ST perception, integrating sensor data and domain-specific learning
Optimization✓ Agent-based reasoning for decision-making; relies on prompting and heuristics▲ Limited; lacks decision-making ability for control and planning
Common-sense Reasoning✓ Strong via pretraining on vast textual data; can be enhanced with fine-tuning▲ Limited; relies on structured ST data rather than broad world knowledge
Numerical Reasoning▲ Handles arithmetic but struggles with structured ST computations✓ Designed for numerical problems, e.g., forecasting, anomaly detection
Causal Reasoning▲ Can infer causal relations from text but lacks structured ST modeling✓ Built-in graph-based and ST causal modeling
+ +![](images/0e2c71ca92a3b1948076a1507053343e497304cd0c184fa18d2ff53d67b1e756.jpg) +Figure 7: Taxonomy from the methodology perspective. \ No newline at end of file diff --git a/data/2025/2503_13xxx/2503.13502/images/037cc7fecab764f3c9489d19c64e195a2e4237fe21b75687cf6cec155fb66656.jpg b/data/2025/2503_13xxx/2503.13502/images/037cc7fecab764f3c9489d19c64e195a2e4237fe21b75687cf6cec155fb66656.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a704052f550221538c6cb2830509d869828fe3ea --- /dev/null +++ b/data/2025/2503_13xxx/2503.13502/images/037cc7fecab764f3c9489d19c64e195a2e4237fe21b75687cf6cec155fb66656.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc49801ed0ed1f298a1005578ac5b8a05b9d409db16c6b0550dc39c49b4d30ad +size 51883 diff --git a/data/2025/2503_13xxx/2503.13502/images/04153b45ab02f3e79c0ab49c30b7605ee00c77f9060519cff8dcabcf84cc5fc2.jpg b/data/2025/2503_13xxx/2503.13502/images/04153b45ab02f3e79c0ab49c30b7605ee00c77f9060519cff8dcabcf84cc5fc2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b483ef1b6fdae27dfa2ef4e4200096b36a9532c7 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13502/images/04153b45ab02f3e79c0ab49c30b7605ee00c77f9060519cff8dcabcf84cc5fc2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba0217b9e627ce09f577c26b2259fe119b37c450bc531a4db9a2916f68c78458 +size 32929 diff --git a/data/2025/2503_13xxx/2503.13502/images/08bcc66a52134813071d6c4eb50489b0a630570ee89c53dc5e3c14381549b4d2.jpg b/data/2025/2503_13xxx/2503.13502/images/08bcc66a52134813071d6c4eb50489b0a630570ee89c53dc5e3c14381549b4d2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d4a655810c8d2686096a91b56f98c0244f67a8e8 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13502/images/08bcc66a52134813071d6c4eb50489b0a630570ee89c53dc5e3c14381549b4d2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e11fa5a17447511923c207f1597b11e49ea97b091f493751a884211c3e26960b +size 25071 diff --git a/data/2025/2503_13xxx/2503.13502/images/0dd90631ab1ad6054f2db1a2e36db5801f8436c980eebbece0578f2d29976011.jpg b/data/2025/2503_13xxx/2503.13502/images/0dd90631ab1ad6054f2db1a2e36db5801f8436c980eebbece0578f2d29976011.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8e508515464b72ffd7a8cf6dda4069580d92711 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13502/images/0dd90631ab1ad6054f2db1a2e36db5801f8436c980eebbece0578f2d29976011.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4db5c0fd7bac384416b8c82dc943f80123f8dba193e6f21a11f747eaf767aeea +size 41220 diff --git a/data/2025/2503_13xxx/2503.13502/images/0e2c71ca92a3b1948076a1507053343e497304cd0c184fa18d2ff53d67b1e756.jpg b/data/2025/2503_13xxx/2503.13502/images/0e2c71ca92a3b1948076a1507053343e497304cd0c184fa18d2ff53d67b1e756.jpg new file mode 100644 index 0000000000000000000000000000000000000000..00a1f729a98a0de6a1b14e434ce1d5135f450428 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13502/images/0e2c71ca92a3b1948076a1507053343e497304cd0c184fa18d2ff53d67b1e756.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f60920eabc76f22995b59c2b86123ead61eb7425794ca016e971bd193c4aa0c2 +size 215239 diff --git a/data/2025/2503_13xxx/2503.13502/images/146f81da04a218bcff0caa24a2b9f78a939ebf9ec6c05eb8643c1f4789377d85.jpg b/data/2025/2503_13xxx/2503.13502/images/146f81da04a218bcff0caa24a2b9f78a939ebf9ec6c05eb8643c1f4789377d85.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e16b9e92a8a86d9eeb6c68f2d33b48780ab24eaf --- /dev/null +++ b/data/2025/2503_13xxx/2503.13502/images/146f81da04a218bcff0caa24a2b9f78a939ebf9ec6c05eb8643c1f4789377d85.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77dd636c21a79c440fd7b4d678fc6b186894727c7dbf91255f64d5b117a491b4 +size 48557 diff --git 
a/data/2025/2503_13xxx/2503.13502/images/585a84a4e1c2d564b53b7198e4a5ac28aaf09f4523682713f3c283074ed415a4.jpg b/data/2025/2503_13xxx/2503.13502/images/585a84a4e1c2d564b53b7198e4a5ac28aaf09f4523682713f3c283074ed415a4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6e1ad7c2274e2329f4e8789088a5c445683be2cc --- /dev/null +++ b/data/2025/2503_13xxx/2503.13502/images/585a84a4e1c2d564b53b7198e4a5ac28aaf09f4523682713f3c283074ed415a4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c9dc87051a1a600976154d00641af59ffbe791486d5f977c3810c3a3f0f56fa +size 47415 diff --git a/data/2025/2503_13xxx/2503.13502/images/60d69dbcc8c85b62a9fac9cb5b695647836692fdc243ddfde0484cf9e89d35d3.jpg b/data/2025/2503_13xxx/2503.13502/images/60d69dbcc8c85b62a9fac9cb5b695647836692fdc243ddfde0484cf9e89d35d3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..caa3a3a2f951c91fd83983a9b57b6ea78b0e8c83 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13502/images/60d69dbcc8c85b62a9fac9cb5b695647836692fdc243ddfde0484cf9e89d35d3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b30d376f923c768f157dcad66760d6f4c546b3b869a42674b83a0b1741e1edd +size 71274 diff --git a/data/2025/2503_13xxx/2503.13502/images/7550fb89f980864400839d7d1a4cff676d111c15a655bac2fa282342c99fd317.jpg b/data/2025/2503_13xxx/2503.13502/images/7550fb89f980864400839d7d1a4cff676d111c15a655bac2fa282342c99fd317.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a38ce6aba40595e3c3c1dd31c655be6e7b5c7e02 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13502/images/7550fb89f980864400839d7d1a4cff676d111c15a655bac2fa282342c99fd317.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9832da25cf1312e45b2588ebf134c6006c95c3942b9da6f880e9a6b503c4bfc9 +size 78780 diff --git a/data/2025/2503_13xxx/2503.13502/images/7bf729db0134e8592812df53324e7a0bdf9e7208c0a94f185406b5acb97be084.jpg 
b/data/2025/2503_13xxx/2503.13502/images/7bf729db0134e8592812df53324e7a0bdf9e7208c0a94f185406b5acb97be084.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1b57f1a1ba3c3ab017ab712af041c3dfa7fef1fa --- /dev/null +++ b/data/2025/2503_13xxx/2503.13502/images/7bf729db0134e8592812df53324e7a0bdf9e7208c0a94f185406b5acb97be084.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e565b778cf67ee617121064229235d0195aff0ae6654472719953a94a4c47ced +size 154850 diff --git a/data/2025/2503_13xxx/2503.13502/layout.json b/data/2025/2503_13xxx/2503.13502/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..769c77a2136228bc98f5844a5f449a4ca6675a71 --- /dev/null +++ b/data/2025/2503_13xxx/2503.13502/layout.json @@ -0,0 +1,12913 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 95, + 80, + 518, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 80, + 518, + 120 + ], + "spans": [ + { + "bbox": [ + 95, + 80, + 518, + 120 + ], + "type": "text", + "content": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "spans": [ + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "text", + "content": "Yuxuan Liang" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "text", + "content": ", Haomin Wen" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "inline_equation", + "content": "^{2,1}" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "text", + "content": ", Yutong Xia" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": 
"text", + "content": ", Ming Jin" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "text", + "content": ", Bin Yang" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "text", + "content": ", Flora Salim" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "text", + "content": ", Qingsong Wen" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "inline_equation", + "content": "^{7}" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "text", + "content": ", Shirui Pan" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "text", + "content": ", Gao Cong" + }, + { + "bbox": [ + 140, + 129, + 471, + 157 + ], + "type": "inline_equation", + "content": "^{8}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 102, + 157, + 508, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 157, + 508, + 219 + ], + "spans": [ + { + "bbox": [ + 102, + 157, + 508, + 219 + ], + "type": "text", + "content": "1The Hong Kong University of Science and Technology (Guangzhou) 2Carnegie Mellon University 3National University of Singapore 4Griffith University 5East China Normal University 6University of New South Wales 7Squirrel AI Learning, USA 8Nanyang Technology University {yuxiang,yutong.x}@outlook.com,{wenhaomin.whm,mingjinedu,qingsongedu}@gmail.com flora.salim@unsw.edu.au,byang@dase.ecnu.edu.cn,s.pan@griffith.edu.au,gaocong@ntu.edu.sg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 224, + 96, + 236 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 224, 
+ 96, + 236 + ], + "spans": [ + { + "bbox": [ + 51, + 224, + 96, + 236 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 239, + 296, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 239, + 296, + 437 + ], + "spans": [ + { + "bbox": [ + 50, + 239, + 296, + 437 + ], + "type": "text", + "content": "Spatio-Temporal (ST) data science, which includes sensing, managing, and mining large-scale data across space and time, is fundamental to understanding complex systems in domains such as urban computing, climate science, and intelligent transportation. Traditional deep learning approaches have significantly advanced this field, particularly in the stage of ST data mining. However, these models remain task-specific and often require extensive labeled data. Inspired by the success of Foundation Models (FM), especially large language models, researchers have begun exploring the concept of Spatio-Temporal Foundation Models (STFMs) to enhance adaptability and generalization across diverse ST tasks. Unlike prior architectures, STFMs empower the entire workflow of ST data science, ranging from data sensing, management, to mining, thereby offering a more holistic and scalable approach. Despite rapid progress, a systematic study of STFMs for ST data science remains lacking. This survey aims to provide a comprehensive review of STFMs, categorizing existing methodologies and identifying key research directions to advance ST general intelligence." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 440, + 134, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 440, + 134, + 451 + ], + "spans": [ + { + "bbox": [ + 51, + 440, + 134, + 451 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 454, + 295, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 454, + 295, + 531 + ], + "spans": [ + { + "bbox": [ + 50, + 454, + 295, + 531 + ], + "type": "text", + "content": "Humans live in a world shaped by the dynamic interplay of countless elements across space and time. Spatio-Temporal (ST) Data, which refer to data that encapsulate ST phenomena, track the evolution of objects or events across locations and time [5], such as meteorological records, traffic patterns, and human traces. These data are frequently sourced from a wide array of platforms, ranging from IoT devices, GPS sensors, social media, to remote sensing." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 531, + 295, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 531, + 295, + 609 + ], + "spans": [ + { + "bbox": [ + 50, + 531, + 295, + 609 + ], + "type": "text", + "content": "Within this context, Spatio-Temporal Data Science focuses on sensing, managing, and mining these datasets to uncover patterns, understand complex systems, and predict future dynamics. Motivated by its transformative potential, this field addresses critical challenges across urban environments and even the entire planet, enabling decision-making and fostering innovations that contribute to building smarter, more sustainable, and resilient systems [178]." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 617, + 295, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 617, + 295, + 675 + ], + "spans": [ + { + "bbox": [ + 50, + 617, + 295, + 675 + ], + "type": "text", + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 676, + 186, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 676, + 186, + 684 + ], + "spans": [ + { + "bbox": [ + 51, + 676, + 186, + 684 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 685, + 289, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 685, + 289, + 693 + ], + "spans": [ + { + "bbox": [ + 51, + 685, + 289, + 693 + ], + "type": "text", + "content": "© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 693, + 165, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 693, + 165, + 700 + ], + "spans": [ + { + "bbox": [ + 52, + 693, + 165, + 700 + ], + "type": "text", + "content": "ACM ISBN 978-x-xxxxx-xxxxx-x/YY/MM" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 700, + 174, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 700, + 174, + 709 + ], + "spans": [ + { + "bbox": [ + 52, + 700, + 174, + 709 + ], + "type": "text", + "content": "https://doi.org/10.1145/nnnnnnn.nnnnnnn" + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 318, + 224, + 555, + 337 + ], + "blocks": [ + { + "bbox": [ + 318, + 224, + 555, + 337 + ], + "lines": [ + { + "bbox": [ + 318, + 224, + 555, + 337 + ], + "spans": [ + { + "bbox": [ + 318, + 224, + 555, + 337 + ], + "type": "image", + "image_path": "585a84a4e1c2d564b53b7198e4a5ac28aaf09f4523682713f3c283074ed415a4.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 314, + 342, + 560, + 388 + ], + "lines": [ + { + "bbox": [ + 314, + 342, + 560, + 388 + ], + "spans": [ + { + "bbox": [ + 314, + 342, + 560, + 388 + ], + "type": "text", + "content": "Figure 1: ST Foundation Models (STFM), which include LLM and PFM, are pretrained with or applied to diverse ST data, with the abilities of perception, optimization, and reasoning. STFMs can, in turn, enhance each stage of ST data science." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 391, + 560, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 391, + 560, + 544 + ], + "spans": [ + { + "bbox": [ + 313, + 391, + 560, + 544 + ], + "type": "text", + "content": "In the era of deep learning, the community has primarily concentrated on spatio-temporal representation learning, as a fundamental step of ST data mining [129]. Key advancements include the development of Spatio-Temporal Graph Neural Networks (STGNN) [51] and transformer-based architectures, which have shown remarkable success in tasks such as traffic forecasting [80, 146], air quality prediction [82], and human mobility analytics [132]. STGNNs integrate Graph Neural Networks (GNN) with temporal learning modules (e.g., GRU [6, 70], TCN [140, 141]) to model ST correlations, while transformer models leverage self-attention mechanisms [37, 78, 177] to process complex dependencies across space and time. Additionally, there has been significant research on self-supervised learning [46, 74, 92], where models are trained to extract powerful representations with minimal reliance on large annotated datasets." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 545, + 560, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 545, + 560, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 545, + 560, + 710 + ], + "type": "text", + "content": "Driven by the success of Foundation Models (FM), especially Large Language Models (LLM), researchers have recently begun exploring the concept of Spatio-Temporal Foundation Models (STFM) [32, 81, 169]. By harnessing LLMs, it becomes possible to develop more generalized, adaptable solutions that can be fine-tuned for specific tasks with minimal data. 
Another prominent approach involves pretraining FMs (denoted as PFM) on cross-domain ST data and adapting them for particular domains. In contrast to previous architectures (e.g., STGNNs), STFMs integrates the capabilities of perception, reasoning and optimization, which not only promises to revolutionize ST data mining, but also empowers other stages of ST data science, such as ST data sensing and management (See Figure 1). This shift has the potential to enhance the scalability and efficiency of ST applications, offering a more holistic approach to addressing challenges in urban computing, climate science, etc." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 206, + 35, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 206, + 35, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 206, + 35, + 559 + ], + "type": "text", + "content": "arXiv:2503.13502v1 [cs.DB] 12 Mar 2025" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 129, + 293, + 190 + ], + "blocks": [ + { + "bbox": [ + 50, + 83, + 295, + 127 + ], + "lines": [ + { + "bbox": [ + 50, + 83, + 295, + 127 + ], + "spans": [ + { + "bbox": [ + 50, + 83, + 295, + 127 + ], + "type": "text", + "content": "Table 1: Our survey vs. related surveys on FMs for learning ST data, such as locations (L), trajectories (T), events (E), ST rasters (R), and ST graphs (G). The applications (App.) include numerical (N) and inferential (I) problems." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 129, + 293, + 190 + ], + "lines": [ + { + "bbox": [ + 53, + 129, + 293, + 190 + ], + "spans": [ + { + "bbox": [ + 53, + 129, + 293, + 190 + ], + "type": "table", + "html": "
SurveyYearVenueSensingManage.MiningApp.Data
Jin et al. [54]2023-NR,G
Jiang et al. [48]2024IJCAINR,G
Liang et al. [81]2024KDDNT,E,R,G
Zhang et al. [169]2024KDDN,IL,T,E,R,G
Goodge et al. [32]2025-NT,E,R,G
Ours2025-N,IL,T,E,R,G
", + "image_path": "08bcc66a52134813071d6c4eb50489b0a630570ee89c53dc5e3c14381549b4d2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 191, + 295, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 191, + 295, + 301 + ], + "spans": [ + { + "bbox": [ + 50, + 191, + 295, + 301 + ], + "type": "text", + "content": "Despite their rapid advancements, a systematic analysis of STFMs across the entire workflow of ST data science remains lacking. First, prior surveys have primarily focused on utilizing LLMs as the key tool for ST data mining [32, 54, 81, 169], leaving a significant gap in understanding how these models can be integrated throughout the entire process, i.e., with less focus placed on their role in the earlier stages of sensing and management. Second, they predominantly examine the applications of STFMs to numerical problems (e.g., forecasting, imputation) while overlooking their role in inferential problem-solving such as decision-making systems." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 301, + 295, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 301, + 295, + 453 + ], + "spans": [ + { + "bbox": [ + 50, + 301, + 295, + 453 + ], + "type": "text", + "content": "To bridge these gaps, this paper aims to provide a more comprehensive survey of STFMs across all stages of ST data science, spanning data sensing, management, and mining (see Figure 1). For example, LLMs can enhance ST data sensing by actively processing citizen reports, optimizing participatory sensing strategies, and generating synthetic data at scale. In terms of data management, they can automate data cleaning tasks, construct meaningful knowledge graphs for data integration, and facilitate more efficient retrieval of cross-modal datasets. 
Beyond these stages, our survey also explores how STFMs support a broader range of downstream applications, including numerical and inferential problems. Through this endeavor, we seek to illuminate an overall vision of STFMs, thereby enhancing comprehension regarding their potential to optimize ST data science, fostering more integrated and adaptable solutions." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 454, + 295, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 454, + 295, + 575 + ], + "spans": [ + { + "bbox": [ + 50, + 454, + 295, + 575 + ], + "type": "text", + "content": "Meanwhile, we systematically investigate the key methodologies of STFMs for modeling a variety of ST data. We begin by categorizing existing STFMs into two main classes: LLMs and Pretrained Foundation Models (PFMs). For LLMs, which are pretrained on linguistic data, we focus on their usage as a zero-shot [33] or few-shot learner [53, 73], where various prompting and fine-tuning strategies have been explored, respectively. For PFMs, which are trained from scratch based on cross-domain ST data [40, 158, 189], we examine their neural architectures, pretraining methods, and their adaptability to different types of ST data, including location data, trajectory data, events, ST raster data, and ST graph data." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 60, + 575, + 272, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 575, + 272, + 586 + ], + "spans": [ + { + "bbox": [ + 60, + 575, + 272, + 586 + ], + "type": "text", + "content": "In summary, our major contributions lie in three aspects:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 586, + 295, + 706 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 51, + 586, + 295, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 586, + 295, + 640 + ], + "spans": [ + { + "bbox": [ + 51, + 586, + 295, + 640 + ], + "type": "text", + "content": "- Comprehensive and up-to-date survey: We provide the first comprehensive and modern survey of FMs across the entire workflow of ST data science, covering data sensing, management, and mining. We also explore a broader range of downstream tasks and data types compared to most existing surveys (See Table 1)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 641, + 295, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 641, + 295, + 673 + ], + "spans": [ + { + "bbox": [ + 51, + 641, + 295, + 673 + ], + "type": "text", + "content": "- Vision and Methodologies: We propose a vision for STFMs, identifying key capabilities essential for their success, and discuss current methodologies for implementing these abilities in detail." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 673, + 295, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 673, + 295, + 706 + ], + "spans": [ + { + "bbox": [ + 51, + 673, + 295, + 706 + ], + "type": "text", + "content": "- Future directions: We highlight promising directions for advancing ST data science with foundation models, encouraging further research and exploration in this emerging field." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 84, + 560, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 84, + 560, + 140 + ], + "spans": [ + { + "bbox": [ + 313, + 84, + 560, + 140 + ], + "type": "text", + "content": "Paper Organization. The remainder of this paper is organized as follows: Sec. 2 provides essential background on FMs and ST data. Sec. 3 and 4 present a taxonomy of STFMs regarding the workflow and methodologies, respectively. Sec. 5 offers concluding remarks, and Appendix A highlights promising avenues for future research." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 144, + 395, + 157 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 144, + 395, + 157 + ], + "spans": [ + { + "bbox": [ + 315, + 144, + 395, + 157 + ], + "type": "text", + "content": "2 Background" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 159, + 560, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 159, + 560, + 346 + ], + "spans": [ + { + "bbox": [ + 313, + 159, + 560, + 346 + ], + "type": "text", + "content": "Foundation models. FMs are deep neural networks trained on vast datasets, enabling them to acquire broad, cross-domain knowledge and exceptional adaptability [45]. Unlike earlier task-specific models, FMs can be efficiently fine-tuned with relatively small amounts of task-specific data, offering remarkable flexibility, effectiveness, and cost efficiency. Pioneering attempts like BERT [58] and GPT-3 [11] have reshaped natural language processing. More recent models, e.g., GPT-4o [45] and DeepSeek-R1 [36], further push the frontiers of generative capabilities, enabling more nuanced reasoning, robust domain adaptation, and improved context-awareness in diverse tasks. 
In ST domains, recent FMs like Time-MoE [119], Chronos [4], and UniST [158] have made remarkable strides in time series analysis and universal ST forecasting, while UniTraj [189] serves as a versatile foundation for various trajectory-related tasks. Inspired by these successes, this survey delves into the utilization of FMs in the entire workflow of ST data science, covering data sensing, management, and mining." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 348, + 560, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 348, + 560, + 413 + ], + "spans": [ + { + "bbox": [ + 313, + 348, + 560, + 413 + ], + "type": "text", + "content": "Formulation of Spatio-Temporal Data. ST data refer to datasets that integrate spatial (location-based) and temporal (time-based) information, capturing dynamic patterns and relationships over space and time. Figure 2 depicts the basic ST data structures discussed in this survey, including locations, trajectories, events, ST rasters, and ST graphs. Their definitions are delineated as follows." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 413, + 559, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 413, + 559, + 458 + ], + "spans": [ + { + "bbox": [ + 313, + 413, + 559, + 458 + ], + "type": "text", + "content": "Definition 1 (Location). A location refers to a fixed spatial point or object in a geographical space, represented by the geospatial coordinates " + }, + { + "bbox": [ + 313, + 413, + 559, + 458 + ], + "type": "inline_equation", + "content": "l \\in \\mathbb{R}^2" + }, + { + "bbox": [ + 313, + 413, + 559, + 458 + ], + "type": "text", + "content": ", i.e., latitude and longitude. It is often profiled by the corresponding satellite image, street-view image, and descriptions." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 458, + 560, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 458, + 560, + 502 + ], + "spans": [ + { + "bbox": [ + 313, + 458, + 560, + 502 + ], + "type": "text", + "content": "Definition 2 (Trajectory). A trajectory is a sequence of time-ordered locations that describe the movements of an object in the geographical space. It can be formulated as " + }, + { + "bbox": [ + 313, + 458, + 560, + 502 + ], + "type": "inline_equation", + "content": "\\mathcal{T} = p_1\\rightarrow p_2\\rightarrow \\dots \\rightarrow p_T" + }, + { + "bbox": [ + 313, + 458, + 560, + 502 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 313, + 458, + 560, + 502 + ], + "type": "inline_equation", + "content": "p_i = (l_i,t_i)" + }, + { + "bbox": [ + 313, + 458, + 560, + 502 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 458, + 560, + 502 + ], + "type": "inline_equation", + "content": "l_{i}" + }, + { + "bbox": [ + 313, + 458, + 560, + 502 + ], + "type": "text", + "content": " denotes the object's location at time " + }, + { + "bbox": [ + 313, + 458, + 560, + 502 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 313, + 458, + 560, + 502 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 502, + 559, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 502, + 559, + 546 + ], + "spans": [ + { + "bbox": [ + 313, + 502, + 559, + 546 + ], + "type": "text", + "content": "Definition 3 (Event). 
An event sequence is a series of timestamped events, denoted as " + }, + { + "bbox": [ + 313, + 502, + 559, + 546 + ], + "type": "inline_equation", + "content": "\\mathcal{E} = v_{1}\\rightarrow v_{2}\\rightarrow \\dots \\rightarrow v_{T}" + }, + { + "bbox": [ + 313, + 502, + 559, + 546 + ], + "type": "text", + "content": ", describing the progress of actions or occurrences, where " + }, + { + "bbox": [ + 313, + 502, + 559, + 546 + ], + "type": "inline_equation", + "content": "v_{i} = (e_{i},t_{i})" + }, + { + "bbox": [ + 313, + 502, + 559, + 546 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 502, + 559, + 546 + ], + "type": "inline_equation", + "content": "e_i\\in \\mathbb{R}^d" + }, + { + "bbox": [ + 313, + 502, + 559, + 546 + ], + "type": "text", + "content": " is an event and " + }, + { + "bbox": [ + 313, + 502, + 559, + 546 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 313, + 502, + 559, + 546 + ], + "type": "text", + "content": " denotes the time when " + }, + { + "bbox": [ + 313, + 502, + 559, + 546 + ], + "type": "inline_equation", + "content": "e_i" + }, + { + "bbox": [ + 313, + 502, + 559, + 546 + ], + "type": "text", + "content": " occurs." + } + ] + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 325, + 548, + 545, + 693 + ], + "blocks": [ + { + "bbox": [ + 325, + 548, + 545, + 693 + ], + "lines": [ + { + "bbox": [ + 325, + 548, + 545, + 693 + ], + "spans": [ + { + "bbox": [ + 325, + 548, + 545, + 693 + ], + "type": "image", + "image_path": "037cc7fecab764f3c9489d19c64e195a2e4237fe21b75687cf6cec155fb66656.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 337, + 695, + 537, + 708 + ], + "lines": [ + { + "bbox": [ + 337, + 695, + 537, + 708 + ], + "spans": [ + { + "bbox": [ + 337, + 695, + 537, + 708 + ], + "type": "text", + "content": "Figure 2: Illustration of various types of ST data." 
+ } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 192, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 192, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 192, + 69 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "type": "text", + "content": "Yuxuan Liang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 294, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 294, + 128 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 294, + 128 + ], + "type": "text", + "content": "Definition 4 (Spatio-Temporal Raster). 
An ST raster can be denoted as " + }, + { + "bbox": [ + 50, + 84, + 294, + 128 + ], + "type": "inline_equation", + "content": "\\mathcal{X} = < \\mathbf{X}_1,\\mathbf{X}_2,\\dots ,\\mathbf{X}_T > \\in \\mathbb{R}^{H\\times W\\times T\\times D}" + }, + { + "bbox": [ + 50, + 84, + 294, + 128 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 50, + 84, + 294, + 128 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_t\\in \\mathbb{R}^{H\\times W\\times D}" + }, + { + "bbox": [ + 50, + 84, + 294, + 128 + ], + "type": "text", + "content": " denotes the signals collected from " + }, + { + "bbox": [ + 50, + 84, + 294, + 128 + ], + "type": "inline_equation", + "content": "N = HW" + }, + { + "bbox": [ + 50, + 84, + 294, + 128 + ], + "type": "text", + "content": " evenly distributed locations at time " + }, + { + "bbox": [ + 50, + 84, + 294, + 128 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 50, + 84, + 294, + 128 + ], + "type": "text", + "content": ", each characterized by " + }, + { + "bbox": [ + 50, + 84, + 294, + 128 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 50, + 84, + 294, + 128 + ], + "type": "text", + "content": " feature attributes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "spans": [ + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "text", + "content": "Definition 5 (Spatio-Temporal Graph). 
An ST graph extends the ST raster to be " + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "inline_equation", + "content": "X = < \\mathbf{X}_1, \\mathbf{X}_2, \\ldots, \\mathbf{X}_T > \\in \\mathbb{R}^{N \\times T \\times D}" + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "text", + "content": " by explicitly incorporating spatial correlations with a graph " + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "inline_equation", + "content": "G_t = (V, E_t, \\mathbf{A}_t)" + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "text", + "content": " locations are not uniformly distributed. Here " + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "text", + "content": " is the set of nodes, " + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "inline_equation", + "content": "E_t" + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "text", + "content": " is the set of edges, and " + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "inline_equation", + "content": "\\mathbf{A}_t \\in \\mathbb{R}^{N \\times N}" + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "text", + "content": " is the adjacency matrix at time " + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "text", + "content": ". The size of " + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 50, + 129, + 294, + 196 + ], + "type": "text", + "content": " is usually static." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 205, + 202, + 217 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 205, + 202, + 217 + ], + "spans": [ + { + "bbox": [ + 51, + 205, + 202, + 217 + ], + "type": "text", + "content": "3 The Workflow Perspective" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 219, + 295, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 219, + 295, + 242 + ], + "spans": [ + { + "bbox": [ + 50, + 219, + 295, + 242 + ], + "type": "text", + "content": "As shown in Figure 3, we examine STFMs from a holistic, bottom-up perspective, emphasizing their composition across four key aspects:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 243, + 295, + 452 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 51, + 243, + 295, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 243, + 295, + 298 + ], + "spans": [ + { + "bbox": [ + 51, + 243, + 295, + 298 + ], + "type": "text", + "content": "- ST Data Sensing refers to the acquisition of data that varies over both space and time from diverse resources (e.g., sensors, satellites, social media), to capture dynamic environmental, geographic, or social phenomena. We also consider synthetic data generation for enhancing data diversity and quantity." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 299, + 295, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 299, + 295, + 353 + ], + "spans": [ + { + "bbox": [ + 51, + 299, + 295, + 353 + ], + "type": "text", + "content": "- ST Data Management focuses on storing, indexing, and organizing these large-scale, heterogeneous ST datasets, incorporating strategies like distributed architectures for efficient retrieval and integration. FMs can enhance this process by facilitating data cleaning, query & retrieval, and data integration." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 354, + 295, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 354, + 295, + 408 + ], + "spans": [ + { + "bbox": [ + 51, + 354, + 295, + 408 + ], + "type": "text", + "content": "- ST Data Mining involves learning and analyzing ST data that varies across both space and time to uncover patterns, trends, and relationships, using data mining (DM), deep learning (DL) techniques, or the newly-proposed STFMs with strong capabilities in perception, optimization, and reasoning." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 408, + 295, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 408, + 295, + 452 + ], + "spans": [ + { + "bbox": [ + 51, + 408, + 295, + 452 + ], + "type": "text", + "content": "- Downstream Applications: This stage harnesses the above insights from ST data to drive real-world applications, ranging from numerical problems to inferential problems, where informed actions and policies are formulated." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 454, + 295, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 454, + 295, + 498 + ], + "spans": [ + { + "bbox": [ + 50, + 454, + 295, + 498 + ], + "type": "text", + "content": "By examining these four aspects, we can better understand how STFMs advance from raw data acquisition to high-level service providing, ultimately enabling more intelligent, adaptable, and impactful solutions. We will detail each stage in the following sections." 
+ } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 53, + 505, + 293, + 693 + ], + "blocks": [ + { + "bbox": [ + 53, + 505, + 293, + 693 + ], + "lines": [ + { + "bbox": [ + 53, + 505, + 293, + 693 + ], + "spans": [ + { + "bbox": [ + 53, + 505, + 293, + 693 + ], + "type": "image", + "image_path": "7550fb89f980864400839d7d1a4cff676d111c15a655bac2fa282342c99fd317.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 60, + 696, + 285, + 706 + ], + "lines": [ + { + "bbox": [ + 60, + 696, + 285, + 706 + ], + "spans": [ + { + "bbox": [ + 60, + 696, + 285, + 706 + ], + "type": "text", + "content": "Figure 3: The framework of STFMs for ST data science." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 83, + 492, + 96 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 83, + 492, + 96 + ], + "spans": [ + { + "bbox": [ + 315, + 83, + 492, + 96 + ], + "type": "text", + "content": "3.1 Spatio-Temporal Data Sensing" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 99, + 560, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 99, + 560, + 142 + ], + "spans": [ + { + "bbox": [ + 313, + 99, + 560, + 142 + ], + "type": "text", + "content": "FMs revolutionize ST data sensing from two complementary aspects: real-world data sensing, which involves collecting data from physical sources, and synthetic data generation, which creates synthetic ST data through foundation models." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 145, + 559, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 145, + 559, + 332 + ], + "spans": [ + { + "bbox": [ + 313, + 145, + 559, + 332 + ], + "type": "text", + "content": "3.1.1 Real-World Data Sensing. Advances in sensing and data acquisition technologies have led to the generation of vast amounts of ST data. 
FMs are increasingly applied in human-centric active sensing, particularly in the context of citizen reporting for urban and environmental monitoring [41]. These models act as powerful agents for collecting and processing real-time data from citizens, enabling the efficient handling of ST data [19, 27, 101]. For example, citizens might constantly report incidents, environmental changes, or social events through text or voice [178]. By understanding these reports, LLMs can categorize, prioritize, and trigger appropriate responses for various urban issues, from traffic congestion to environmental hazards. This enhances the decision-making process by continuously updating their models with new data streams. Thus, LLMs are not just passive analytical tools but active participants that help make urban environments more responsive and adaptive to citizen inputs, transforming traditional citizen feedback into actionable knowledge, enabling more sustainable and resilient cities." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 332, + 559, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 332, + 559, + 475 + ], + "spans": [ + { + "bbox": [ + 313, + 332, + 559, + 475 + ], + "type": "text", + "content": "FMs can also function as intelligent schedulers or simulate multiagent systems to optimize the recruitment and coordination of participants for crowdsensing, particularly under budget constraints [41, 139, 191]. By analyzing ST data and understanding context, LLMs can identify regions and times where crowdsensing efforts will yield the most valuable information. They dynamically recruit participants based on proximity, availability, and past contributions, reducing redundant data collection. Additionally, LLMs simulate multiple agents interacting in real time, ensuring the efficient distribution of sensing tasks across a network of citizens or devices [190]. 
This strategic scheduling and agent-based coordination maximize coverage while minimizing costs, ensuring that crowdsensing delivers valuable, real-time insights under budgetary constraints." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 477, + 559, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 477, + 559, + 643 + ], + "spans": [ + { + "bbox": [ + 313, + 477, + 559, + 643 + ], + "type": "text", + "content": "3.1.2 Synthetic Data Generation. FMs can also facilitate data generation, which enhances ST data by increasing its diversity, improving model robustness, and compensating for missing or sparse information [95]. This is crucial for ST tasks like mobility analytics, where collecting real-world data is often costly or raises privacy concerns. For instance, Trajectory-LLM [154] generates vehicle trajectories from brief textual descriptions of vehicle interactions, whereas Traj-LLM [56] generates human trajectories by leveraging personas, memory modules, and routine profiles. LLMob [126] advances mobility data generation, offering flexibility in modeling diverse urban activities and personal mobility patterns, thus improving transportation system modeling and analysis. LLMs have also been employed to construct synthetic environments that replicate real-world conditions across diverse domains, including intelligent transportation [1] and disaster management [31]." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 651, + 518, + 663 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 651, + 518, + 663 + ], + "spans": [ + { + "bbox": [ + 314, + 651, + 518, + 663 + ], + "type": "text", + "content": "3.2 Spatio-Temporal Data Management" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 666, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 666, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 666, + 559, + 710 + ], + "type": "text", + "content": "Upon the acquisition of ST data, the challenge of effective management emerges, particularly in addressing data quality issues (e.g., missing values/views) and facilitating data retrieval and integration. Within this context, FMs can be harnessed in the following ways." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 274, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 274, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 274, + 69 + ], + "type": "text", + "content": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 85, + 294, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 85, + 294, + 260 + ], + "spans": [ + { + "bbox": [ + 53, + 85, + 294, + 260 + ], + "type": "text", + "content": "3.2.1 Data Cleaning. 
Data cleaning is the process of improving data quality by addressing issues such as missing values, low sampling rates, and noise. For example, ST data often exhibit missing values due to various factors like sensor malfunctions and transmission disruptions [178]. Filling in these missing values[113] is crucial for ensuring the integrity of predictive models, optimizing strategies, and facilitating informed decision-making. Recent literature reveals that LLMs can serve as powerful zero-shot [164] or few-shot [17, 172] learners to data imputation by leveraging their ability to identify and learn complex ST patterns. PLMTrajRec [135], utilizing a pretrained language model to recover sparse trajectory data by unifying intervals and inferring road conditions, showing effective generalization across varied sampling intervals in tests. Moreover, scholars have investigated the potential of leveraging LLMs to augment missing views or information, such as urban region profiling [40, 150, 163] and traffic video captioning [25]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 265, + 294, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 265, + 294, + 451 + ], + "spans": [ + { + "bbox": [ + 53, + 265, + 294, + 451 + ], + "type": "text", + "content": "3.2.2 Query & Retrieval. Meanwhile, LLM can be applied to querying and retrieval to enhance information retrieval accuracy under the ST context. By leveraging their advanced natural language understanding capabilities, LLMs can process user queries in a more contextual and semantically rich manner, enabling precise retrieval of relevant information from structured and unstructured data sources. For instance, UrbanLLM [47] finetunes LLMs for urban activity planning and management, which serves as a problem solver that decodes urban-related queries into several sub-tasks, with each one solved by suitable spatio-temporal AI models. Alamsyah et al. 
[2] propose an automated smart city planning system that utilizes a personalized LLM with Retrieval Augmented Generation (RAG) [30] to generate tailored urban planning recommendations while ensuring data privacy, where RAG is used to retrieve relevant urban planning documents for context-aware responses. Another line of work [67, 75, 170, 179] utilizes Multimodal LLM for cross-modal information retrieval to enhance urban computing tasks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 453, + 294, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 453, + 294, + 585 + ], + "spans": [ + { + "bbox": [ + 53, + 453, + 294, + 585 + ], + "type": "text", + "content": "3.2.3 Data Integration. Data integration seeks to combine information from disparate sources, often necessitating the understanding and mapping of relationships between entities in heterogeneous datasets. LLMs are increasingly being employed in this domain, particularly for knowledge graph construction [24], where they automate and enhance the extraction, integration, and reasoning of related data. In the context of ST data, LLMs facilitate data integration by leveraging heterogeneous urban data sources, performing relational triplet extraction, and completing knowledge graphs through geospatial reasoning [94, 106]. A pioneering study UrbanKGent [105] proposes an LLM-based Agent framework to automate the process of urban knowledge graph construction." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 596, + 225, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 596, + 225, + 609 + ], + "spans": [ + { + "bbox": [ + 53, + 596, + 225, + 609 + ], + "type": "text", + "content": "3.3 Spatio-Temporal Data Mining" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 611, + 294, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 611, + 294, + 708 + ], + "spans": [ + { + "bbox": [ + 53, + 611, + 294, + 708 + ], + "type": "text", + "content": "Unlike traditional data mining, which primarily focuses on structured datasets, ST data mining captures intricate spatial and temporal dependencies within ST data using machine learning or deep learning techniques [51, 129, 167]. With the emergence of FMs and LLMs, Spatio-Temporal Foundation Models (STFMs) offer new possibilities by integrating perception, optimization, and reasoning capabilities to enhance ST data mining. In this section, we explore these key capabilities, while their specific applications across different domains are detailed in Sec. 3.4." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 85, + 558, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 85, + 558, + 172 + ], + "spans": [ + { + "bbox": [ + 317, + 85, + 558, + 172 + ], + "type": "text", + "content": "3.3.1 Perception. In STFMs, perception encompasses the ability to effectively model, interpret, and generalize complex spatial and temporal patterns, enabling a deeper understanding of dynamic environments. This capability can be categorized into two key perspectives. The first view pertains to an agent's ability to perceive and understand its surrounding environment, capturing visual or contextual interactions within real-world scenarios such as smart cities [151], indoor activities [152, 153], and mobile Apps [127]." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 173, + 558, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 173, + 558, + 402 + ], + "spans": [ + { + "bbox": [ + 317, + 173, + 558, + 402 + ], + "type": "text", + "content": "The second aspect involves interpreting and extracting ST patterns from sensor data, ensuring accurate predictions across diverse domains. Domain-agnostic approaches, such as STEP [117] and GPT-ST [74], have employed pretraining strategies that leverage historical observations to enhance forecasting performance. In urban computing, models like TFM [130] and OpenCity [72] utilize graph-based FMs to analyze behaviors and interactions within transportation systems, yielding promising results in traffic prediction. In climate science, Pangu [9], trained on 39 years of global climate data, delivers superior deterministic forecasting outcomes across all evaluated variables when compared to leading numerical weather prediction systems. Additional notable examples in this area include the works [60, 76, 104, 108]. Despite these advances, achieving robust generalization remains a critical challenge, as most existing research has been confined to in-domain applications. While models like UniST [158] are designed as one-for-all solutions for diverse ST scenarios, their training datasets and evaluation testbeds are predominantly limited to transportation. Nevertheless, their underlying technique stacks show promise for broader cross-domain and cross-modality generalization. Other significant contributions in this realm include UniFlow [159] and UrbanDiT [160]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 403, + 558, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 403, + 558, + 479 + ], + "spans": [ + { + "bbox": [ + 317, + 403, + 558, + 479 + ], + "type": "text", + "content": "3.3.2 Optimization. 
Building upon the perceptual foundations, the optimization ability focuses on refining and adapting models to achieve specific, task-oriented objectives. In other words, models are not only expected to capture rich ST patterns but also to drive actionable decision-making in dynamic, real-world scenarios. This involves integrating advanced optimization strategies that tailor model behavior to the unique demands of applications." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 479, + 558, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 479, + 558, + 708 + ], + "spans": [ + { + "bbox": [ + 317, + 479, + 558, + 708 + ], + "type": "text", + "content": "A prominent approach involves agent-based frameworks. For example, in traffic signal control, traditional methods (e.g., RL) are now augmented by frameworks that use LLMs as decision-making agents [61]. These systems leverage real-time traffic data and expert prompts to enable human-like planning, resulting in more adaptive and interpretable control strategies. Similarly, CityGPT [35] decomposes ST analysis into specialized sub-tasks, handled by temporal, spatial, and fusion agents, to efficiently process IoT data and generate insightful visualizations. AgentMove [28] addresses human mobility prediction by breaking down the task into modules for individual pattern mining, urban structure analysis, and collective behavior extraction. In geo-science, systems like Geode [38] integrate explicit optimization modules with ST data retrieval and machine learning inference to tackle zero-shot geospatial QA with enhanced precision. In urban planning, an innovative work [185] simulates planners and residents by LLM agents and enables their interactions to optimize inclusive land-use plans efficiently. Despite these promising developments, significant challenges remain. 
Seamlessly integrating perceptual capabilities with targeted optimization strategies is crucial for next-generation ST models that are both versatile and effective across diverse operational contexts." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 192, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 192, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 192, + 69 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 501, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 501, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 501, + 60, + 558, + 68 + ], + "type": "text", + "content": "Yuxuan Liang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 95, + 558, + 294 + ], + "blocks": [ + { + "bbox": [ + 164, + 83, + 444, + 93 + ], + "lines": [ + { + "bbox": [ + 164, + 83, + 444, + 93 + ], + "spans": [ + { + "bbox": [ + 164, + 83, + 444, + 93 + ], + "type": "text", + "content": "Table 2: Summary of representative FMs tailored for ST data science." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 95, + 558, + 294 + ], + "lines": [ + { + "bbox": [ + 52, + 95, + 558, + 294 + ], + "spans": [ + { + "bbox": [ + 52, + 95, + 558, + 294 + ], + "type": "table", + "html": "
StageTask & CapabilityExampleMethodCategoryVenueYear
SensingReal-World Data SensingIdentifying Citizen-Related Issues from Social Mediados Santos et al. [27]LLMCAiSE2024
Real-World Data SensingIntelligent Crowdsensing CoordinationAutoWebCrowds [190]LLMICWE2024
Synthetic Data GenerationTrajectories GenerationTrajectory-LLM [154]LLMICLR2025
Synthetic Data GenerationHuman Activity Data GenerationLLMob [126]LLMNeurIPS2024
ManagementData CleaningFew-Shot Learner for Filling Missing ValuesNuwaTS [17]PFMPreprint2024
Data CleaningTrajectory RecoveryPLMTrajRec [135]LLMPreprint2024
Data CleaningAugment Additional Views of DataUrbanCLIP [150]LLMWWW2024
Query & RetrievalAutonomous Query Processor for Urban ManagementUrbanLLM [47]LLMEMNLP2024
Data IntegrationUrban Knowledge Graph ConstructionUrbanKGent [105]LLMNeurIPS2024
MiningPerceptionUnderstand the EnvironmentMagma [152]PFMCVPR2025
PerceptionInterpret and Extract ST PatternsSTEP [117]PFMKDD2022
OptimizationDrive Actionable Decision-Making in Dynamic ScenariosAgentMove [28]LLMPreprint2024
OptimizationOptimize Land-Use Plans by LLM AgentsZhou et al. [185]LLMPreprint2024
ReasoningCommon-sense ReasoningCausal-VidQA [66]PFMCVPR2022
ReasoningNumerical ReasoningUrbanGPT [73]LLMKDD2024
ReasoningCausal ReasoningNuwaDynamics [128]PFMICLR2024
ApplicationForecastingGlobal Weather ForecastingPangu [9]PFMNature2023
ImputationGenerative Adversarial Network for Traffic Data ImputationSTGAN [162]PFMIEEE TBD2022
Anomaly DetectionTransformer-based Anomaly DetectorXu et al. [145]PFMICLR2022
Event AnalysisDetecting and Interpreting EventsLAMP [120]LLMNeurIPS2023
Physical GroundingGeo-localizationGeoGPT [174]LLMJAG2023
Decision MakingTransportation Analytics and ControlTrafficGPT [168]LLMTransport Policy2024
Scenario SimulationSimulation of Human BehaviorPark et al. [107]LLMUIST2023
", + "image_path": "7bf729db0134e8592812df53324e7a0bdf9e7208c0a94f185406b5acb97be084.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 300, + 294, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 300, + 294, + 356 + ], + "spans": [ + { + "bbox": [ + 50, + 300, + 294, + 356 + ], + "type": "text", + "content": "3.3.3 Reasoning. While current ST models have demonstrated notable success in recognition and agent-based tasks, their reasoning and cognitive capabilities remain underdeveloped compared to advanced systems like DeepSeek-R1 [36]. To progress toward ST general intelligence, we identify three key aspects of reasoning:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 357, + 295, + 544 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 51, + 357, + 295, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 357, + 295, + 433 + ], + "spans": [ + { + "bbox": [ + 51, + 357, + 295, + 433 + ], + "type": "text", + "content": "- Common-sense Reasoning harnesses everyday knowledge and contextual cues to draw implicit inferences from complex data. For instance, Causal-VidQA [66] enables models to infer explanations, predict future states, and generate counterfactual scenarios in video question-answering, while SituatedGen [173] integrates geographical and temporal contexts to generate coherent and contextually plausible statements." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 434, + 295, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 434, + 295, + 489 + ], + "spans": [ + { + "bbox": [ + 51, + 434, + 295, + 489 + ], + "type": "text", + "content": "- Numerical Reasoning involves interpreting and manipulating quantitative information to perform arithmetic operations, assess uncertainties, and discern relationships within ST data; for instance, STBench [69] evaluates these abilities in LLMs, while UrbanGPT [73] enhances ST forecasting with instruction tuning." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 489, + 295, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 489, + 295, + 544 + ], + "spans": [ + { + "bbox": [ + 51, + 489, + 295, + 544 + ], + "type": "text", + "content": "- Causal Reasoning seeks to uncover cause-effect relations within ST data, crucial for robust and interpretable predictions. For example, NuwaDynamics [128] identifies causal regions and applies interventions to improve generalization, and GCIM [176] learns latent causal structures to disentangle spurious correlations." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 545, + 295, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 545, + 295, + 579 + ], + "spans": [ + { + "bbox": [ + 50, + 545, + 295, + 579 + ], + "type": "text", + "content": "Collectively, these dimensions offer a promising yet underexplored pathway toward achieving ST general intelligence, bridging the gap between pattern recognition and true cognitive understanding." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 586, + 208, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 586, + 208, + 597 + ], + "spans": [ + { + "bbox": [ + 51, + 586, + 208, + 597 + ], + "type": "text", + "content": "3.4 Downstream Applications" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 600, + 295, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 600, + 295, + 676 + ], + "spans": [ + { + "bbox": [ + 50, + 600, + 295, + 676 + ], + "type": "text", + "content": "3.4.1 STFMs for Numerical Problems. ST data is predominately numeric in many real-world scenarios. Addressing these numeric challenges is critical for tasks like forecasting, imputation, and anomaly detection [52], which demand an accurate understanding of the physical world. STFMs excel in these areas by uncovering intricate patterns and dependencies, ultimately enabling more reliable data-driven decision-making." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 677, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 677, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 677, + 295, + 710 + ], + "type": "text", + "content": "- Forecasting. Early forecasting approaches often relied on task-specific neural networks like STGNNs [51, 52, 110, 116], whereas recent developments have shifted toward universal forecasting [91," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 300, + 559, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 300, + 559, + 410 + ], + "spans": [ + { + "bbox": [ + 313, + 300, + 559, + 410 + ], + "type": "text", + "content": "137, 167]. For instance, GPT-ST [74] leverages pretraining on historical observations to boost predictive performance, while UniST [158] unifies multiple traffic prediction tasks within a single model by coupling sequence modeling with attention-based mechanisms. 
Building on this progress, ST-LLM [86] and STG-LLM [90] enhance traffic predictions by combining ST inputs with partially frozen large language models, and UrbanGPT [73] extends this paradigm further by employing ST instruction tuning to better align textual and ST data. Similar approaches have also been widely used in other domains, such as ClimaX [104], Geo-Bench [60], and Orca [76]." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 410, + 559, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 410, + 559, + 552 + ], + "spans": [ + { + "bbox": [ + 313, + 410, + 559, + 552 + ], + "type": "text", + "content": "- Imputation. This has likewise benefited from techniques that capture ST dependencies to accurately restore missing or corrupted data. For instance, NuwaTS [17] repurposes pretrained language models with contrastive learning and specialized patch embeddings (capturing missing patterns/statistics) to enable cross-domain time series imputation through a unified framework. STD-LLM [44] employs LLMs with spatial-temporal tokenizers and hypergraph learning modules to handle missing values in spatio-temporal data while capturing non-pairwise correlations through topology-aware node embeddings. DrIM [83] combines LLM-derived text representations (from masked tabular data conversions) with contrastive learning to measure similarities for nearest-neighbor imputation in heterogeneous datasets." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 553, + 559, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 553, + 559, + 707 + ], + "spans": [ + { + "bbox": [ + 313, + 553, + 559, + 707 + ], + "type": "text", + "content": "- Anomaly Detection. Anomaly detection in ST data has advanced by leveraging models that learn the normal dynamics of ST systems to identify deviations indicative of abnormal events. 
Whereas prior methods relied on statistical thresholding and clustering to flag outliers, recent FMs learn robust ST representations to detect even subtle anomalies. For example, early attempts [26, 89, 186] investigate the feasibility of using LLMs for anomaly detection in time series data. SigLLM [3] employs GPT-series with signal-to-text conversion techniques, offering dual pipelines (anomaly prompting and deviation detection) for time series analysis through textual or visual representations of numerical data. AD-LLM [156] introduces a benchmark framework combining GPT-4's zero-shot reasoning with contrastive learning for anomaly context enrichment and automated model selection through chain-of-thought prompting." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 273, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 273, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 273, + 69 + ], + "type": "text", + "content": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 296, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 296, + 129 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 296, + 129 + ], + "type": "text", + "content": "- Others. Furthermore, FMs have demonstrated great potential in other numerical problems such as time series classification [18], geospatial prediction [39, 100], traffic speed inference [7], and socioeconomic indicator prediction [40, 142, 150]." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 138, + 295, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 138, + 295, + 248 + ], + "spans": [ + { + "bbox": [ + 50, + 138, + 295, + 248 + ], + "type": "text", + "content": "3.4.2 STFMs for Inferential Problems. Inferential problems in ST data require the integration of both reasoning and understanding of environments. These problems involve high-level cognitive tasks where accurate representation of locations, movements, and environmental context is essential. Addressing such problems goes beyond numerical predictions — it necessitates answering critical inferential questions: What happened? Where is it? What to do? What if? FMs have shown their potential to enhance solutions for these challenges by leveraging their capacity to handle ST knowledge and interpret complex, unstructured data." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 254, + 295, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 254, + 295, + 365 + ], + "spans": [ + { + "bbox": [ + 50, + 254, + 295, + 365 + ], + "type": "text", + "content": "\"What happened?\" - Event Analysis. Detecting events aims to recognize and explain significant events in time and space. Traditional models struggle with scalability, interpretability, and incorporating external knowledge. To this end, LAMP [120] integrates LLMs with event models, using abductive reasoning to suggest plausible causes for predicted events, retrieve supporting evidence, and rank predictions for improved accuracy. Meanwhile, LEAP [165] replaces GNNs and RNNs with LLMs by framing event detection as a question-answering task, predicting missing event components and forecasting future relations through self-attention mechanisms." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 369, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 369, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 369, + 295, + 710 + ], + "type": "text", + "content": "\"Where is it?\"- Physical Grounding. Grounding ST models in real-world geographical contexts is essential for various applications such as geo-localization, map reconstruction, intelligent routing and navigation. Geo-localization aims to determine an object's location based on multimodal inputs, including images, text, and sensor data. By processing these cues in conjunction with map data, LLMs such as GPT-4o, DeepSeek [36], and GeoGPT [174] can infer geographic coordinates or identify specific locations described in natural language. Map reconstruction, on the other hand, involves creating or updating digital maps by synthesizing information from satellite imagery, sensor readings, and textual reports. LLMs contribute by interpreting and generating map content, correcting inaccuracies, and filling in missing details. For instance, MapGPT [14] employs language-guided updates, incorporating textual descriptions of environmental changes into existing map structures. In personalized routing, ItiNera [123] combines LLMs with spatial optimization to generate personalized \"Citywalk\" itineraries, providing user-specific and spatially coherent urban exploration; ChinaTravel [115] provides a benchmark for real-world Chinese travel planning, enabling scalable evaluation of constraint satisfaction and preference optimization while highlighting the strengths of neuro-symbolic agents. Navigation systems further benefit from LLMs' ability to understand contextual instructions, interpret user queries, and reason about dynamic environments. For example, NavGPT [182] and NavGPT-v2 [181] integrate natural language with real-time traffic and indoor video data to generate personalized and optimized routing solutions. 
By incorporating STFMs across these domains, physical grounding models facilitate more precise localization, efficient navigation, and adaptive urban mobility solutions, ultimately bridging the gap between digital intelligence and real-world spatial reasoning." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 321, + 84, + 555, + 203 + ], + "blocks": [ + { + "bbox": [ + 321, + 84, + 555, + 203 + ], + "lines": [ + { + "bbox": [ + 321, + 84, + 555, + 203 + ], + "spans": [ + { + "bbox": [ + 321, + 84, + 555, + 203 + ], + "type": "image", + "image_path": "0dd90631ab1ad6054f2db1a2e36db5801f8436c980eebbece0578f2d29976011.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 328, + 209, + 544, + 220 + ], + "lines": [ + { + "bbox": [ + 328, + 209, + 544, + 220 + ], + "spans": [ + { + "bbox": [ + 328, + 209, + 544, + 220 + ], + "type": "text", + "content": "Figure 4: STFMs for addressing inferential problems." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 225, + 560, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 225, + 560, + 500 + ], + "spans": [ + { + "bbox": [ + 313, + 225, + 560, + 500 + ], + "type": "text", + "content": "\"What to do?\" - Decision Making. Optimizing policies and real-time decision-making in dynamic environments based on inferential insights plays a crucial role in a wide range of applications, including traffic control, autonomous vehicles, and disaster response. In traffic control and management, LLMs improve adaptability and interpretability compared to traditional reinforcement learning approaches [61]. Additionally, they facilitate sim-to-real transfer by modeling real-world traffic dynamics, improving the reliability of traffic signal optimization [22]. 
Beyond signal control, models like TrafficGPT [168] integrate multimodal traffic data with structured reasoning to analyze, predict, and optimize traffic efficiency and safety in real time. In autonomous vehicles, STFMs contribute to decision-making through both direct and indirect mechanisms. Directly, models such as DDM-Lag [88] employ diffusion-based frameworks with Lagrangian safety enhancements and hybrid policy updates to refine policy articulation and ensure safety. Indirectly, STFMs enhance autonomous driving by predicting realistic driving behaviors [55, 114] and leveraging multi-modal perception to integrate sensor data, bird's eye view maps, and traffic contexts [20, 184], improving situational awareness and vehicle control. Beyond transportation, STFMs play a critical role in disaster management and emergency response by integrating diverse spatio-temporal data sources, such as weather forecasts, remote sensing, and social media signals, to predict disaster impacts and optimize evacuation strategies [16, 31, 65]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 501, + 560, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 501, + 560, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 501, + 560, + 710 + ], + "type": "text", + "content": "\"What if?\"- Scenario Simulation. STFMs, with their advanced perception and reasoning capabilities, enable the development of STFM-based agents that integrate into Multi-Agent Systems (MAS) to model complex interactions across diverse domains [29]. In urban planning and social simulation, MAS facilitates participatory urban design by simulating interactions between planners and residents. For example, LLM-driven MAS has been used to collaboratively refine land-use plans, leading to improved accessibility and ecological outcomes that surpass human expert solutions [185]. 
Beyond urban planning, MAS contributes to social science research by modeling human-like behaviors in AI-driven networks. Studies such as [23, 107, 109] demonstrate that LLM-based agents can naturally develop social structures, providing valuable insights into emergent social dynamics. Beyond urban applications, MAS significantly advances game AI and strategic decision-making. Recent studies [112, 133, 187] highlight how MAS-powered reinforcement learning enables strategic gameplay, real-time opponent modeling, and interactive storytelling, fostering the development of more adaptive, intelligent, and realistic virtual agents." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 192, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 192, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 192, + 68 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "type": "text", + "content": "Yuxuan Liang et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 83, + 217, + 96 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 83, + 217, + 96 + ], + "spans": [ + { + "bbox": [ + 50, + 83, + 217, + 96 + ], + "type": "text", + "content": "4 The Methodology Perspective" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 99, + 294, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 99, + 294, + 164 + ], + "spans": [ + { + "bbox": [ + 50, + 99, + 294, + 164 + ], + "type": "text", + "content": "As shown in Figure 5, we delve into STFMs from a methodology perspective, focusing on " + }, + { + "bbox": [ + 50, + 99, + 294, + 164 + ], + "type": "inline_equation", + "content": "i)" + }, + { + "bbox": [ + 50, + 99, + 294, + 164 + ], + "type": "text", + "content": " LLM-based models, which are widely applied across the entire workflow of " + }, + { + "bbox": [ + 50, + 99, + 294, + 164 + ], + "type": "inline_equation", + "content": "ST" + }, + { + "bbox": [ + 50, + 99, + 294, + 164 + ], + "type": "text", + "content": " data science by zero-shot utilization or fine-tuning and " + }, + { + "bbox": [ + 50, + 99, + 294, + 164 + ], + "type": "inline_equation", + "content": "ii)" + }, + { + "bbox": [ + 50, + 99, + 294, + 164 + ], + "type": "text", + "content": " PFM-based models, i.e., pretraining FMs from scratch, which is mainly utilized for " + }, + { + "bbox": [ + 50, + 99, + 294, + 164 + ], + "type": "inline_equation", + "content": "ST" + }, + { + "bbox": [ + 50, + 99, + 294, + 164 + ], + "type": "text", + "content": " data mining. The comparison between them can be found in Appendix C." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 52, + 171, + 291, + 384 + ], + "blocks": [ + { + "bbox": [ + 52, + 171, + 291, + 384 + ], + "lines": [ + { + "bbox": [ + 52, + 171, + 291, + 384 + ], + "spans": [ + { + "bbox": [ + 52, + 171, + 291, + 384 + ], + "type": "image", + "image_path": "60d69dbcc8c85b62a9fac9cb5b695647836692fdc243ddfde0484cf9e89d35d3.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 387, + 291, + 398 + ], + "lines": [ + { + "bbox": [ + 54, + 387, + 291, + 398 + ], + "spans": [ + { + "bbox": [ + 54, + 387, + 291, + 398 + ], + "type": "text", + "content": "Figure 5: A method-centric taxonomy. Full version: Fig. 7." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 408, + 228, + 421 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 408, + 228, + 421 + ], + "spans": [ + { + "bbox": [ + 50, + 408, + 228, + 421 + ], + "type": "text", + "content": "4.1 Large Language Models (LLM)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 423, + 295, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 423, + 295, + 511 + ], + "spans": [ + { + "bbox": [ + 50, + 423, + 295, + 511 + ], + "type": "text", + "content": "4.1.1 Zero-shot Learner. LLMs exhibit strong reasoning and contextual understanding capabilities, making them highly effective across various ST tasks, including data sensing, management, and mining. As shown in Appendix B, they can function as augmenters, predictors, or agents. To ease the presentation, we adopt a broad definition of LLMs, encompassing standard LLMs, Vision-Language Models (VLM), and Multimodal LLMs (MLLM). The zero-shot utilization of LLMs can be categorized into two primary classes." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 512, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 512, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 512, + 295, + 710 + ], + "type": "text", + "content": "- Prompt Engineering. When taking LLMs as zero-shot predictors [33, 53, 125] or data augmenters [150] for various tasks, prompt engineering plays an essential role in shaping model outputs. Below, we summarize key aspects for prompt engineering in current research: a) Prompt Construction: A well-designed prompt typically contains key elements like Task Instruction, Tokenization, and Few-shot Examples. Task instruction [53, 147, 149] aims to explicitly guide LLMs to execute specific operations, incorporating domain knowledge [157] if applicable. Tokenization [33, 53] is crucial to aligning ST data formats with LLM input structures. Additionally, presenting a small number of annotated examples [175] facilitates in-context learning, enabling LLMs to better generalize to complex tasks while ensuring output consistency and adherence to the expected format. b) Prompt Learning: [73, 148] Also known as instruction-tuning, this method learns prompts dynamically rather than relying on manually crafted ones. By optimizing prompt structures during training, it provides a flexible and efficient way to adapt LLMs to new tasks without altering their underlying model weights." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 84, + 559, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 84, + 559, + 140 + ], + "spans": [ + { + "bbox": [ + 313, + 84, + 559, + 140 + ], + "type": "text", + "content": "c) Chain-of-Thought (CoT) Prompting: CoT [87, 175] enhances LLMs' reasoning capabilities by guiding them through step-by-step logical progression. 
This method improves their ability to tackle complex spatio-temporal tasks, ensuring more interpretable, structured, and accurate outputs in decision-making processes." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 141, + 559, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 141, + 559, + 372 + ], + "spans": [ + { + "bbox": [ + 313, + 141, + 559, + 372 + ], + "type": "text", + "content": "- Agentic Engineering. The emergence of LLM-based agents [49, 144, 168, 174, 185] with reasoning, memory and tool-calling capabilities is transforming ST data science, enabling more adaptive and autonomous decision-making. When designing agent-based solutions, existing works primarily consider the following key aspects: a) Role Assignment. [50, 144, 174] clearly specify the responsibilities and functional boundaries of each agent within the system. b) Memorization [64, 174] refers to the agent's capability to store, recall, and leverage past information and context during task execution. A basic approach involves embedding past interactions into prompts, while more advanced techniques like Retrieval-Augmented Generation (RAG) [143, 155] dynamically retrieve relevant information from external knowledge bases, incorporating only the most pertinent content into the prompt. c) Tool Definition [168, 174], which identify and integrate various tools and functionalities that an agent can call upon to solve complex tasks. In ST data science, various expert models like STGNNs [51] can be wrapped as a tool and added into the agent in a plug-and-play manner. d) Multi-Agent System. Deploying multiple specialized agents to work collaboratively (each with distinct roles) enhances the efficiency and robustness of solutions for intricate ST challenges [49, 63, 185]." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 378, + 559, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 378, + 559, + 433 + ], + "spans": [ + { + "bbox": [ + 313, + 378, + 559, + 433 + ], + "type": "text", + "content": "4.1.2 Supervised Fine-Tuning for LLMs. Fine-tuning adapts LLMs to ST tasks by adjusting their parameters based on domain-specific datasets, sometimes incorporating additional modalities such as texts [79, 150] and vision [180]. We categorize fine-tuning methods into three approaches based on the extent of parameter updates:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 315, + 435, + 559, + 709 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 315, + 435, + 558, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 435, + 558, + 489 + ], + "spans": [ + { + "bbox": [ + 315, + 435, + 558, + 489 + ], + "type": "text", + "content": "- Full Parameter Fine-Tuning [68, 98, 100, 104, 108] updates all model parameters based on downstream ST datasets, achieving maximal adaptation to specific tasks. However, it requires substantial labeled data and high computational resources, making it impractical for many real-world applications." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 491, + 558, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 491, + 558, + 555 + ], + "spans": [ + { + "bbox": [ + 315, + 491, + 558, + 555 + ], + "type": "text", + "content": "- Partial Parameter Fine-tuning. To reduce computational overhead, this method [13, 183] freezes most parameters, such as attention weights, while fine-tuning only a small subset (e.g., position encodings and layer normalization). However, modifying a subset of parameters can disrupt the LLM's learned representations, leading to catastrophic forgetting of general knowledge." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 556, + 559, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 556, + 559, + 709 + ], + "spans": [ + { + "bbox": [ + 315, + 556, + 559, + 709 + ], + "type": "text", + "content": "- Add-on Parameter Fine-Tuning. To mitigate forgetting while maintaining efficiency, this technique [61] introduces trainable low-rank matrices (e.g., LoRA [42]), while keeping the original LLM weights frozen. This strategy preserves pretrained knowledge while enabling efficient adaptation to ST tasks. Besides fine-tuning LLMs' weights, another way is training additional layers for input tokenization or task adaption. For instance, TimeLLM [53] trains a self-attention layer that aligns patched time series representations with pretrained text prototype embeddings. Similarly, Time-VLM [180] trains a memory-enhanced attention to capture both short- and long-term dependencies. For task adaption, existing methods typically train an additional prediction head (e.g., linear layers) to project the LLM's output embeddings into a domain-specific space [53, 180]." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 273, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 273, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 273, + 69 + ], + "type": "text", + "content": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 83, + 261, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 83, + 261, + 95 + ], + "spans": [ + { + "bbox": [ + 50, + 83, + 261, + 95 + ], + "type": "text", + "content": "4.2 Pretrained Foundation Models (PFM)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 98, + 295, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 98, + 295, + 163 + ], + "spans": [ + { + "bbox": [ + 50, + 98, + 295, + 163 + ], + "type": "text", + "content": "Unlike LLMs, which build STFMs by directly using or fine-tuning LLMs, PFMs are developed from scratch, independent of existing LLMs. This approach enables domain-specific optimization, allowing models to better capture ST dependencies from cross-domain ST data without constraints imposed by linguistic priors. 
Following this, we examine PFMs through three key dimensions:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 167, + 295, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 167, + 295, + 201 + ], + "spans": [ + { + "bbox": [ + 50, + 167, + 295, + 201 + ], + "type": "text", + "content": "4.2.1 Neural Architecture. The architecture of PFMs is a fundamental design choice that directly influences their capabilities, efficiency, and adaptability in ST tasks, which can be categorized into:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 204, + 295, + 401 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 51, + 204, + 295, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 204, + 295, + 247 + ], + "spans": [ + { + "bbox": [ + 51, + 204, + 295, + 247 + ], + "type": "text", + "content": "- Transformer-based PFMs. Transformers have been the predominant architecture choice for building PFMs thanks to its powerful sequential modeling ability introduced by the self-attention mechanism [7, 72, 77, 85, 158]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 248, + 295, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 248, + 295, + 314 + ], + "spans": [ + { + "bbox": [ + 51, + 248, + 295, + 314 + ], + "type": "text", + "content": "- Diffusion-based PFMs. Diffusion-based models have recently emerged as a powerful approach for ST representation learning [12, 21, 136, 160, 161, 188], particularly in generative and predictive modeling. These models iteratively learn to reverse a stochastic noise process, enabling them to generate high-fidelity spatio-temporal sequences with strong generalization properties." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 314, + 295, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 314, + 295, + 357 + ], + "spans": [ + { + "bbox": [ + 51, + 314, + 295, + 357 + ], + "type": "text", + "content": "- Graph-based PFMs. Unlike sequential models, GNNs excel at representing spatially structured data such as road networks. [62, 130] build FMs based on graph neural networks to learn the complex correlation between different entities in ST applications." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 357, + 295, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 357, + 295, + 401 + ], + "spans": [ + { + "bbox": [ + 51, + 357, + 295, + 401 + ], + "type": "text", + "content": "- Others. Another emerging class of PFMs is State Space Model (SSM)-based models [8, 43, 97], which construct PFMs using structured state-space representations. Meanwhile, several studies utilize CNNs [118] as backbones for developing PFMs." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 407, + 295, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 407, + 295, + 572 + ], + "spans": [ + { + "bbox": [ + 50, + 407, + 295, + 572 + ], + "type": "text", + "content": "4.2.2 Pretraining Scheme. To enhance generalization ability, PFMs are usually pretrained based on cross-domain datasets [91, 137, 158], enabling them to learn diverse ST patterns across multiple domains. 
Existing pretraining schemes of PFMs can be classified into three types based on the training objectives: a) Generative Pretraining [85, 98, 130, 138, 189] focuses on reconstructing input data by learning its underlying distribution, enabling the model to generate realistic time series or ST data, while b) Contrastive Pretraining [7, 84, 171] emphasize distinguishing between similar and dissimilar data pairs to learn robust representations by maximizing agreement between augmented views of the same sample. It is particularly effective in multimodal ST learning, aligning heterogeneous data sources such as satellite imagery and its text description. c) Hybrid Pretraining [77] integrates both generative and contrastive objectives, leveraging their complementary strengths." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 575, + 295, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 575, + 295, + 609 + ], + "spans": [ + { + "bbox": [ + 50, + 575, + 295, + 609 + ], + "type": "text", + "content": "4.2.3 Data Modality. ST data manifests in various modalities, each characterized by unique properties (see Section 2), necessitating the development of modality-specific STFMs:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 610, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 610, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 51, + 610, + 295, + 710 + ], + "type": "text", + "content": "- Location. PFMs for location data [7, 40, 71, 124, 138, 150, 171] aim to learn general embedding for geographical entities. For instance, GeoVectors [124] and SpaBERT [71] learn location embeddings based on open-source data such OpenStreetMap, while G2PTL [138] learns from massive logistics delivery data. Notably, there is a noticeable trend that leverages multi-modalities (such as satellite image and text) for comprehensive location embeddings. 
For example, both UrbanCLIP [150], UrbanVLP [40], and ReFound [142] utilize satellite images for urban region profiling." + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 317, + 81, + 553, + 217 + ], + "blocks": [ + { + "bbox": [ + 317, + 81, + 553, + 217 + ], + "lines": [ + { + "bbox": [ + 317, + 81, + 553, + 217 + ], + "spans": [ + { + "bbox": [ + 317, + 81, + 553, + 217 + ], + "type": "image", + "image_path": "04153b45ab02f3e79c0ab49c30b7605ee00c77f9060519cff8dcabcf84cc5fc2.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 218, + 558, + 230 + ], + "lines": [ + { + "bbox": [ + 315, + 218, + 558, + 230 + ], + "spans": [ + { + "bbox": [ + 315, + 218, + 558, + 230 + ], + "type": "text", + "content": "Figure 6: Representative PFMs for different types of ST data." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 233, + 560, + 573 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 315, + 233, + 560, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 233, + 560, + 365 + ], + "spans": [ + { + "bbox": [ + 315, + 233, + 560, + 365 + ], + "type": "text", + "content": "- Trajectory & Event. PFMs for trajectory/event data [21, 84, 85, 103, 121, 189] are designed to learn general sequential patterns from inputs. A pioneering effort in this direction is TrajFM [85], which introduces a trajectory FM capable of supporting both regional and task transferability. Pretrained on vehicle trajectories from multiple cities, TrajFM employs a trajectory-masking and autoregressive recovery mechanism to enhance its learning capabilities. To tackle the limited resources of cross-domain trajectories, UniTraj [189] curates a billion-scale mobility dataset spanning diverse geographic regions to facilitate the advancement of trajectory-based FMs. 
For event data, MOTOR [121] proposes a time-to-event FM for structured medical records." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 365, + 559, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 365, + 559, + 496 + ], + "spans": [ + { + "bbox": [ + 315, + 365, + 559, + 496 + ], + "type": "text", + "content": "- ST Raster. PFMs for ST raster data [10, 15, 98, 104, 108, 117, 160] organize spatial information in a grid-like format, with a typical applied domain being weather/climate forecasting. For instance, W-MAE [98] trains a mask autoencoder for ST grid forecasting. CimaX [104] develops a general-purpose climate foundation model, pretrained on diverse datasets spanning various variables, ST scales, and physical contexts. Pangu [10] is trained on 39 years of global climate data, which achieves superior forecasting performance compared to leading numerical weather prediction systems. UniST [158] first pretrains the model in various ST raster data via masked pretraining, and then proposes a learnable ST prompt to enhance the model's generalization ability." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 497, + 559, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 497, + 559, + 573 + ], + "spans": [ + { + "bbox": [ + 315, + 497, + 559, + 573 + ], + "type": "text", + "content": "- ST Graph. PFMs for ST graph data [62, 72, 93, 111, 117, 134] learn the ST dependencies from ST graphs that generalize effectively in unseen spatial and temporal contexts. Unlike ST Raster PFMs, there are limited works in this area, which is more challenging due to the complex graph correlation. One representative is OpenCity [72] for ST graph forecasting, which integrates Transformer and GNN to model the ST dependencies in traffic data." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 576, + 501, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 576, + 501, + 586 + ], + "spans": [ + { + "bbox": [ + 315, + 576, + 501, + 586 + ], + "type": "text", + "content": "5 Conclusion and Future Directions" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 589, + 560, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 589, + 560, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 589, + 560, + 710 + ], + "type": "text", + "content": "The rapid advancement of FMs has transformed ST data science, impacting sensing, management, and mining. This survey provides a comprehensive review of FMs for ST data science, identifying key capabilities such as perception, reasoning, and optimization while exploring diverse downstream tasks and datasets. We also establish a systematic taxonomy of methodologies, enhancing understanding of how STFMs model ST data. Despite progress, challenges remain in generalization, interpretability, and efficiency. By consolidating recent advances and outlining future directions (see Appendix A), this survey aims to inspire further innovations, driving the development of scalable and adaptive STFMs for real practice." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 192, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 192, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 192, + 69 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "type": "text", + "content": "Yuxuan Liang et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 83, + 108, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 83, + 108, + 95 + ], + "spans": [ + { + "bbox": [ + 52, + 83, + 108, + 95 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 98, + 295, + 703 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 58, + 98, + 295, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 98, + 295, + 121 + ], + "spans": [ + { + "bbox": [ + 58, + 98, + 295, + 121 + ], + "type": "text", + "content": "[1] Oluwanifemi Adebayo Moses Adekanye. 2024. LIm-powered synthetic environments for self-driving scenarios. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 23721-23723." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 121, + 295, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 121, + 295, + 153 + ], + "spans": [ + { + "bbox": [ + 58, + 121, + 295, + 153 + ], + "type": "text", + "content": "[2] Nurwahyu Alamsyah, Muhamad Amirul Haq, and Chayadi Oktomy Noto Susanto. 2024. Automated Smart City Planning through Personalized Large Language Model with Retrieval Augmented Generation. In 2024 International Conference on Information Technology and Computing (ICITCOM). IEEE, 306-311." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 154, + 294, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 154, + 294, + 177 + ], + "spans": [ + { + "bbox": [ + 58, + 154, + 294, + 177 + ], + "type": "text", + "content": "[3] Sarah Alnegheimish, Linh Nguyen, Laure Berti-Equille, and Kalyan Veeramacheneni. 2024. Large language models can be zero-shot anomaly detectors for time series? arXiv preprint arXiv:2405.14755 (2024)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 58, + 178, + 294, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 178, + 294, + 209 + ], + "spans": [ + { + "bbox": [ + 58, + 178, + 294, + 209 + ], + "type": "text", + "content": "[4] Abdul Fatir Ansari, Lorenzo Stella, Caner Turkmen, Xiyuan Zhang, Pedro Mercado, Huibin Shen, Oleksandr Shchur, Syama Sundar Rangapuram, Sebastian Pineda Arango, Shubham Kapoor, et al. 2024. Chronos: Learning the language of time series. arXiv preprint arXiv:2403.07815 (2024)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 58, + 209, + 294, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 209, + 294, + 232 + ], + "spans": [ + { + "bbox": [ + 58, + 209, + 294, + 232 + ], + "type": "text", + "content": "[5] Growtham Atluri, Anuj Karpatne, and Vipin Kumar. 2018. Spatio-temporal data mining: A survey of problems and methods. ACM Computing Surveys (CSUR) 51, 4 (2018), 1-41." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 58, + 233, + 294, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 233, + 294, + 256 + ], + "spans": [ + { + "bbox": [ + 58, + 233, + 294, + 256 + ], + "type": "text", + "content": "[6] Lei Bai, Lina Yao, Can Li, Xianzhi Wang, and Can Wang. 2020. Adaptive graph convolutional recurrent network for traffic forecasting. In NeurIPS, Vol. 33. 17804-17815." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 58, + 257, + 294, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 257, + 294, + 289 + ], + "spans": [ + { + "bbox": [ + 58, + 257, + 294, + 289 + ], + "type": "text", + "content": "[7] Pasquale Balsebre, Weiming Huang, Gao Cong, and Yi Li. 2024. City foundation models for learning general purpose representations from openstreetmap. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 87-97." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 58, + 289, + 294, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 289, + 294, + 312 + ], + "spans": [ + { + "bbox": [ + 58, + 289, + 294, + 312 + ], + "type": "text", + "content": "[8] Sathya Kamesh Bhethanabhotla, Omar Swelam, Julien Siems, David Salinas, and Frank Hutter. 2024. Mamba4Cast: Efficient Zero-Shot Time Series Forecasting with State Space Models. arXiv preprint arXiv:2410.09385 (2024)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 58, + 313, + 294, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 313, + 294, + 336 + ], + "spans": [ + { + "bbox": [ + 58, + 313, + 294, + 336 + ], + "type": "text", + "content": "[9] Kaifeng Bi, Lingxi Xie, Hengheng Zhang, Xin Chen, Xiaotao Gu, and Qi Tian. 2023. Accurate medium-range global weather forecasting with 3D neural networks. Nature 619, 7970 (2023), 533-538." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 337, + 294, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 337, + 294, + 360 + ], + "spans": [ + { + "bbox": [ + 55, + 337, + 294, + 360 + ], + "type": "text", + "content": "[10] Kaifeng Bi, Lingxi Xie, Hengheng Zhang, Xin Chen, Xiaotao Gu, and Qi Tian. 2023. Accurate medium-range global weather forecasting with 3D neural networks. Nature 619, 7970 (2023), 533-538." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 361, + 294, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 361, + 294, + 392 + ], + "spans": [ + { + "bbox": [ + 55, + 361, + 294, + 392 + ], + "type": "text", + "content": "[11] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in neural information processing systems 33 (2020), 1877-1901." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 392, + 294, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 392, + 294, + 415 + ], + "spans": [ + { + "bbox": [ + 55, + 392, + 294, + 415 + ], + "type": "text", + "content": "[12] Defu Cao, Wen Ye, and Yan Liu. [n.d.]. TimeDiT: General-purpose Diffusion Transformers for Time Series Foundation Model. In ICML 2024 Workshop on Foundation Models in the Wild." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 55, + 416, + 294, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 416, + 294, + 439 + ], + "spans": [ + { + "bbox": [ + 55, + 416, + 294, + 439 + ], + "type": "text", + "content": "[13] Ching Chang, Wen-Chih Peng, and Tien-Fu Chen. 2023. LLM4TS: Two-Stage Fine-Tuning for Time-Series Forecasting with Pre-Trained LLMs. arXiv preprint arXiv:2308.08469 (2023)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 55, + 440, + 294, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 440, + 294, + 464 + ], + "spans": [ + { + "bbox": [ + 55, + 440, + 294, + 464 + ], + "type": "text", + "content": "[14] Jiaqi Chen, Bingqian Lin, Ran Xu, Zhenhua Chai, Xiaodan Liang, and KwanYee K Wong. 2024. Mapppt: Map-guided prompting with adaptive path planning for vision-and-language navigation. arXiv preprint arXiv:2401.07314 (2024)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 55, + 464, + 294, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 464, + 294, + 496 + ], + "spans": [ + { + "bbox": [ + 55, + 464, + 294, + 496 + ], + "type": "text", + "content": "[15] Kang Chen, Tao Han, Junchao Gong, Lei Bai, Fenghua Ling, Jing-Jia Luo, Xi Chen, Leiming Ma, Tianning Zhang, Rui Su, et al. 2023. FengWu: Pushing the Skillful Global Medium-range Weather Forecast beyond 10 Days Lead. arXiv preprint arXiv:2304.02948 (2023)." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 55, + 496, + 294, + 527 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 496, + 294, + 527 + ], + "spans": [ + { + "bbox": [ + 55, + 496, + 294, + 527 + ], + "type": "text", + "content": "[16] Minze Chen, Zhenxiang Tao, Weitong Tang, Tingxin Qin, Rui Yang, and Chunli Zhu. 2024. Enhancing emergency decision-making with knowledge graphs and large language models. International Journal of Disaster Risk Reduction 113 (2024), 104804." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 55, + 528, + 294, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 528, + 294, + 552 + ], + "spans": [ + { + "bbox": [ + 55, + 528, + 294, + 552 + ], + "type": "text", + "content": "[17] Jinguo Cheng, Chunwei Yang, Wanlin Cai, Yuxuan Liang, Qingsong Wen, and Yuankai Wu. 2024. NuwaTS: a Foundation Model Mending Every Incomplete Time Series. arXiv preprint arXiv:2405.15317 (2024)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 55, + 552, + 294, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 552, + 294, + 575 + ], + "spans": [ + { + "bbox": [ + 55, + 552, + 294, + 575 + ], + "type": "text", + "content": "[18] Mingyue Cheng, Yiheng Chen, Qi Liu, Zhiding Liu, and Yucong Luo. 2024. Advancing time series classification with multimodal language modeling. arXiv preprint arXiv:2403.12371 (2024)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 55, + 576, + 294, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 576, + 294, + 608 + ], + "spans": [ + { + "bbox": [ + 55, + 576, + 294, + 608 + ], + "type": "text", + "content": "[19] Garima Chhikara, Anurag Sharma, V Gurucharan, Kripabandhu Ghosh, and Abhijnan Chakraborty. 2024. LaMSUM: Amplifying Voices Against Harassment through LLM Guided Extractive Summarization of User Incident Reports. arXiv preprint arXiv:2406.15809 (2024)." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 55, + 609, + 294, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 609, + 294, + 647 + ], + "spans": [ + { + "bbox": [ + 55, + 609, + 294, + 647 + ], + "type": "text", + "content": "[20] Tushar Choudhary, Vikrant Dewangan, Shivam Chandhok, Shubham Priyadarshan, Anushka Jain, Arun K Singh, Siddharth Srivastava, Krishna Murthy Jatavalabhula, and K Madhava Krishna. 2024. Talk2BEV: Language-enhanced Bird's-eye view maps for autonomous driving. In 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 16345-16352." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 55, + 647, + 294, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 647, + 294, + 678 + ], + "spans": [ + { + "bbox": [ + 55, + 647, + 294, + 678 + ], + "type": "text", + "content": "[21] Chen Chu, Hengcai Zhang, and Feng Lu. 2023. TrajGDM: A New Trajectory Foundation Model for Simulating Human Mobility. In Proceedings of the 31st ACM International Conference on Advances in Geographic Information Systems. 1-2." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 55, + 679, + 294, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 679, + 294, + 703 + ], + "spans": [ + { + "bbox": [ + 55, + 679, + 294, + 703 + ], + "type": "text", + "content": "[22] Longchao Da, Minchiuan Gao, Hao Mei, and Hua Wei. 2023. Lm powered sim-to-real transfer for traffic signal control. arXiv preprint arXiv:2308.14284 (2023)." 
+ } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 320, + 87, + 559, + 716 + ], + "type": "list", + "angle": 0, + "index": 50, + "blocks": [ + { + "bbox": [ + 320, + 87, + 559, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 87, + 559, + 118 + ], + "spans": [ + { + "bbox": [ + 320, + 87, + 559, + 118 + ], + "type": "text", + "content": "[23] Gordon Dai, Weijia Zhang, Jinhan Li, Siqi Yang, Srihas Rao, Arthur Caetano, Misha Sra, et al. 2024. Artificial leviathan: Exploring social evolution of lIm agents through the lens of hobbesian social contract theory. arXiv preprint arXiv:2406.14373 (2024)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 320, + 119, + 559, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 119, + 559, + 159 + ], + "spans": [ + { + "bbox": [ + 320, + 119, + 559, + 159 + ], + "type": "text", + "content": "[24] Zifeng Ding, Heling Cai, Jingpei Wu, Yunpu Ma, Ruotong Liao, Bo Xiong, and Volker Tresp. 2024. zrLLM: Zero-Shot Relational Learning on Temporal Knowledge Graphs with Large Language Models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 1877-1895." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 320, + 159, + 559, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 159, + 559, + 190 + ], + "spans": [ + { + "bbox": [ + 320, + 159, + 559, + 190 + ], + "type": "text", + "content": "[25] Quang Minh Dinh, Minh Khoi Ho, Anh Quan Dang, and Hung Phong Tran. 2024. Trafficvlm: A controllable visual language model for traffic video captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshop. 7134-7143." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 320, + 191, + 559, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 191, + 559, + 206 + ], + "spans": [ + { + "bbox": [ + 320, + 191, + 559, + 206 + ], + "type": "text", + "content": "[26] Manqing Dong, Hao Huang, and Longbing Cao. 2024. Can LLMs Serve As Time Series Anomaly Detectors? arXiv preprint arXiv:2408.03475 (2024)." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 320, + 206, + 559, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 206, + 559, + 238 + ], + "spans": [ + { + "bbox": [ + 320, + 206, + 559, + 238 + ], + "type": "text", + "content": "[27] Vitor Gaboardi dos Santos, Guto Leoni Santos, Theo Lynn, and Boualem Benatallah. 2024. Identifying Citizen-Related Issues from Social Media Using LLM-Based Data Augmentation. In International Conference on Advanced Information Systems Engineering. Springer, 531-546." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 320, + 238, + 559, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 238, + 559, + 261 + ], + "spans": [ + { + "bbox": [ + 320, + 238, + 559, + 261 + ], + "type": "text", + "content": "[28] Jie Feng, Yuwei Du, Jie Zhao, and Yong Li. 2024. Agentmove: Predicting human mobility anywhere using large language model based agentic framework. arXiv preprint arXiv:2408.13986 (2024)." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 320, + 262, + 559, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 262, + 559, + 294 + ], + "spans": [ + { + "bbox": [ + 320, + 262, + 559, + 294 + ], + "type": "text", + "content": "[29] Chen Gao, Xiaochong Lan, Nian Li, Yuan Yuan, Jingtao Ding, Zhilun Zhou, Fengli Xu, and Yong Li. 2024. Large language models empowered agent-based modeling and simulation: A survey and perspectives. *Humanities and Social Sciences Communications* 11, 1 (2024), 1-24." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 320, + 294, + 559, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 294, + 559, + 318 + ], + "spans": [ + { + "bbox": [ + 320, + 294, + 559, + 318 + ], + "type": "text", + "content": "[30] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. 2023. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 (2023)." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 320, + 318, + 559, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 318, + 559, + 342 + ], + "spans": [ + { + "bbox": [ + 320, + 318, + 559, + 342 + ], + "type": "text", + "content": "[31] Vinicius G Goecks and Nicholas R Waytowich. 2023. Disasterresponsept: Large language models for accelerated plan of action development in disaster response scenarios. arXiv preprint arXiv:2306.17271 (2023)." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 320, + 342, + 559, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 342, + 559, + 365 + ], + "spans": [ + { + "bbox": [ + 320, + 342, + 559, + 365 + ], + "type": "text", + "content": "[32] Adam Goodge, Wee Siong Ng, Bryan Hooi, and See Kiong Ng. 2025. Spatio-Temporal Foundation Models: Vision, Challenges, and Opportunities. arXiv preprint arXiv:2501.09045 (2025)." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 320, + 365, + 559, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 365, + 559, + 389 + ], + "spans": [ + { + "bbox": [ + 320, + 365, + 559, + 389 + ], + "type": "text", + "content": "[33] Nate Gruver, Marc Finzi, Shikai Qiu, and Andrew Gordon Wilson. 2023. Large language models are zero-shot time series forecasters. Advances in neural information processing systems (2023)." 
+ } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 320, + 389, + 559, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 389, + 559, + 414 + ], + "spans": [ + { + "bbox": [ + 320, + 389, + 559, + 414 + ], + "type": "text", + "content": "[34] Nate Gruver, Marc Finzi, Shikai Qiu, and Andrew G Wilson. 2024. Large language models are zero-shot time series forecasters. Advances in Neural Information Processing Systems 36 (2024)." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 320, + 414, + 559, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 414, + 559, + 437 + ], + "spans": [ + { + "bbox": [ + 320, + 414, + 559, + 437 + ], + "type": "text", + "content": "[35] Qinghua Guan, Jinhui Ouyang, Di Wu, and Weiren Yu. 2024. CityGPT: Towards Urban IoT Learning, Analysis and Interaction with Multi-Agent System. arXiv preprint arXiv:2405.14691 (2024)." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 320, + 437, + 559, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 437, + 559, + 469 + ], + "spans": [ + { + "bbox": [ + 320, + 437, + 559, + 469 + ], + "type": "text", + "content": "[36] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025)." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 320, + 469, + 559, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 469, + 559, + 493 + ], + "spans": [ + { + "bbox": [ + 320, + 469, + 559, + 493 + ], + "type": "text", + "content": "[37] Shengnan Guo, Youfang Lin, Ning Feng, Chao Song, and Huaiyu Wan. 2019. Attention based spatial-temporal graph convolutional networks for traffic flow forecasting. In AAAI, Vol. 33: 922-929." 
+ } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 320, + 493, + 559, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 493, + 559, + 525 + ], + "spans": [ + { + "bbox": [ + 320, + 493, + 559, + 525 + ], + "type": "text", + "content": "[38] Devashish Vikas Gupta, Azeez Syed Ali Ishaqui, and Divya Kiran Kadiyala. 2024. Geode: A Zero-shot Geospatial Question-Answering Agent with Explicit Reasoning and Precise Spatio-Temporal Retrieval. arXiv preprint arXiv:2407.11014 (2024)." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 320, + 525, + 559, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 525, + 559, + 541 + ], + "spans": [ + { + "bbox": [ + 320, + 525, + 559, + 541 + ], + "type": "text", + "content": "[39] Wes Gurnee and Max Tegmark. 2023. Language models represent space and time. arXiv preprint arXiv:2310.02207 (2023)." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 320, + 541, + 559, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 541, + 559, + 572 + ], + "spans": [ + { + "bbox": [ + 320, + 541, + 559, + 572 + ], + "type": "text", + "content": "[40] Xixuan Hao, Wei Chen, Yibo Yan, Siru Zhong, Kun Wang, Qingsong Wen, and Yuxuan Liang. 2024. UrbanVLP: A Multi-Granularity Vision-Language Pre-Trained Foundation Model for Urban Indicator Prediction. arXiv preprint arXiv:2403.16831 (2024)." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 320, + 572, + 559, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 572, + 559, + 597 + ], + "spans": [ + { + "bbox": [ + 320, + 572, + 559, + 597 + ], + "type": "text", + "content": "[41] Ce Hou, Fan Zhang, Yong Li, Haifeng Li, Gengchen Mai, Yuhao Kang, Ling Yao, Wenhao Yu, Yao Yao, Song Gao, et al. 2025. Urban sensing in the era of large language models. The Innovation 6, 1 (2025)." 
+ } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 320, + 597, + 559, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 597, + 559, + 620 + ], + "spans": [ + { + "bbox": [ + 320, + 597, + 559, + 620 + ], + "type": "text", + "content": "[42] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685 (2021)." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 320, + 620, + 559, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 620, + 559, + 644 + ], + "spans": [ + { + "bbox": [ + 320, + 620, + 559, + 644 + ], + "type": "text", + "content": "[43] Jiaxi Hu, Disen Lan, Ziyu Zhou, Qingsong Wen, and Yuxuan Liang. 2024. TimeSSM: Simplifying and Unifying State Space Models for Time Series Forecasting. arXiv preprint arXiv:2405.16312 (2024)." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 320, + 644, + 559, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 644, + 559, + 677 + ], + "spans": [ + { + "bbox": [ + 320, + 644, + 559, + 677 + ], + "type": "text", + "content": "[44] Yiheng Huang, Xiaowei Mao, Shengnan Guo, Yubin Chen, Junfeng Shen, Tiankuo Li, Youfang Lin, and Huaiyu Wan. 2024. STD-PLM: Understanding Both Spatial and Temporal Properties of Spatial-Temporal Data with PLM. arXiv preprint arXiv:2407.09096 (2024)." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 320, + 677, + 559, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 677, + 559, + 700 + ], + "spans": [ + { + "bbox": [ + 320, + 677, + 559, + 700 + ], + "type": "text", + "content": "[45] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. 2024. Gpt-40 system card. arXiv preprint arXiv:2410.21276 (2024)." 
+ } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 320, + 700, + 559, + 716 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 700, + 559, + 716 + ], + "spans": [ + { + "bbox": [ + 320, + 700, + 559, + 716 + ], + "type": "text", + "content": "[46] Junzhong Ji, Fan Yu, and Minglong Lei. 2022. Self-Supervised Spatiotemporal Graph Neural Networks With Self-Distillation for Traffic Prediction. IEEE TITS" + } + ] + } + ], + "index": 49 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 273, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 273, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 273, + 69 + ], + "type": "text", + "content": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 87, + 294, + 709 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 71, + 87, + 90, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 87, + 90, + 95 + ], + "spans": [ + { + "bbox": [ + 71, + 87, + 90, + 95 + ], + "type": "text", + "content": "(2022)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 95, + 294, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 95, + 294, + 119 + ], + "spans": [ + { + "bbox": [ + 56, + 95, + 294, + 119 + ], + "type": "text", + "content": "[47] Yue Jiang, Qin Chao, Yile Chen, Xiucheng Li, Shuai Liu, and Gao Cong. 2024. 
UrbanLLM: Autonomous Urban Activity Planning and Management with Large Language Models. arXiv preprint arXiv:2406.12360 (2024)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 119, + 294, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 119, + 294, + 167 + ], + "spans": [ + { + "bbox": [ + 56, + 119, + 294, + 167 + ], + "type": "text", + "content": "[48] Yushan Jiang, Zijie Pan, Xikun Zhang, Sahil Garg, Anderson Schneider, Yuriy Nevmyvaka, and Dongjin Song. 2024. Empowering Time Series Analysis with Large Language Models: A Survey. In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, IfCAI-24, Kate Larson (Ed.). International Joint Conferences on Artificial Intelligence Organization, 8095-8103. https://doi.org/10.24963/ijcai.2024/895 Survey Track." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 167, + 294, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 167, + 294, + 191 + ], + "spans": [ + { + "bbox": [ + 56, + 167, + 294, + 191 + ], + "type": "text", + "content": "[49] Yushan Jiang, Wenzhao Yu, Geon Lee, Dongjin Song, Kijung Shin, Wei Cheng, Yanchi Liu, and Haifeng Chen. 2026. Explanable Multi-modal Time Series Prediction with LLM-in-the-Loop. arXiv preprint arXiv:2503.01013 (2026)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 191, + 294, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 191, + 294, + 223 + ], + "spans": [ + { + "bbox": [ + 56, + 191, + 294, + 223 + ], + "type": "text", + "content": "[50] WANG JIAWEI, Renhe Jiang, Chuang Yang, Zengqing Wu, Ryosuke Shibasaki, Noboru Koshizuka, Chuan Xiao, et al. 2024. Large language models as urban residents: An llm agent framework for personal mobility generation. Advances in Neural Information Processing Systems 37 (2024), 124547-124574." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 223, + 294, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 223, + 294, + 255 + ], + "spans": [ + { + "bbox": [ + 56, + 223, + 294, + 255 + ], + "type": "text", + "content": "[51] Guangyin Jin, Yuxuan Liang, Yuchen Fang, Zezhi Shao, Jincai Huang, Junbo Zhang, and Yu Zheng. 2023. Spatio-temporal graph neural networks for predictive learning in urban computing: A survey. IEEE Transactions on Knowledge and Data Engineering (2023)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 255, + 294, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 255, + 294, + 286 + ], + "spans": [ + { + "bbox": [ + 56, + 255, + 294, + 286 + ], + "type": "text", + "content": "[52] Ming Jin, Huan Yee Koh, Qingsong Wen, Daniele Zambon, Cesare Alippi, Geoffrey I Webb, Irwin King, and Shirui Pan. 2024. A survey on graph neural networks for time series: Forecasting, classification, imputation, and anomaly detection. IEEE Transactions on Pattern Analysis and Machine Intelligence (2024)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 286, + 294, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 286, + 294, + 318 + ], + "spans": [ + { + "bbox": [ + 56, + 286, + 294, + 318 + ], + "type": "text", + "content": "[53] Ming Jin, Shiyu Wang, Lintao Ma, Zhixuan Chu, James Y Zhang, Xiaoming Shi, Pin-Yu Chen, Yuxuan Liang, Yuan-Fang Li, Shirui Pan, et al. 2023. Time-LLM: Time series forecasting by reprogramming large language models. arXiv preprint arXiv:2310.01728 (2023)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 318, + 294, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 318, + 294, + 350 + ], + "spans": [ + { + "bbox": [ + 56, + 318, + 294, + 350 + ], + "type": "text", + "content": "[54] Ming Jin, Qingsong Wen, Yuxuan Liang, Chaoli Zhang, Siqiao Xue, Xue Wang, James Zhang, Yi Wang, Haifeng Chen, Xiaoli Li, et al. 2023. Large models for time series and spatio-temporal data: A survey and outlook. arXiv preprint arXiv:2310.10196 (2023)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 350, + 294, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 350, + 294, + 382 + ], + "spans": [ + { + "bbox": [ + 56, + 350, + 294, + 382 + ], + "type": "text", + "content": "[55] Ye Jin, Xiaoxi Shen, Huiling Peng, Xiaohan Liu, Jingli Qin, Jiayang Li, Jintao Xie, Peizhong Gao, Guyue Zhou, and Jiangtao Gong. 2023. Surrealdriver: Designing generative driver agent simulation framework in urban contexts based on large language model. arXiv preprint arXiv:2309.13193 (2023)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 382, + 294, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 382, + 294, + 406 + ], + "spans": [ + { + "bbox": [ + 56, + 382, + 294, + 406 + ], + "type": "text", + "content": "[56] Chenlu Ju, Jiaxin Liu, Shobhit Sinha, Hao Xue, and Flora Salim. 2025. TrajLLM: A Modular LLM-Enhanced Agent-Based Framework for Realistic Human Trajectory Simulation. (2025)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 406, + 294, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 406, + 294, + 437 + ], + "spans": [ + { + "bbox": [ + 56, + 406, + 294, + 437 + ], + "type": "text", + "content": "[57] Subbarao Kambhampati, Karthik Valmeekam, Lin Guan, Mudit Verma, Kaya Stechly, Siddhant Bhambri, Lucas Saldyt, and Anil Murthy. 2024. 
LLMs can't plan, but can help planning in LLM-modulo frameworks. arXiv preprint arXiv:2402.01817 (2024)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 437, + 294, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 437, + 294, + 461 + ], + "spans": [ + { + "bbox": [ + 56, + 437, + 294, + 461 + ], + "type": "text", + "content": "[58] Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of naacL-HLT, Vol. 1. Minneapolis, Minnesota." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 461, + 294, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 461, + 294, + 493 + ], + "spans": [ + { + "bbox": [ + 56, + 461, + 294, + 493 + ], + "type": "text", + "content": "[59] Dmitrii Kochkov, Janni Yuval, Ian Langmore, Peter Norgaard, Jamie Smith, Griffin Mooers, Milan Klower, James Lottes, Stephan Rasp, Peter Duben, et al. 2024. Neural general circulation models for weather and climate. Nature 632, 8027 (2024), 1060–1066." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 493, + 294, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 493, + 294, + 525 + ], + "spans": [ + { + "bbox": [ + 56, + 493, + 294, + 525 + ], + "type": "text", + "content": "[60] Alexandre Lacoste, Nils Lehmann, Pau Rodriguez, Evan Sherwin, Hannah Kerner, Björn Lütjens, Jeremy Irvin, David Dao, Hamed Alemohammad, Alexandre Drouin, et al. 2024. Geo-bench: Toward foundation models for earth monitoring. Advances in Neural Information Processing Systems 36 (2024)." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 525, + 294, + 549 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 525, + 294, + 549 + ], + "spans": [ + { + "bbox": [ + 56, + 525, + 294, + 549 + ], + "type": "text", + "content": "[61] Siqi Lai, Zhao Xu, Weijia Zhang, Hao Liu, and Hui Xiong. 2025. Large language models as traffic signal control agents: Capacity and opportunity. In Proceedings of the 31st ACM SIGKDD conference on knowledge discovery and data mining." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 56, + 549, + 294, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 549, + 294, + 582 + ], + "spans": [ + { + "bbox": [ + 56, + 549, + 294, + 582 + ], + "type": "text", + "content": "[62] Remi Lam, Alvaro Sanchez-Gonzalez, Matthew Willson, Peter Wirsnsberger, Meire Fortunato, Ferran Alet, Suman Ravuri, Timo Ewalds, Zach Eaton-Rosen, Weihua Hu, et al. 2023. GraphCast: Learning skillful medium-range global weather forecasting. Science 382, 6677 (2023), 1416-1421." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 56, + 582, + 294, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 582, + 294, + 605 + ], + "spans": [ + { + "bbox": [ + 56, + 582, + 294, + 605 + ], + "type": "text", + "content": "[63] Geon Lee, Wenchao Yu, Kijung Shin, Wei Cheng, and Haifeng Chen. 2025. TimeCAP: Learning to Contextualize, Augment, and Predict Time Series Events with Large Language Model Agents. arXiv preprint arXiv:2502.11418 (2025)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 56, + 605, + 294, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 605, + 294, + 628 + ], + "spans": [ + { + "bbox": [ + 56, + 605, + 294, + 628 + ], + "type": "text", + "content": "[64] Mingcong Lei, Yiming Zhao, Ge Wang, Zhixin Mai, Shuguang Cui, Yatong Han, and Jinke Ren. 2025. 
STMA: A Spatio-Temporal Memory Agent for Long-Horizon Embodied Task Planning. arXiv preprint arXiv:2502.10177 (2025)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 56, + 629, + 294, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 629, + 294, + 653 + ], + "spans": [ + { + "bbox": [ + 56, + 629, + 294, + 653 + ], + "type": "text", + "content": "[65] Zhenyu Lei, Yushun Dong, Weiyu Li, Rong Ding, Qi Wang, and Jundong Li. 2025. Harnessing Large Language Models for Disaster Management: A Survey. arXiv preprint arXiv:2501.06932 (2025)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 56, + 653, + 294, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 653, + 294, + 685 + ], + "spans": [ + { + "bbox": [ + 56, + 653, + 294, + 685 + ], + "type": "text", + "content": "[66] Jiangtong Li, Li Niu, and Liqing Zhang. 2022. From representation to reasoning: Towards both evidence and commonsense reasoning for video question-answering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 21273–21282." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 56, + 685, + 294, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 685, + 294, + 709 + ], + "spans": [ + { + "bbox": [ + 56, + 685, + 294, + 709 + ], + "type": "text", + "content": "[67] Jinpeng Li, Haiping Wang, Yuan Liu, Zhiyang Dou, Yuexin Ma, Sibei Yang, Yuan Li, Wenping Wang, Zhen Dong, Bisheng Yang, et al. [n.d.]. CityAnchor: City-scale 3D Visual Grounding with Multi-modality LLMs. 
In The Thirteenth" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 320, + 87, + 559, + 709 + ], + "type": "list", + "angle": 0, + "index": 48, + "blocks": [ + { + "bbox": [ + 335, + 87, + 486, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 87, + 486, + 95 + ], + "spans": [ + { + "bbox": [ + 335, + 87, + 486, + 95 + ], + "type": "text", + "content": "International Conference on Learning Representations." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 320, + 95, + 559, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 95, + 559, + 127 + ], + "spans": [ + { + "bbox": [ + 320, + 95, + 559, + 127 + ], + "type": "text", + "content": "[68] Peibo Li, Maarten de Rijke, Hao Xue, Shuang Ao, Yang Song, and Flora D Salim. 2024. Large language models for next point-of-interest recommendation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1463-1472." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 320, + 127, + 559, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 127, + 559, + 158 + ], + "spans": [ + { + "bbox": [ + 320, + 127, + 559, + 158 + ], + "type": "text", + "content": "[69] Wenbin Li, Di Yao, Ruibo Zhao, Wenjie Chen, Zijie Xu, Chengxue Luo, Chang Gong, Quanliang Jing, Haining Tan, and Jingping Bi. 2024. STBench: Assessing the ability of large language models in spatio-temporal analysis. arXiv preprint arXiv:2406.19065 (2024)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 320, + 159, + 559, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 159, + 559, + 182 + ], + "spans": [ + { + "bbox": [ + 320, + 159, + 559, + 182 + ], + "type": "text", + "content": "[70] Yaguang Li, Rose Yu, Cyrus Shahabi, and Yan Liu. 2017. Diffusion convolutional recurrent neural network: Data-driven traffic forecasting. 
arXiv preprint arXiv:1707.01926 (2017)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 320, + 182, + 559, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 182, + 559, + 206 + ], + "spans": [ + { + "bbox": [ + 320, + 182, + 559, + 206 + ], + "type": "text", + "content": "[71] Zekun Li, Jina Kim, Yao-Yi Chiang, and Muhao Chen. 2022. SpaBERT: A pretrained language model from geographic data for geo-entity representation. arXiv preprint arXiv:2210.12213 (2022)." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 320, + 206, + 559, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 206, + 559, + 231 + ], + "spans": [ + { + "bbox": [ + 320, + 206, + 559, + 231 + ], + "type": "text", + "content": "[72] Zhonghang Li, Long Xia, Lei Shi, Yong Xu, Dawei Yin, and Chao Huang. 2024. Opencity: Open spatio-temporal foundation models for traffic prediction. arXiv preprint arXiv:2408.10269 (2024)." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 320, + 231, + 559, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 231, + 559, + 262 + ], + "spans": [ + { + "bbox": [ + 320, + 231, + 559, + 262 + ], + "type": "text", + "content": "[73] Zhonghang Li, Lianghao Xia, Jiabin Tang, Yong Xu, Lei Shi, Long Xia, Dawei Yin, and Chao Huang. 2024. Urbangpt: Spatio-temporal large language models. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 5351-5362." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 320, + 262, + 559, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 262, + 559, + 286 + ], + "spans": [ + { + "bbox": [ + 320, + 262, + 559, + 286 + ], + "type": "text", + "content": "[74] Zhonghang Li, Lianghao Xia, Yong Xu, and Chao Huang. 2024. GPT-ST: generative pre-training of spatio-temporal graph neural networks. Advances in Neural Information Processing Systems 36 (2024)." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 320, + 286, + 559, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 286, + 559, + 311 + ], + "spans": [ + { + "bbox": [ + 320, + 286, + 559, + 311 + ], + "type": "text", + "content": "[75] Zongrong Li, Junhao Xu, Siqin Wang, Yifan Wu, and Haiyang Li. 2024. StreetviewLLM: Extracting Geographic Information Using a Chain-of-Thought Multimodal Large Language Model. arXiv preprint arXiv:2411.14476 (2024)." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 320, + 311, + 559, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 311, + 559, + 342 + ], + "spans": [ + { + "bbox": [ + 320, + 311, + 559, + 342 + ], + "type": "text", + "content": "[76] Zhe Li, Ronghui Xu, Jilin Hu, Zhong Peng, Xi Lu, Chenjuan Guo, and Bin Yang. 2024. Ocean Significant Wave Height Estimation with Spatio-temporally Aware Large Language Models. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 3892-3896." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 320, + 342, + 559, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 342, + 559, + 365 + ], + "spans": [ + { + "bbox": [ + 320, + 342, + 559, + 365 + ], + "type": "text", + "content": "[77] Zekun Li, Wenxuan Zhou, Yao-Yi Chiang, and Muhao Chen. 2023. Geolm: Empowering language models for geospatially grounded language understanding. arXiv preprint arXiv:2310.14478 (2023)." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 320, + 365, + 559, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 365, + 559, + 389 + ], + "spans": [ + { + "bbox": [ + 320, + 365, + 559, + 389 + ], + "type": "text", + "content": "[78] Yuxuan Liang, Songyu Ke, Junbo Zhang, Xiwen Yi, and Yu Zheng. 2018. Geom: Multi-level attention networks for geo-sensory time series prediction.. In ICAI, Vol. 2018. 3428-3434." 
+ } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 320, + 389, + 559, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 389, + 559, + 414 + ], + "spans": [ + { + "bbox": [ + 320, + 389, + 559, + 414 + ], + "type": "text", + "content": "[79] Yuebing Liang, Yichao Liu, Xiaohan Wang, and Zhan Zhao. 2023. Exploring large language models for human mobility prediction under public events. arXiv preprint arXiv:2311.17351 (2023)." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 320, + 414, + 559, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 414, + 559, + 445 + ], + "spans": [ + { + "bbox": [ + 320, + 414, + 559, + 445 + ], + "type": "text", + "content": "[80] Yuxuan Liang, Kun Ouyang, Yiwei Wang, Zheyi Pan, Yifang Yin, Hongyang Chen, Junbo Zhang, Yu Zheng, David S Rosenblum, and Roger Zimmermann. 2022. Mixed-Order Relation-Aware Recurrent Neural Networks for Spatio-Temporal Forecasting. IEEE TKDE (2022)." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 320, + 445, + 559, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 445, + 559, + 478 + ], + "spans": [ + { + "bbox": [ + 320, + 445, + 559, + 478 + ], + "type": "text", + "content": "[81] Yuxuan Liang, Haomin Wen, Yuqi Nie, Yushan Jiang, Ming Jin, Dongjin Song, Shirui Pan, and Qingsong Wen. 2024. Foundation models for time series analysis: A tutorial and survey. In Proceedings of the 30th ACM SIGKDD conference on knowledge discovery and data mining. 6555-6565." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 320, + 478, + 559, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 478, + 559, + 510 + ], + "spans": [ + { + "bbox": [ + 320, + 478, + 559, + 510 + ], + "type": "text", + "content": "[82] Yuxuan Liang, Yutong Xia, Songyu Ke, Yiwei Wang, Qingsong Wen, Junbo Zhang, Yu Zheng, and Roger Zimmermann. 2023. 
Airformer: Predicting nationwide air quality in china with transformers. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 37. 14329-14337." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 320, + 510, + 559, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 510, + 559, + 533 + ], + "spans": [ + { + "bbox": [ + 320, + 510, + 559, + 533 + ], + "type": "text", + "content": "[83] Jaesung Lim, Seunghwan An, Gyeongdong Woo, ChangHyun Kim, and Jong-June Jeon. [n.d.]. Context-Driven Missing Data Imputation via Large Language Model. ([n.d.])." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 320, + 533, + 559, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 533, + 559, + 564 + ], + "spans": [ + { + "bbox": [ + 320, + 533, + 559, + 564 + ], + "type": "text", + "content": "[84] Yan Lin, Yichen Liu, Zeyu Zhou, Haomin Wen, Erwen Zheng, Shengnan Guo, Youfang Lin, and Huaiyu Wan. 2024. PTraJM: Efficient and Semantic-rich Trajectory Learning with Pretrained Trajectory-Mamba. arXiv preprint arXiv:2408.04916 (2024)." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 320, + 564, + 559, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 564, + 559, + 597 + ], + "spans": [ + { + "bbox": [ + 320, + 564, + 559, + 597 + ], + "type": "text", + "content": "[85] Yan Lin, Tonglong Wei, Zeyu Zhou, Haomin Wen, Jilin Hu, Shengnan Guo, Youfang Lin, and Huaiyu Wan. 2024. TrajFM: A Vehicle Trajectory Foundation Model for Region and Task Transferability. arXiv preprint arXiv:2408.15251 (2024)." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 320, + 597, + 559, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 597, + 559, + 621 + ], + "spans": [ + { + "bbox": [ + 320, + 597, + 559, + 621 + ], + "type": "text", + "content": "[86] Chenxi Liu, Sun Yang, Qianxiong Xu, Zhishuai Li, Cheng Long, Ziyue Li, and Rui Zhao. 2024. 
Spatial-temporal large language model for traffic prediction. arXiv preprint arXiv:2401.10134 (2024)." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 320, + 621, + 559, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 621, + 559, + 653 + ], + "spans": [ + { + "bbox": [ + 320, + 621, + 559, + 653 + ], + "type": "text", + "content": "[87] Haoxin Liu, Zhiyuan Zhao, Jindong Wang, Harshavardhan Kamarthi, and B Aditya Prakash. 2024. Lstprompt: Large language models as zero-shot time series forecasters by long-short-term prompting. arXiv preprint arXiv:2402.16132 (2024)." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 320, + 653, + 559, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 653, + 559, + 685 + ], + "spans": [ + { + "bbox": [ + 320, + 653, + 559, + 685 + ], + "type": "text", + "content": "[88] Jiaqi Liu, Peng Hang, Xiaocong Zhao, Jianqiang Wang, and Jian Sun. 2024. DDM-lag: A diffusion-based decision-making model for autonomous vehicles with lagrangian safety enhancement. IEEE Transactions on Artificial Intelligence (2024)." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 320, + 685, + 559, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 685, + 559, + 709 + ], + "spans": [ + { + "bbox": [ + 320, + 685, + 559, + 709 + ], + "type": "text", + "content": "[89] Jun Liu, Chaoyun Zhang, Jiaxu Qian, Minghua Ma, Si Qin, Chetan Bansal, Qingwei Lin, Saravanan Rajmohan, and Dongmei Zhang. 2024. Large language models can deliver accurate and interpretable time series anomaly detection." 
+ } + ] + } + ], + "index": 47 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 192, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 192, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 192, + 69 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "type": "text", + "content": "Yuxuan Liang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 87, + 294, + 715 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 71, + 87, + 180, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 87, + 180, + 95 + ], + "spans": [ + { + "bbox": [ + 71, + 87, + 180, + 95 + ], + "type": "text", + "content": "arXiv preprint arXiv:2405.15370 (2024)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 95, + 294, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 95, + 294, + 118 + ], + "spans": [ + { + "bbox": [ + 56, + 95, + 294, + 118 + ], + "type": "text", + "content": "[90] Lei Liu, Shuo Yu, Runze Wang, Zhenxun Ma, and Yanming Shen. 2024. How can large language models understand spatial-temporal data? arXiv preprint arXiv:2401.14192 (2024)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 119, + 294, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 119, + 294, + 150 + ], + "spans": [ + { + "bbox": [ + 56, + 119, + 294, + 150 + ], + "type": "text", + "content": "[91] Xu Liu, Junfeng Hu, Yuan Li, Shizhe Diao, Yuxuan Liang, Bryan Hooi, and Roger Zimmermann. 2024. 
Unitime: A language-empowered unified model for cross-domain time series forecasting. In Proceedings of the ACM Web Conference 2024. 4095-4106." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 151, + 294, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 151, + 294, + 175 + ], + "spans": [ + { + "bbox": [ + 56, + 151, + 294, + 175 + ], + "type": "text", + "content": "[92] Xu Liu, Yuxuan Liang, Chao Huang, Yu Zheng, Bryan Hooi, and Roger Zimmermann. 2022. When do contrastive learning signals help spatio-temporal graph forecasting? In SIGSPATIAL. 1-12." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 175, + 294, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 175, + 294, + 206 + ], + "spans": [ + { + "bbox": [ + 56, + 175, + 294, + 206 + ], + "type": "text", + "content": "[93] Xu Liu, Juncheng Liu, Gerald Woo, Taha Aksu, Yuxuan Liang, Roger Zimmermann, Chenghao Liu, Silvio Savarese, Caiming Xiong, and Doyen Sahoo. 2024. Moirai-MoE: Empowering Time Series Foundation Models with Sparse Mixture of Experts. arXiv preprint arXiv:2410.10469 (2024)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 206, + 294, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 206, + 294, + 230 + ], + "spans": [ + { + "bbox": [ + 56, + 206, + 294, + 230 + ], + "type": "text", + "content": "[94] Yu Liu, Jingtao Ding, Yanjie Fu, and Yong Li. 2023. Urban knowledge graph system. ACM Transactions on Intelligent Systems and Technology 14, 4 (2023), 1-25." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 230, + 294, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 230, + 294, + 255 + ], + "spans": [ + { + "bbox": [ + 56, + 230, + 294, + 255 + ], + "type": "text", + "content": "[95] Lin Long, Rui Wang, Ruixuan Xiao, Junbo Zhao, Xiao Ding, Gang Chen, and Haobo Wang. 2024. 
On llms-driven synthetic data generation, curation, and evaluation: A survey. arXiv preprint arXiv:2406.15126 (2024)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 255, + 294, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 255, + 294, + 270 + ], + "spans": [ + { + "bbox": [ + 56, + 255, + 294, + 270 + ], + "type": "text", + "content": "[96] Qingyue Long, Yuan Yuan, and Yong Li. 2024. A Universal Model for Human Mobility Prediction. arXiv preprint arXiv:2412.15294 (2024)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 270, + 294, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 270, + 294, + 301 + ], + "spans": [ + { + "bbox": [ + 56, + 270, + 294, + 301 + ], + "type": "text", + "content": "[97] Haoyu Ma, Yushu Chen, Wenlai Zhao, Jinzhe Yang, Yingsheng Ji, Xinghua Xu, Xiaozhu Liu, Hao Jing, Shengzhuo Liu, and Guangwen Yang. 2024. A Mamba Foundation Model for Time Series Forecasting. arXiv preprint arXiv:2411.02941 (2024)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 302, + 294, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 302, + 294, + 326 + ], + "spans": [ + { + "bbox": [ + 56, + 302, + 294, + 326 + ], + "type": "text", + "content": "[98] Xin Man, Chenghong Zhang, Changyu Li, and Jie Shao. 2023. W-MAE: Pretrained weather model with masked autoencoder for multi-variable weather forecasting. arXiv preprint arXiv:2304.08754 (2023)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 326, + 294, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 326, + 294, + 350 + ], + "spans": [ + { + "bbox": [ + 56, + 326, + 294, + 350 + ], + "type": "text", + "content": "[99] Rohin Manvi, Samar Khanna, Gengchen Mai, Marshall Burke, David Lobell, and Stefano Ermon. 2023. Geolm: Extracting geospatial knowledge from large language models. arXiv preprint arXiv:2310.06213 (2023)." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 350, + 294, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 350, + 294, + 381 + ], + "spans": [ + { + "bbox": [ + 53, + 350, + 294, + 381 + ], + "type": "text", + "content": "[100] Rohin Manvi, Samar Khanna, Gengchen Mai, Marshall Burke, David B Lobell, and Stefano Ermon. 2024. GeoLLM: Extracting Geospatial Knowledge from Large Language Models. In The Twelfth International Conference on Learning Representations." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 382, + 294, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 382, + 294, + 406 + ], + "spans": [ + { + "bbox": [ + 53, + 382, + 294, + 406 + ], + "type": "text", + "content": "[101] Justin M Mittelstädt, Julia Maier, Panja Goerke, Frank Zinn, and Michael Hermes. 2024. Large language models can outperform humans in social situational judgments. Scientific Reports 14, 1 (2024), 27449." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 53, + 406, + 294, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 406, + 294, + 437 + ], + "spans": [ + { + "bbox": [ + 53, + 406, + 294, + 437 + ], + "type": "text", + "content": "[102] Seungwhan Moon, Andrea Madotto, Zhaojiang Lin, Aparajita Saraf, Amy Bearman, and Babak Damavandi. 2023. IMU2CLIP: Language-grounded Motion Sensor Translation with Multimodal Contrastive Learning. In Findings of the Association for Computational Linguistics: EMNLP 2023. 13246-13253." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 53, + 437, + 294, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 437, + 294, + 453 + ], + "spans": [ + { + "bbox": [ + 53, + 437, + 294, + 453 + ], + "type": "text", + "content": "[103] Alameen Najjar. 2023. Towards A Foundation Model For Trajectory Intelligence. In IEEE ICDMW. IEEE, 832-835." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 53, + 453, + 294, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 453, + 294, + 477 + ], + "spans": [ + { + "bbox": [ + 53, + 453, + 294, + 477 + ], + "type": "text", + "content": "[104] Tung Nguyen, Johannes Brandstetter, Ashish Kapoor, Jayesh K Gupta, and Aditya Grover. 2023. Climax: A foundation model for weather and climate. International Conference on Machine Learning (2023)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 53, + 478, + 294, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 478, + 294, + 501 + ], + "spans": [ + { + "bbox": [ + 53, + 478, + 294, + 501 + ], + "type": "text", + "content": "[105] Yansong Ning and Hao Liu. 2024. UrbanKGent: A Unified Large Language Model Agent Framework for Urban Knowledge Graph Construction. arXiv preprint arXiv:2402.06861 (2024)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 53, + 502, + 294, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 502, + 294, + 525 + ], + "spans": [ + { + "bbox": [ + 53, + 502, + 294, + 525 + ], + "type": "text", + "content": "[106] Yansong Ning, Hao Liu, Hao Wang, Zhenyu Zeng, and Hui Xiong. 2024. UUKG: unified urban knowledge graph dataset for urban spatiotemporal prediction. Advances in Neural Information Processing Systems 36 (2024)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 53, + 525, + 294, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 525, + 294, + 557 + ], + "spans": [ + { + "bbox": [ + 53, + 525, + 294, + 557 + ], + "type": "text", + "content": "[107] Joon Sung Park, Joseph O'Brien, Carrie Jun Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. 2023. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology. 1-22." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 53, + 557, + 294, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 557, + 294, + 597 + ], + "spans": [ + { + "bbox": [ + 53, + 557, + 294, + 597 + ], + "type": "text", + "content": "[108] Jaideep Pathak, Shashank Subramanian, Peter Harrington, Sanjeev Raja, Ashesh Chattopadhyay, Morteza Mardani, Thorsten Kurth, David Hall, Zongyi Li, Kamyar Azizzadenesheli, et al. 2022. Fourcastnet: A global data-driven high-resolution weather model using adaptive fourier neural operators. arXiv preprint arXiv:2202.11214 (2022)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 53, + 597, + 294, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 597, + 294, + 620 + ], + "spans": [ + { + "bbox": [ + 53, + 597, + 294, + 620 + ], + "type": "text", + "content": "[109] Jinghua Piao, Zhihong Lu, Chen Gao, Fengli Xu, Fernando P Santos, Yong Li, and James Evans. 2025. Emergence of human-like polarization among large language model agents. arXiv preprint arXiv:2501.05171 (2025)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 53, + 621, + 294, + 652 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 621, + 294, + 652 + ], + "spans": [ + { + "bbox": [ + 53, + 621, + 294, + 652 + ], + "type": "text", + "content": "[110] Arian Prabowo, Wei Shao, Hao Xue, Piotr Koniusz, and Flora D Salim. 2023. Because every sensor is unique, so is every pair: Handling dynamicity in traffic forecasting. In Proceedings of the 8th ACM/IEEE Conference on Internet of Things Design and Implementation. 93-104." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 53, + 653, + 294, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 653, + 294, + 677 + ], + "spans": [ + { + "bbox": [ + 53, + 653, + 294, + 677 + ], + "type": "text", + "content": "[111] Arian Prabowo, Hao Xue, Wei Shao, Piotr Koniusz, and Flora D Salim. 2024. 
Traffic forecasting on new roads using spatial contrastive pre-training (SCPT). Data Mining and Knowledge Discovery 38, 3 (2024), 913-937." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 53, + 677, + 294, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 677, + 294, + 715 + ], + "spans": [ + { + "bbox": [ + 53, + 677, + 294, + 715 + ], + "type": "text", + "content": "[112] Siyuan Qi, Shuo Chen, Yexin Li, Xiangyu Kong, Junqi Wang, Bangcheng Yang, Pring Wong, Yifan Zhong, Xiaoyuan Zhang, Zhaowei Zhang, et al. 2024. CivRealm: A Learning and Reasoning Odyssey in Civilization for Decision-Making Agents. In The Twelfth International Conference on Learning Representations." + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 87, + 558, + 701 + ], + "type": "list", + "angle": 0, + "index": 47, + "blocks": [ + { + "bbox": [ + 317, + 87, + 558, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 87, + 558, + 118 + ], + "spans": [ + { + "bbox": [ + 317, + 87, + 558, + 118 + ], + "type": "text", + "content": "[113] Kyle K Qin, Yongli Ren, Wei Shao, Brennan Lake, Filippo Privitera, and Flora D Salim. 2023. Multiple-level point embedding for solving human trajectory imputation with prediction. ACM Transactions on Spatial Algorithms and Systems 9, 2 (2023), 1-22." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 119, + 558, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 119, + 558, + 150 + ], + "spans": [ + { + "bbox": [ + 317, + 119, + 558, + 150 + ], + "type": "text", + "content": "[114] Hao Sha, Yao Mu, Yuxuan Jiang, Li Chen, Chenfeng Xu, Ping Luo, Shengbo Eben Li, Masayoshi Tomizuka, Wei Zhan, and Mingyu Ding. 2023. *Languagempc: Large language models as decision makers for autonomous driving.* arXiv preprint arXiv:2310.03026 (2023)." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 151, + 558, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 151, + 558, + 182 + ], + "spans": [ + { + "bbox": [ + 317, + 151, + 558, + 182 + ], + "type": "text", + "content": "[115] Jie-Jing Shao, Xiao-Wen Yang, Bo-Wen Zhang, Baizhi Chen, Wen-Da Wei, Lan-Zhe Guo, and Yu-feng Li. 2024. ChinaTravel: A Real-World Benchmark for Language Agents in Chinese Travel Planning. arXiv preprint arXiv:2412.13682 (2024)." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 182, + 558, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 182, + 558, + 214 + ], + "spans": [ + { + "bbox": [ + 317, + 182, + 558, + 214 + ], + "type": "text", + "content": "[116] Wei Shao, Zhiling Jin, Shuo Wang, Yufan Kang, Xiao Xiao, Hamid Menouar, Zhaofeng Zhang, Junshan Zhang, and Flora Salim. 2022. Long-term spatiotemporal forecasting via dynamic multiple-graph attention. In Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence, JFCAI-22." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 214, + 558, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 214, + 558, + 246 + ], + "spans": [ + { + "bbox": [ + 317, + 214, + 558, + 246 + ], + "type": "text", + "content": "[117] Zezhi Shao, Zhao Zhang, Fei Wang, and Yongjun Xu. 2022. Pre-training enhanced spatial-temporal graph neural network for multivariate time series forecasting. In Proceedings of the 28th ACM SIGKDD conference on knowledge discovery and data mining. 1567-1577." 
+ } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 317, + 247, + 558, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 247, + 558, + 278 + ], + "spans": [ + { + "bbox": [ + 317, + 247, + 558, + 278 + ], + "type": "text", + "content": "[118] Qichao Shentu, Beibu Li, Kai Zhao, Yang Shu, Zhongwen Rao, Lujia Pan, Bin Yang, and Chenjuan Guo. 2024. Towards a General Time Series Anomaly Detector with Adaptive Bottlenecks and Dual Adversarial Decoders. arXiv preprint arXiv:2405.15273 (2024)." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 317, + 278, + 558, + 310 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 278, + 558, + 310 + ], + "spans": [ + { + "bbox": [ + 317, + 278, + 558, + 310 + ], + "type": "text", + "content": "[119] Xiaoming Shi, Shiyu Wang, Yuqi Nie, Dianqi Li, Zhou Ye, Qingsong Wen, and Ming Jin. 2025. Time-MoE: Billion-Scale Time Series Foundation Models with Mixture of Experts. In The Thirteenth International Conference on Learning Representations (ICLR)." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 317, + 310, + 558, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 310, + 558, + 342 + ], + "spans": [ + { + "bbox": [ + 317, + 310, + 558, + 342 + ], + "type": "text", + "content": "[120] Xiaoming Shi, Sqiao Xue, Kangrui Wang, Fan Zhou, James Zhang, Jun Zhou, Chenhao Tan, and Hongyuan Mei. 2023. Language models can improve event prediction by few-shot abductive reasoning. Advances in Neural Information Processing Systems 36 (2023), 29532-29557." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 317, + 342, + 558, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 342, + 558, + 365 + ], + "spans": [ + { + "bbox": [ + 317, + 342, + 558, + 365 + ], + "type": "text", + "content": "[121] Ethan Steinberg, Jason Fries, Yizhe Xu, and Nigam Shah. 2023. 
MOTOR: A Time-To-Event Foundation Model For Structured Medical Records. arXiv preprint arXiv:2301.03150 (2023)." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 317, + 366, + 558, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 366, + 558, + 397 + ], + "spans": [ + { + "bbox": [ + 317, + 366, + 558, + 397 + ], + "type": "text", + "content": "[122] Mingtian Tan, Mike A Merrill, Vinayak Gupta, Tim Althoff, and Thomas Hartvigsen. 2024. Are language models actually useful for time series forecasting?. In The Thirty-eighth Annual Conference on Neural Information Processing Systems." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 317, + 398, + 558, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 398, + 558, + 437 + ], + "spans": [ + { + "bbox": [ + 317, + 398, + 558, + 437 + ], + "type": "text", + "content": "[123] Yihong Tang, Zhaokai Wang, Ao Qu, Yihao Yan, Zhaofeng Wu, Dingyi Zhuang, Jushi Kai, Kebing Hou, Xiaotong Guo, Jinhua Zhao, et al. 2024. ITINERA: Integrating Spatial Optimization with Large Language Models for Open-domain Urban Itinerary Planning. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track. 1413-1432." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 317, + 437, + 558, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 437, + 558, + 469 + ], + "spans": [ + { + "bbox": [ + 317, + 437, + 558, + 469 + ], + "type": "text", + "content": "[124] Nicolas Tempelmeier, Simon Gottschalk, and Elena Demidova. 2021. GeoVectors: a linked open corpus of OpenStreetMap Embeddings on world scale. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management. 4604-4612." 
+ } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 317, + 469, + 558, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 469, + 558, + 501 + ], + "spans": [ + { + "bbox": [ + 317, + 469, + 558, + 501 + ], + "type": "text", + "content": "[125] Saeid Ashraf Vaghefi, Dominik Stammbach, Veruska Muccione, Julia Bingler, Jingwei Ni, Mathias Kraus, Simon Allen, Chiara Colesanti-Senni, Tobias Wekhof, Tobias Schimanski, et al. 2023. ChatClimate: Grounding conversational AI in climate science. Communications Earth & Environment 4, 1 (2023), 480." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 317, + 502, + 558, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 502, + 558, + 533 + ], + "spans": [ + { + "bbox": [ + 317, + 502, + 558, + 533 + ], + "type": "text", + "content": "[126] Jiawei Wang, Renhe Jiang, Chuang Yang, Zengqing Wu, Makoto Onizuka, Ryosuke Shibasaki, Noboru Koshizuka, and Chuan Xiao. 2024. Large language models as urban residents: An llm agent framework for personal mobility generation. Advances in Neural Information Processing Systems (2024)." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 317, + 533, + 558, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 533, + 558, + 557 + ], + "spans": [ + { + "bbox": [ + 317, + 533, + 558, + 557 + ], + "type": "text", + "content": "[127] Junyang Wang, Haiyang Xu, Jiabo Ye, Ming Yan, Weizhou Shen, Ji Zhang, Fei Huang, and Jitao Sang. 2024. Mobile-agent: Autonomous multi-modal mobile device agent with visual perception. arXiv preprint arXiv:2401.16158 (2024)." 
+ } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 317, + 557, + 558, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 557, + 558, + 589 + ], + "spans": [ + { + "bbox": [ + 317, + 557, + 558, + 589 + ], + "type": "text", + "content": "[128] Kun Wang, Hao Wu, Yifan Duan, Guibin Zhang, Kai Wang, Xiaojiang Peng, Yu Zheng, Yuxuan Liang, and Yang Wang. 2024. NuwaDynamics: Discovering and Updating in Causal Spatio-Temporal Modeling. In The Twelfth International Conference on Learning Representations." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 317, + 589, + 558, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 589, + 558, + 605 + ], + "spans": [ + { + "bbox": [ + 317, + 589, + 558, + 605 + ], + "type": "text", + "content": "[129] Senzhang Wang, Jiannong Cao, and Philip Yu. 2020. Deep learning for spatiotemporal data mining: A survey. IEEE TKDE (2020)." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 317, + 605, + 558, + 636 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 605, + 558, + 636 + ], + "spans": [ + { + "bbox": [ + 317, + 605, + 558, + 636 + ], + "type": "text", + "content": "[130] Xuhong Wang, Ding Wang, Liang Chen, Fei-Yue Wang, and Yilun Lin. 2023. Building transportation foundation model via generative graph transformer. In 2023 IEEE 26th International Conference on Intelligent Transportation Systems (ITSC). IEEE, 6042-6047." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 317, + 637, + 558, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 637, + 558, + 668 + ], + "spans": [ + { + "bbox": [ + 317, + 637, + 558, + 668 + ], + "type": "text", + "content": "[131] Yihang Wang, Yuying Qiu, Peng Chen, Kai Zhao, Yang Shu, Zhongwen Rao, Lujia Pan, Bin Yang, and Chenjuan Guo. 2024. ROSE: Register Assisted General Time Series Forecasting with Decomposed Frequency Learning. CoRR abs/2405.17478 (2024)." 
+ } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 317, + 669, + 558, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 669, + 558, + 701 + ], + "spans": [ + { + "bbox": [ + 317, + 669, + 558, + 701 + ], + "type": "text", + "content": "[132] Yu Wang, Tongya Zheng, Shunyu Liu, Zunlei Feng, Kaixuan Chen, Yunzhi Hao, and Mingli Song. 2024. Spatiotemporal-Augmented Graph Neural Networks for Human Mobility Simulation. IEEE Transactions on Knowledge and Data Engineering (2024)." + } + ] + } + ], + "index": 46 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 273, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 273, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 273, + 69 + ], + "type": "text", + "content": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 86, + 294, + 715 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 52, + 86, + 294, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 86, + 294, + 126 + ], + "spans": [ + { + "bbox": [ + 52, + 86, + 294, + 126 + ], + "type": "text", + "content": "[133] Zihao Wang, Shaofei Cai, Guanzhou Chen, Anji Liu, Xiaojian Ma, Yitao Liang, and Team CraftJarvis. 2023. Describe, explain, plan and select: interactive planning with large language models enables open-world multi-task agents. 
In Proceedings of the 37th International Conference on Neural Information Processing Systems. 34153-34189." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 126, + 294, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 126, + 294, + 150 + ], + "spans": [ + { + "bbox": [ + 52, + 126, + 294, + 150 + ], + "type": "text", + "content": "[134] Zhaonan Wang, Renhe Jiang, Hao Xue, Flora D Salim, Xuan Song, and Ryosuke Shibasaki. 2022. Event-aware multimodal mobility nowcasting. In AAAI, Vol. 36. 4228-4236." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 150, + 294, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 150, + 294, + 175 + ], + "spans": [ + { + "bbox": [ + 52, + 150, + 294, + 175 + ], + "type": "text", + "content": "[135] Tonglong Wei, Yan Lin, Youfang Lin, Shengnan Guo, Jilin Hu, Gao Cong, and Huaiyu Wan. 2024. PTR: A Pre-trained Language Model for Trajectory Recovery. arXiv preprint arXiv:2410.14281 (2024)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 175, + 294, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 175, + 294, + 199 + ], + "spans": [ + { + "bbox": [ + 52, + 175, + 294, + 199 + ], + "type": "text", + "content": "[136] Haomin Wen, Youfang Lin, Yutong Xia, Huaiyu Wan, Qingsong Wen, Roger Zimmermann, and Yuxuan Liang. 2023. Diffstg: Probabilistic spatio-temporal graph forecasting with denoising diffusion models. In ACM SIGSPATIAL. 1-12." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 199, + 294, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 199, + 294, + 222 + ], + "spans": [ + { + "bbox": [ + 52, + 199, + 294, + 222 + ], + "type": "text", + "content": "[137] Gerald Woo, Chenghao Liu, Akshit Kumar, Caiming Xiong, Silvio Savarese, and Doyen Sahoo. 2024. Unified training of universal time series forecasting transformers. (2024)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 222, + 294, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 222, + 294, + 255 + ], + "spans": [ + { + "bbox": [ + 52, + 222, + 294, + 255 + ], + "type": "text", + "content": "[138] Lixia Wu, Jianlin Liu, Junhong Lou, Minhui Deng, Jianbin Zheng, Haomin Wen, Chao Song, and Shu He. 2024. G2PTL: A Geography-Graph Pre-trained Model. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 4991-4999." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 255, + 294, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 255, + 294, + 286 + ], + "spans": [ + { + "bbox": [ + 52, + 255, + 294, + 286 + ], + "type": "text", + "content": "[139] Wansen Wu, Weiyi Yang, Juanjuan Li, Yong Zhao, Zhengqiu Zhu, Bin Chen, Sihang Qiu, Yong Peng, and Fei-Yue Wang. 2024. Autonomous crowdsensing: operating and organizing crowdsensing for sensing automation. IEEE Transactions on Intelligent Vehicles (2024)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 286, + 294, + 310 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 286, + 294, + 310 + ], + "spans": [ + { + "bbox": [ + 52, + 286, + 294, + 310 + ], + "type": "text", + "content": "[140] Zonghan Wu, Shirui Pan, Guodong Long, Jing Jiang, Xiaojun Chang, and Chengqi Zhang. 2020. Connecting the dots: Multivariate time series forecasting with graph neural networks. In SIGKDD. 753-763." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 310, + 294, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 310, + 294, + 334 + ], + "spans": [ + { + "bbox": [ + 52, + 310, + 294, + 334 + ], + "type": "text", + "content": "[141] Zonghan Wu, Shirui Pan, Guodong Long, Jing Jiang, and Chengqi Zhang. 2019. Graph wavenet for deep spatial-temporal graph modeling. arXiv preprint arXiv:1906.00121 (2019)." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 334, + 294, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 334, + 294, + 366 + ], + "spans": [ + { + "bbox": [ + 52, + 334, + 294, + 366 + ], + "type": "text", + "content": "[142] Congxi Xiao, Jingbo Zhou, Yixiong Xiao, Jizhou Huang, and Hui Xiong. 2024. ReFound: Crafting a Foundation Model for Urban Region Understanding upon Language and Visual Foundations. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 3527-3538." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 366, + 294, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 366, + 294, + 397 + ], + "spans": [ + { + "bbox": [ + 52, + 366, + 294, + 397 + ], + "type": "text", + "content": "[143] Mengxi Xiao, Zihao Jiang, Lingfei Qian, Zhengyu Chen, Yueru He, Yijing Xu, Yuecheng Jiang, Dong Li, Ruey-Ling Weng, Min Peng, et al. 2025. Retrievalaugmented Large Language Models for Financial Time Series Forecasting. arXiv preprint arXiv:2502.05878 (2025)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 398, + 294, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 398, + 294, + 422 + ], + "spans": [ + { + "bbox": [ + 52, + 398, + 294, + 422 + ], + "type": "text", + "content": "[144] Fengli Xu, Jun Zhang, Chen Gao, Jie Feng, and Yong Li. 2023. Urban generative intelligence (ugi): A foundational platform for agents in embodied city environment. arXiv preprint arXiv:2312.11813 (2023)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 422, + 294, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 422, + 294, + 445 + ], + "spans": [ + { + "bbox": [ + 52, + 422, + 294, + 445 + ], + "type": "text", + "content": "[145] Jiehui Xu, Haixu Wu, Jianmin Wang, and Mingsheng Long. 2022. Anomaly Transformer: Time Series Anomaly Detection with Association Discrepancy. 
In International Conference on Learning Representations." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 52, + 445, + 294, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 445, + 294, + 469 + ], + "spans": [ + { + "bbox": [ + 52, + 445, + 294, + 469 + ], + "type": "text", + "content": "[146] Mingxing Xu, Wenrui Dai, Chunmiao Liu, Xing Gao, Weiyao Lin, Guo-Jun Qi, and Hongkai Xiong. 2020. Spatial-temporal transformer networks for traffic flow forecasting. arXiv preprint arXiv:2001.02908 (2020)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 52, + 469, + 294, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 469, + 294, + 493 + ], + "spans": [ + { + "bbox": [ + 52, + 469, + 294, + 493 + ], + "type": "text", + "content": "[147] Hao Xue and Flora D Salim. 2023. Promptcast: A new prompt-based learning paradigm for time series forecasting. IEEE Transactions on Knowledge and Data Engineering 36, 11 (2023), 6851-6864." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 52, + 493, + 294, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 493, + 294, + 517 + ], + "spans": [ + { + "bbox": [ + 52, + 493, + 294, + 517 + ], + "type": "text", + "content": "[148] Hao Xue, Tianye Tang, Ali Payani, and Flora D Salim. 2024. Prompt Mining for Language Models-based Mobility Flow Forecasting. In Proceedings of the 32nd ACM International Conference on Advances in Geographic Information Systems." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 52, + 517, + 294, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 517, + 294, + 541 + ], + "spans": [ + { + "bbox": [ + 52, + 517, + 294, + 541 + ], + "type": "text", + "content": "[149] Hao Xue, Bhanu Prakash Voutharoja, and Flora D Salim. 2022. Leveraging language foundation models for human mobility forecasting. 
In Proceedings of the 30th International Conference on Advances in Geographic Information Systems." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 52, + 541, + 294, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 541, + 294, + 572 + ], + "spans": [ + { + "bbox": [ + 52, + 541, + 294, + 572 + ], + "type": "text", + "content": "[150] Yibo Yan, Haomin Wen, Siru Zhong, Wei Chen, Haodong Chen, Qingsong Wen, Roger Zimmermann, and Yuxuan Liang. 2024. Urbanclip: Learning text-enhanced urban region profiling with contrastive language-image pretraining from the web. In Proceedings of the ACM on Web Conference 2024. 4006-4017." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 52, + 572, + 294, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 572, + 294, + 597 + ], + "spans": [ + { + "bbox": [ + 52, + 572, + 294, + 597 + ], + "type": "text", + "content": "[151] Yuwei Yan, Qingbin Zeng, Zhiheng Zheng, Jingzhe Yuan, Jie Feng, Jun Zhang, Fengli Xu, and Yong Li. 2024. OpenCity: A Scalable Platform to Simulate Urban Activities with Massive LLM Agents. arXiv preprint arXiv:2410.21286 (2024)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 52, + 597, + 294, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 597, + 294, + 620 + ], + "spans": [ + { + "bbox": [ + 52, + 597, + 294, + 620 + ], + "type": "text", + "content": "[152] Jianwei Yang, Reuben Tan, Qianhui Wu, Ruijie Zheng, Baolin Peng, Yongyuan Liang, Yu Gu, Mu Cai, Seonghyeon Ye, Joel Jang, et al. 2025. Magma: A Foundation Model for Multimodal AI Agents. arXiv preprint arXiv:2502.13130 (2025)." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 52, + 620, + 294, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 620, + 294, + 644 + ], + "spans": [ + { + "bbox": [ + 52, + 620, + 294, + 644 + ], + "type": "text", + "content": "[153] Jihan Yang, Shusheng Yang, Anjali W Gupta, Rilyn Han, Li Fei-Fei, and Saining Xie. 2024. Thinking in space: How multimodal large language models see, remember, and recall spaces. arXiv preprint arXiv:2412.14171 (2024)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 52, + 644, + 294, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 644, + 294, + 677 + ], + "spans": [ + { + "bbox": [ + 52, + 644, + 294, + 677 + ], + "type": "text", + "content": "[154] Kairui Yang, Zihao Guo, Gengjie Lin, Haotian Dong, Zhao Huang, Yipeng Wu, Die Zuo, Jibin Peng, Ziyuan Zhong, Xin WANG, Qing Guo, Xiaosong Jia, Junchi Yan, and Di Lin. 2025. Trajectory-LLM: A Language-based Data Generator for Trajectory Prediction in Autonomous Driving. In ICLR." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 52, + 677, + 294, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 677, + 294, + 700 + ], + "spans": [ + { + "bbox": [ + 52, + 677, + 294, + 700 + ], + "type": "text", + "content": "[155] Silin Yang, Dong Wang, Haoqi Zheng, and Ruochun Jin. 2024. TimeRAG: BOOSTING LLM Time Series Forecasting via Retrieval-Augmented Generation. arXiv preprint arXiv:2412.16643 (2024)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 52, + 700, + 294, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 700, + 294, + 715 + ], + "spans": [ + { + "bbox": [ + 52, + 700, + 294, + 715 + ], + "type": "text", + "content": "[156] Tiankai Yang, Yi Nian, Shawn Li, Ruiyao Xu, Yuangang Li, Jiaqi Li, Zhuo Xiao, Xiyang Hu, Ryan Rossi, Kaize Ding, et al. 2024. 
Ad-llm: Benchmarking large" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 86, + 558, + 701 + ], + "type": "list", + "angle": 0, + "index": 52, + "blocks": [ + { + "bbox": [ + 333, + 86, + 556, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 86, + 556, + 95 + ], + "spans": [ + { + "bbox": [ + 333, + 86, + 556, + 95 + ], + "type": "text", + "content": "language models for anomaly detection. arXiv preprint arXiv:2412.11142 (2024)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 95, + 558, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 95, + 558, + 118 + ], + "spans": [ + { + "bbox": [ + 317, + 95, + 558, + 118 + ], + "type": "text", + "content": "[157] Xinli Yu, Zheng Chen, Yuan Ling, Shujing Dong, Zongyi Liu, and Yanbin Lu. 2023. Temporal data meets LLM-explainable financial time series forecasting. arXiv preprint arXiv:2306.11025 (2023)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 118, + 558, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 118, + 558, + 150 + ], + "spans": [ + { + "bbox": [ + 317, + 118, + 558, + 150 + ], + "type": "text", + "content": "[158] Yuan Yuan, Jingtao Ding, Jie Feng, Depeng Jin, and Yong Li. 2024. Unist: A prompt-empowered universal model for urban spatio-temporal prediction. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4095-4106." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 150, + 558, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 150, + 558, + 175 + ], + "spans": [ + { + "bbox": [ + 317, + 150, + 558, + 175 + ], + "type": "text", + "content": "[159] Yuan Yuan, Jingtao Ding, Chonghua Han, Depeng Jin, and Yong Li. 2024. A Foundation Model for Unified Urban Spatio-Temporal Flow Prediction. arXiv preprint arXiv:2411.12972 (2024)." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 175, + 558, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 175, + 558, + 198 + ], + "spans": [ + { + "bbox": [ + 317, + 175, + 558, + 198 + ], + "type": "text", + "content": "[160] Yuan Yuan, Chonghua Han, Jingtao Ding, Depeng Jin, and Yong Li. 2024. Urbanfit: A foundation model for open-world urban spatio-temporal learning. arXiv preprint arXiv:2411.12164 (2024)." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 317, + 198, + 558, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 198, + 558, + 222 + ], + "spans": [ + { + "bbox": [ + 317, + 198, + 558, + 222 + ], + "type": "text", + "content": "[161] Yuan Yuan, Chenyang Shao, Jingtao Ding, Depeng Jin, and Yong Li. 2024. Spatiotemporal few-shot learning via diffusive neural network generation. In The Twelfth International Conference on Learning Representations." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 317, + 222, + 558, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 222, + 558, + 247 + ], + "spans": [ + { + "bbox": [ + 317, + 222, + 558, + 247 + ], + "type": "text", + "content": "[162] Ye Yuan, Yong Zhang, Boyue Wang, Yuan Peng, Yongli Hu, and Baocai Yin. 2022. STGAN: Spatio-temporal generative adversarial network for traffic data imputation. IEEE Transactions on Big Data 9, 1 (2022), 200-211." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 317, + 247, + 558, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 247, + 558, + 270 + ], + "spans": [ + { + "bbox": [ + 317, + 247, + 558, + 270 + ], + "type": "text", + "content": "[163] Zhenghang Yuan, Zhitong Xiong, Lichao Mou, and Xiao Xiang Zhu. 2024. Chatearthnet: A global-scale, high-quality image-text dataset for remote sensing. arXiv preprint arXiv:2402.11325 (2024)." 
+ } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 317, + 270, + 558, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 270, + 558, + 294 + ], + "spans": [ + { + "bbox": [ + 317, + 270, + 558, + 294 + ], + "type": "text", + "content": "[164] Kunpeng Zhang, Feng Zhou, Lan Wu, Na Xie, and Zhengbing He. 2024. Semantic understanding and prompt engineering for large-scale traffic data imputation. Information Fusion 102 (2024), 102038." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 317, + 294, + 558, + 310 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 294, + 558, + 310 + ], + "spans": [ + { + "bbox": [ + 317, + 294, + 558, + 310 + ], + "type": "text", + "content": "[165] Libo Zhang and Yue Ning. 2024. Large Language Models as Event Forecasters. arXiv preprint arXiv:2406.10492 (2024)." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 317, + 310, + 558, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 310, + 558, + 334 + ], + "spans": [ + { + "bbox": [ + 317, + 310, + 558, + 334 + ], + "type": "text", + "content": "[166] Qianru Zhang, Xubin Ren, Lianghao Xia, Siu Ming Yiu, and Chao Huang. 2024. Spatio-Temporal Graph Learning With Large Language Model. https://openreview.net/forum?id=QUKcfq6GX" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 317, + 334, + 558, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 334, + 558, + 365 + ], + "spans": [ + { + "bbox": [ + 317, + 334, + 558, + 365 + ], + "type": "text", + "content": "[167] Qianru Zhang, Haixin Wang, Cheng Long, Liangcai Su, Xingwei He, Jianlong Chang, Tailin Wu, Hongzhi Yin, Siu-Ming Yiu, Qi Tian, et al. 2024. A Survey of Generative Techniques for Spatial-Temporal Data Mining. arXiv preprint arXiv:2405.09592 (2024)." 
+ } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 317, + 365, + 558, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 365, + 558, + 389 + ], + "spans": [ + { + "bbox": [ + 317, + 365, + 558, + 389 + ], + "type": "text", + "content": "[168] Siyao Zhang, Daocheng Fu, Wenzhe Liang, Zhao Zhang, Bin Yu, Pinlong Cai, and Baozhen Yao. 2024. Trafficcpt: Viewing, processing and interacting with traffic foundation models. Transport Policy 150 (2024), 95-105." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 317, + 389, + 558, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 389, + 558, + 414 + ], + "spans": [ + { + "bbox": [ + 317, + 389, + 558, + 414 + ], + "type": "text", + "content": "[169] Weijia Zhang, Jindong Han, Zhao Xu, Hang Ni, Hao Liu, and Hui Xiong. 2024. Urban Foundation Models: A Survey. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 6633-6643." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 317, + 414, + 558, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 414, + 558, + 437 + ], + "spans": [ + { + "bbox": [ + 317, + 414, + 558, + 437 + ], + "type": "text", + "content": "[170] Xin Zhang, Tianjian Ouyang, Yu Shang, Qingmin Liao, and Yong Li. [n.d.]. UrbanMLLM: Joint Learning of Cross-view Imagery for Urban Understanding. ([n.d.])." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 317, + 437, + 558, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 437, + 558, + 469 + ], + "spans": [ + { + "bbox": [ + 317, + 437, + 558, + 469 + ], + "type": "text", + "content": "[171] Yu Zhang, Weiming Huang, Yao Yao, Song Gao, Lizhen Cui, and Zhongmin Yan. 2024. Urban region representation learning with human trajectories: a multiview approach incorporating transition, spatial, and temporal perspectives. GIScience & Remote Sensing 61, 1 (2024), 2387392." 
+ } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 317, + 469, + 558, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 469, + 558, + 493 + ], + "spans": [ + { + "bbox": [ + 317, + 469, + 558, + 493 + ], + "type": "text", + "content": "[172] Yimei Zhang, Xiangjie Kong, Wenfeng Zhou, Jin Liu, Yanjie Fu, and Guojiang Shen. 2024. A comprehensive survey on traffic missing data imputation. IEEE Transactions on Intelligent Transportation Systems (2024)." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 317, + 493, + 558, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 493, + 558, + 517 + ], + "spans": [ + { + "bbox": [ + 317, + 493, + 558, + 517 + ], + "type": "text", + "content": "[173] Yunxiang Zhang and Xiaojun Wan. 2024. SITUATEDGEN: incorporating geographical and temporal contexts into generative commonsense reasoning. Advances in Neural Information Processing Systems 36 (2024)." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 317, + 517, + 558, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 517, + 558, + 541 + ], + "spans": [ + { + "bbox": [ + 317, + 517, + 558, + 541 + ], + "type": "text", + "content": "[174] Yifan Zhang, Cheng Wei, Shangyou Wu, Zhengting He, and Wenhao Yu. 2023. GeoGPT: understanding and processing geospatial tasks through an autonomous GPT. arXiv preprint arXiv:2307.07930 (2023)." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 317, + 541, + 558, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 541, + 558, + 573 + ], + "spans": [ + { + "bbox": [ + 317, + 541, + 558, + 573 + ], + "type": "text", + "content": "[175] Zeyang Zhang, Xin Wang, Ziwei Zhang, Haoyang Li, Yijian Qin, and Wenwu Zhu. 2024. LLM4DyG: can large language models solve spatial-temporal problems on dynamic graphs? In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4350-4361." 
+ } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 317, + 573, + 558, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 573, + 558, + 605 + ], + "spans": [ + { + "bbox": [ + 317, + 573, + 558, + 605 + ], + "type": "text", + "content": "[176] Yu Zhao, Pan Deng, Junting Liu, Xiaofeng Jia, and Jianwei Zhang. 2023. Generative Causal Interpretation Model for Spatio-Temporal Representation Learning. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 3537-3548." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 317, + 605, + 558, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 605, + 558, + 628 + ], + "spans": [ + { + "bbox": [ + 317, + 605, + 558, + 628 + ], + "type": "text", + "content": "[177] Chuanpan Zheng, Xiaoliang Fan, Cheng Wang, and Jianzhong Qi. 2020. Gman: A graph multi-attention network for traffic prediction. In AAAI, Vol. 34. 1234–1241." + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 317, + 628, + 558, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 628, + 558, + 645 + ], + "spans": [ + { + "bbox": [ + 317, + 628, + 558, + 645 + ], + "type": "text", + "content": "[178] Yu Zheng, Licia Capra, Ouri Wolfson, and Hai Yang. 2014. Urban computing: concepts, methodologies, and applications. ACM TIST 5, 3 (2014), 1-55." + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 317, + 645, + 558, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 645, + 558, + 677 + ], + "spans": [ + { + "bbox": [ + 317, + 645, + 558, + 677 + ], + "type": "text", + "content": "[179] Siru Zhong, Xixuan Hao, Yibo Yan, Ying Zhang, Yangqiu Song, and Yuxuan Liang. 2024. Urbancross: Enhancing satellite image-text retrieval with cross-domain adaptation. In Proceedings of the 32nd ACM International Conference on Multimedia. 6307-6315." 
+ } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 317, + 677, + 558, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 677, + 558, + 701 + ], + "spans": [ + { + "bbox": [ + 317, + 677, + 558, + 701 + ], + "type": "text", + "content": "[180] Siru Zhong, Weilin Ruan, Ming Jin, Huan Li, Qingsong Wen, and Yuxuan Liang. 2025. Time-VLM: Exploring Multimodal Vision-Language Models for Augmented Time Series Forecasting. arXiv preprint arXiv:2502.04395 (2025)." + } + ] + } + ], + "index": 51 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 192, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 192, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 192, + 68 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "type": "text", + "content": "Yuxuan Liang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 86, + 296, + 373 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 52, + 86, + 296, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 86, + 296, + 110 + ], + "spans": [ + { + "bbox": [ + 52, + 86, + 296, + 110 + ], + "type": "text", + "content": "[181] Gengze Zhou, Yicong Hong, Zun Wang, Xin Eric Wang, and Qi Wu. 2024. Navigpt-2: Unleashing navigational reasoning capability for large vision-language models. In European Conference on Computer Vision. Springer, 260-278." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 111, + 296, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 111, + 296, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 111, + 296, + 135 + ], + "type": "text", + "content": "[182] Gengze Zhou, Yicong Hong, and Qi Wu. 2024. Navigpt: Explicit reasoning in vision-and-language navigation with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 7641-7649." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 135, + 295, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 135, + 295, + 159 + ], + "spans": [ + { + "bbox": [ + 53, + 135, + 295, + 159 + ], + "type": "text", + "content": "[183] Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, and Rong Jin. 2023. One Fits All: Power General Time Series Analysis by Pretrained LM. Advances in Neural Information Processing Systems (2023)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 159, + 294, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 159, + 294, + 182 + ], + "spans": [ + { + "bbox": [ + 53, + 159, + 294, + 182 + ], + "type": "text", + "content": "[184] Xingcheng Zhou, Mingyu Liu, Bare Luka Zagar, Ekim Yurtsever, and Alois C Knoll. 2023. Vision language models in autonomous driving and intelligent transportation systems. arXiv preprint arXiv:2310.14414 (2023)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 182, + 294, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 182, + 294, + 198 + ], + "spans": [ + { + "bbox": [ + 53, + 182, + 294, + 198 + ], + "type": "text", + "content": "[185] Zhilun Zhou, Yuming Lin, Depeng Jin, and Yong Li. 2024. Large language model for participatory urban planning. arXiv preprint arXiv:2402.17161 (2024)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 198, + 294, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 198, + 294, + 214 + ], + "spans": [ + { + "bbox": [ + 53, + 198, + 294, + 214 + ], + "type": "text", + "content": "[186] Zihao Zhou and Rose Yu. 2024. Can LLMs Understand Time Series Anomalies? arXiv preprint arXiv:2410.05440 (2024)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 214, + 294, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 214, + 294, + 255 + ], + "spans": [ + { + "bbox": [ + 53, + 214, + 294, + 255 + ], + "type": "text", + "content": "[187] Xizhou Zhu, Yuntao Chen, Hao Tian, Chenxin Tao, Weijie Su, Chenyu Yang, Gao Huang, Bin Li, Lewei Lu, Xiaogang Wang, et al. 2023. Ghost in the minecraft: Generally capable agents for open-world environments via large language models with text-based knowledge and memory. arXiv preprint arXiv:2305.17144 (2023)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 255, + 294, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 255, + 294, + 294 + ], + "spans": [ + { + "bbox": [ + 53, + 255, + 294, + 294 + ], + "type": "text", + "content": "[188] Yuanshao Zhu, James Jianqiao Yu, Xiangyu Zhao, Qidong Liu, Yongchao Ye, Wei Chen, Zijian Zhang, Xuetao Wei, and Yuxuan Liang. 2024. Controllraj: Controllable trajectory generation with topology-constrained diffusion model. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4676-4687." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 294, + 294, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 294, + 294, + 318 + ], + "spans": [ + { + "bbox": [ + 53, + 294, + 294, + 318 + ], + "type": "text", + "content": "[189] Yuanshao Zhu, James Jianqiao Yu, Xiangyu Zhao, Xuetao Wei, and Yuxuan Liang. 2024. 
UniTraj: Universal Human Trajectory Modeling from Billion-Scale Worldwide Traces. arXiv preprint arXiv:2411.03859 (2024)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 318, + 294, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 318, + 294, + 342 + ], + "spans": [ + { + "bbox": [ + 53, + 318, + 294, + 342 + ], + "type": "text", + "content": "[190] Zhengqiu Zhu, Yatai Ji, Sihang Qiu, Yong Zhao, Kai Xu, Rusheng Ju, and Bin Chen. 2024. A Prototype Design of LLM-Based Autonomous Web Crowdsensing. In International Conference on Web Engineering. Springer, 406-409." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 342, + 294, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 342, + 294, + 373 + ], + "spans": [ + { + "bbox": [ + 53, + 342, + 294, + 373 + ], + "type": "text", + "content": "[191] Zhengqiu Zhu, Yong Zhao, Bin Chen, Sihang Qiu, Kai Xu, Quanjun Yin, Jincai Huang, Zhong Liu, and Fei-Yue Wang. 2024. Conversational Crowdsensing: A Parallel Intelligence Powered Novel Sensing Approach. arXiv preprint arXiv:2402.06654 (2024)." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 51, + 384, + 259, + 397 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 384, + 259, + 397 + ], + "spans": [ + { + "bbox": [ + 51, + 384, + 259, + 397 + ], + "type": "text", + "content": "A Limitations and Future Opportunities" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 50, + 399, + 294, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 399, + 294, + 432 + ], + "spans": [ + { + "bbox": [ + 50, + 399, + 294, + 432 + ], + "type": "text", + "content": "We further discuss the potential limitations of current research and identify several key future directions aimed at advancing the development of more powerful, transparent, and reliable STFMs:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 51, + 434, + 295, + 708 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 51, + 434, + 295, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 434, + 295, + 554 + ], + "spans": [ + { + "bbox": [ + 51, + 434, + 295, + 554 + ], + "type": "text", + "content": "- The curse of accuracy against interpretability. We have identified a significant challenge in developing FMs for addressing numerical problems in ST data science. Directly leveraging LLMs for numerical tasks such as forecasting proves to be non-trivial [34]. Meanwhile, fine-tuning LLMs or training STFMs from scratch using large-scale, cross-domain ST data often comes at the cost of interactive capabilities, thereby hindering interpretability in the prediction outcomes. These limitations motivate us to explore a novel paradigm that not only retains strong numerical reasoning abilities but also enhances interpretability, bridging the gap between predictive accuracy and explanatory insight." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 51, + 554, + 295, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 554, + 295, + 674 + ], + "spans": [ + { + "bbox": [ + 51, + 554, + 295, + 674 + ], + "type": "text", + "content": "- Large foundation models are all we need? While the extensive parameterization of FMs enables impressive generalization capabilities, particularly in zero/few-shot settings, their superiority over smaller expert models remains context-dependent. In ST domains such as time series analysis [122] and urban planning [57], smaller expert models often outperform FMs when provided with sufficient domain-specific training data. This raises fundamental questions about the trade-offs between model scalability, efficiency, and task-specific optimization. Future research should delve into hybrid approaches that combine the adaptability of large models with the precision of expert models." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 51, + 674, + 294, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 674, + 294, + 708 + ], + "spans": [ + { + "bbox": [ + 51, + 674, + 294, + 708 + ], + "type": "text", + "content": "- One-fit-all FMs across the full workflow. While current FMs are typically designed to support only specific stages of ST data science, we envision a more unified FM capable of seamlessly" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 322, + 84, + 560, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 84, + 560, + 140 + ], + "spans": [ + { + "bbox": [ + 322, + 84, + 560, + 140 + ], + "type": "text", + "content": "spanning the entire workflow, from initial data sensing and management to mining and supporting downstream applications. 
Achieving this goal will likely require the development of advanced LLM agents that can function as full-stack engineers (i.e., strongly benefiting all stages) for ST data science." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 140, + 560, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 140, + 560, + 239 + ], + "spans": [ + { + "bbox": [ + 315, + 140, + 560, + 239 + ], + "type": "text", + "content": "- Integrating STFMs with multimodal understanding. While current STFMs excel in processing structured ST data, their ability to integrate and reason over multimodal information, including text, images, video, and sensor data, remains underdeveloped. Many tasks require models to jointly interpret geospatial context, temporal dynamics, and text descriptions. Future research can focus on designing multimodal STFMs that effectively align, fuse, and reason over heterogeneous data sources, enabling more context-aware and human-interpretable decision-making." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 247, + 484, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 247, + 484, + 258 + ], + "spans": [ + { + "bbox": [ + 315, + 247, + 484, + 258 + ], + "type": "text", + "content": "B Zero-shot Utilization of LLMs" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 315, + 262, + 558, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 262, + 558, + 273 + ], + "spans": [ + { + "bbox": [ + 315, + 262, + 558, + 273 + ], + "type": "text", + "content": "There are three ways of directly using LLMs for various ST tasks:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 315, + 275, + 559, + 494 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 315, + 275, + 559, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 275, + 559, + 352 + ], + "spans": [ + { + "bbox": [ + 315, + 275, + 559, + 352 + ], + "type": "text", + "content": "- LLM-as-Augmenter. Pretrained LLMs can enhance both data understanding and model performance. On the one hand, it can serve as the input augmenter, which enhances data interoperability or provides external information [40, 79] (e.g., textual or visual). On the other hand, LLMs can serve as a parameter-frozen model component [102, 150, 166], thus augmenting domain models by injecting the pretrained external knowledge in LLMs." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 315, + 352, + 559, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 352, + 559, + 407 + ], + "spans": [ + { + "bbox": [ + 315, + 352, + 559, + 407 + ], + "type": "text", + "content": "- LLM-as-Predictor. LLMs can be directly employed as predictors [33, 53, 73, 125] for various tasks. Due to the modality gap between text and ST data, preprocessing is required to fit the input spaces of LLMs. 
Such step typically contains prompt engineering [73, 125, 147-149] or patch & tokenization [53]." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 315, + 407, + 559, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 407, + 559, + 494 + ], + "spans": [ + { + "bbox": [ + 315, + 407, + 559, + 494 + ], + "type": "text", + "content": "- LLM-as-Agent. LLM-based agents are typically equipped with the ability to memorize and call various tools. When applied to ST data science, various domain-expert models can be wrapped as a tool and added into the agent in a plug-and-play manner [144, 168, 174]. As such, LLM serves as a router to access different models with both flexibility and performance guarantees. Furthermore, multi-agent systems [185] can be built to solve more complex tasks in the ST domain." + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 503, + 523, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 503, + 523, + 516 + ], + "spans": [ + { + "bbox": [ + 315, + 503, + 523, + 516 + ], + "type": "text", + "content": "C Comparison between LLMs and PFMs" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 313, + 518, + 559, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 518, + 559, + 584 + ], + "spans": [ + { + "bbox": [ + 313, + 518, + 559, + 584 + ], + "type": "text", + "content": "Table 3 demonstrates the comparison between LLMs and PFMs on their capabilities, including perception, optimization, and reasoning. For example, PFMs excel in exceptional numerical reasoning abilities, yet they often struggle with common-sense understanding. There is still no free lunch, and the user can choose either LLMs or PFMs according to the downstream applications." 
+ } + ] + } + ], + "index": 29 + }, + { + "type": "table", + "bbox": [ + 317, + 628, + 555, + 705 + ], + "blocks": [ + { + "bbox": [ + 315, + 594, + 559, + 616 + ], + "lines": [ + { + "bbox": [ + 315, + 594, + 559, + 616 + ], + "spans": [ + { + "bbox": [ + 315, + 594, + 559, + 616 + ], + "type": "text", + "content": "Table 3: A capability comparison between LLMs and PFMs for ST data science." + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 628, + 555, + 705 + ], + "lines": [ + { + "bbox": [ + 317, + 628, + 555, + 705 + ], + "spans": [ + { + "bbox": [ + 317, + 628, + 555, + 705 + ], + "type": "table", + "html": "
CapabilitiesLarge Language Models (LLMs)Pretrained Foundation Models (PFMs)
Perception▲ Limited native ST perception; can be enhanced via fine-tuning✓ Strong ST perception, integrating sensor data and domain-specific learning
Optimization✓ Agent-based reasoning for decision-making; relies on prompting and heuristics▲ Limited; lacks decision-making ability for control and planning
Common-sense Reasoning✓ Strong via pretraining on vast textual data; can be enhanced with fine-tuning▲ Limited; relies on structured ST data rather than broad world knowledge
Numerical Reasoning▲ Handles arithmetic but struggles with structured ST computations✓ Designed for numerical problems, e.g., forecasting, anomaly detection
Causal Reasoning▲ Can infer causal relations from text but lacks structured ST modeling✓ Built-in graph-based and ST causal modeling
", + "image_path": "146f81da04a218bcff0caa24a2b9f78a939ebf9ec6c05eb8643c1f4789377d85.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "table_body" + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 273, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 273, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 273, + 69 + ], + "type": "text", + "content": "Foundation Models for Spatio-Temporal Data Science: A Tutorial and Survey" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 559, + 69 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 82, + 558, + 379 + ], + "blocks": [ + { + "bbox": [ + 52, + 82, + 558, + 379 + ], + "lines": [ + { + "bbox": [ + 52, + 82, + 558, + 379 + ], + "spans": [ + { + "bbox": [ + 52, + 82, + 558, + 379 + ], + "type": "image", + "image_path": "0e2c71ca92a3b1948076a1507053343e497304cd0c184fa18d2ff53d67b1e756.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 190, + 390, + 420, + 402 + ], + "lines": [ + { + "bbox": [ + 190, + 390, + 420, + 402 + ], + "spans": [ + { + "bbox": [ + 190, + 390, + 420, + 402 + ], + "type": "text", + "content": "Figure 7: Taxonomy from the methodology perspective." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 192, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 192, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 192, + 69 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 501, + 60, + 558, + 69 + ], + "type": "text", + "content": "Yuxuan Liang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file